Merge branch 'linux-2.6.33.y' of git://git.kernel.org/pub/scm/linux/kernel/git/inaky/wimax
diff --git a/MAINTAINERS b/MAINTAINERS
index ca4131e..0350ace 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1086,7 +1086,6 @@
 F:	net/ax25/
 
 B43 WIRELESS DRIVER
-M:	Michael Buesch <mb@bu3sch.de>
 M:	Stefano Brivio <stefano.brivio@polimi.it>
 L:	linux-wireless@vger.kernel.org
 W:	http://linuxwireless.org/en/users/Drivers/b43
@@ -3656,6 +3655,7 @@
 W:	http://www.linuxfoundation.org/en/Net
 W:	http://patchwork.ozlabs.org/project/netdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
 S:	Maintained
 F:	net/
 F:	include/net/
@@ -4304,7 +4304,7 @@
 RALINK RT2X00 WIRELESS LAN DRIVER
 P:	rt2x00 project
 L:	linux-wireless@vger.kernel.org
-L:	users@rt2x00.serialmonkey.com
+L:	users@rt2x00.serialmonkey.com (moderated for non-subscribers)
 W:	http://rt2x00.serialmonkey.com/
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ivd/rt2x00.git
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f766cc4..bc53fed 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2906,8 +2906,8 @@
 	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
 	u32 oc3_index;
 
-	if ((media_index < 0) || (media_index > 4))
-	    media_index = 5;
+	if (media_index > 4)
+		media_index = 5;
 	
 	switch (fore200e->loop_mode) {
 	    case ATM_LM_NONE:    oc3_index = 0;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 7066703..e906658 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2739,7 +2739,7 @@
 			spin_lock_irqsave(&he_dev->global_lock, flags);
 			switch (reg.type) {
 				case HE_REGTYPE_PCI:
-					if (reg.addr < 0 || reg.addr >= HE_REGMAP_SIZE) {
+					if (reg.addr >= HE_REGMAP_SIZE) {
 						err = -EINVAL;
 						break;
 					}
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index a25216b..ccb2a7b 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -19,7 +19,7 @@
 
 /* check if byte must be stuffed/escaped
  * I'm not sure which data should be encoded.
- * Therefore I will go the hard way and decode every value
+ * Therefore I will go the hard way and encode every value
  * less than 0x20, the flag sequence and the control escape char.
  */
 static inline int muststuff(unsigned char c)
@@ -35,288 +35,383 @@
 
 /* == data input =========================================================== */
 
-/* process a block of received bytes in command mode (modem response)
+/* process a block of received bytes in command mode
+ * (mstate != MS_LOCKED && (inputstate & INS_command))
+ * Append received bytes to the command response buffer and forward them
+ * line by line to the response handler. Exit whenever a mode/state change
+ * might have occurred.
  * Return value:
  *	number of processed bytes
  */
-static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes,
-			   struct inbuf_t *inbuf)
+static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
 {
+	unsigned char *src = inbuf->data + inbuf->head;
 	struct cardstate *cs = inbuf->cs;
-	unsigned cbytes      = cs->cbytes;
-	int inputstate = inbuf->inputstate;
-	int startbytes = numbytes;
+	unsigned cbytes = cs->cbytes;
+	unsigned procbytes = 0;
+	unsigned char c;
 
-	for (;;) {
-		cs->respdata[cbytes] = c;
-		if (c == 10 || c == 13) {
-			gig_dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)",
-				__func__, cbytes);
-			cs->cbytes = cbytes;
-			gigaset_handle_modem_response(cs); /* can change
-							      cs->dle */
-			cbytes = 0;
+	while (procbytes < numbytes) {
+		c = *src++;
+		procbytes++;
 
-			if (cs->dle &&
-			    !(inputstate & INS_DLE_command)) {
-				inputstate &= ~INS_command;
+		switch (c) {
+		case '\n':
+			if (cbytes == 0 && cs->respdata[0] == '\r') {
+				/* collapse LF with preceding CR */
+				cs->respdata[0] = 0;
 				break;
 			}
-		} else {
-			/* advance in line buffer, checking for overflow */
-			if (cbytes < MAX_RESP_SIZE - 1)
-				cbytes++;
-			else
-				dev_warn(cs->dev, "response too large\n");
-		}
+			/* --v-- fall through --v-- */
+		case '\r':
+			/* end of message line, pass to response handler */
+			gig_dbg(DEBUG_TRANSCMD, "%s: End of Message (%d Bytes)",
+				__func__, cbytes);
+			if (cbytes >= MAX_RESP_SIZE) {
+				dev_warn(cs->dev, "response too large (%d)\n",
+					 cbytes);
+				cbytes = MAX_RESP_SIZE;
+			}
+			cs->cbytes = cbytes;
+			gigaset_handle_modem_response(cs);
+			cbytes = 0;
 
-		if (!numbytes)
-			break;
-		c = *src++;
-		--numbytes;
-		if (c == DLE_FLAG &&
-		    (cs->dle || inputstate & INS_DLE_command)) {
-			inputstate |= INS_DLE_char;
-			break;
+			/* store EOL byte for CRLF collapsing */
+			cs->respdata[0] = c;
+
+			/* cs->dle may have changed */
+			if (cs->dle && !(inbuf->inputstate & INS_DLE_command))
+				inbuf->inputstate &= ~INS_command;
+
+			/* return for reevaluating state */
+			goto exit;
+
+		case DLE_FLAG:
+			if (inbuf->inputstate & INS_DLE_char) {
+				/* quoted DLE: clear quote flag */
+				inbuf->inputstate &= ~INS_DLE_char;
+			} else if (cs->dle ||
+				   (inbuf->inputstate & INS_DLE_command)) {
+				/* DLE escape, pass up for handling */
+				inbuf->inputstate |= INS_DLE_char;
+				goto exit;
+			}
+			/* quoted or not in DLE mode: treat as regular data */
+			/* --v-- fall through --v-- */
+		default:
+			/* append to line buffer if possible */
+			if (cbytes < MAX_RESP_SIZE)
+				cs->respdata[cbytes] = c;
+			cbytes++;
 		}
 	}
-
+exit:
 	cs->cbytes = cbytes;
-	inbuf->inputstate = inputstate;
-
-	return startbytes - numbytes;
+	return procbytes;
 }
 
-/* process a block of received bytes in lock mode (tty i/f)
+/* process a block of received bytes in lock mode
+ * All received bytes are passed unmodified to the tty i/f.
  * Return value:
  *	number of processed bytes
  */
-static inline int lock_loop(unsigned char *src, int numbytes,
-			    struct inbuf_t *inbuf)
+static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
 {
-	struct cardstate *cs = inbuf->cs;
+	unsigned char *src = inbuf->data + inbuf->head;
 
-	gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
-			   numbytes, src);
-	gigaset_if_receive(cs, src, numbytes);
-
+	gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src);
+	gigaset_if_receive(inbuf->cs, src, numbytes);
 	return numbytes;
 }
 
+/* set up next receive skb for data mode
+ */
+static void new_rcv_skb(struct bc_state *bcs)
+{
+	struct cardstate *cs = bcs->cs;
+	unsigned short hw_hdr_len = cs->hw_hdr_len;
+
+	if (bcs->ignore) {
+		bcs->skb = NULL;
+		return;
+	}
+
+	bcs->skb = dev_alloc_skb(SBUFSIZE + hw_hdr_len);
+	if (bcs->skb == NULL) {
+		dev_warn(cs->dev, "could not allocate new skb\n");
+		return;
+	}
+	skb_reserve(bcs->skb, hw_hdr_len);
+}
+
 /* process a block of received bytes in HDLC data mode
+ * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
  * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
  * When a frame is complete, check the FCS and pass valid frames to the LL.
  * If DLE is encountered, return immediately to let the caller handle it.
  * Return value:
  *	number of processed bytes
- *	numbytes (all bytes processed) on error --FIXME
  */
-static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes,
-			    struct inbuf_t *inbuf)
+static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
 {
 	struct cardstate *cs = inbuf->cs;
-	struct bc_state *bcs = inbuf->bcs;
+	struct bc_state *bcs = cs->bcs;
 	int inputstate = bcs->inputstate;
 	__u16 fcs = bcs->fcs;
 	struct sk_buff *skb = bcs->skb;
-	int startbytes = numbytes;
+	unsigned char *src = inbuf->data + inbuf->head;
+	unsigned procbytes = 0;
+	unsigned char c;
 
-	if (unlikely(inputstate & INS_byte_stuff)) {
+	if (inputstate & INS_byte_stuff) {
+		if (!numbytes)
+			return 0;
 		inputstate &= ~INS_byte_stuff;
 		goto byte_stuff;
 	}
-	for (;;) {
-		if (unlikely(c == PPP_ESCAPE)) {
-			if (unlikely(!numbytes)) {
-				inputstate |= INS_byte_stuff;
+
+	while (procbytes < numbytes) {
+		c = *src++;
+		procbytes++;
+		if (c == DLE_FLAG) {
+			if (inputstate & INS_DLE_char) {
+				/* quoted DLE: clear quote flag */
+				inputstate &= ~INS_DLE_char;
+			} else if (cs->dle || (inputstate & INS_DLE_command)) {
+				/* DLE escape, pass up for handling */
+				inputstate |= INS_DLE_char;
 				break;
 			}
-			c = *src++;
-			--numbytes;
-			if (unlikely(c == DLE_FLAG &&
-				     (cs->dle ||
-				      inbuf->inputstate & INS_DLE_command))) {
-				inbuf->inputstate |= INS_DLE_char;
+		}
+
+		if (c == PPP_ESCAPE) {
+			/* byte stuffing indicator: pull in next byte */
+			if (procbytes >= numbytes) {
+				/* end of buffer, save for later processing */
 				inputstate |= INS_byte_stuff;
 				break;
 			}
 byte_stuff:
+			c = *src++;
+			procbytes++;
+			if (c == DLE_FLAG) {
+				if (inputstate & INS_DLE_char) {
+					/* quoted DLE: clear quote flag */
+					inputstate &= ~INS_DLE_char;
+				} else if (cs->dle ||
+					   (inputstate & INS_DLE_command)) {
+					/* DLE escape, pass up for handling */
+					inputstate |=
+						INS_DLE_char | INS_byte_stuff;
+					break;
+				}
+			}
 			c ^= PPP_TRANS;
-			if (unlikely(!muststuff(c)))
+#ifdef CONFIG_GIGASET_DEBUG
+			if (!muststuff(c))
 				gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
-		} else if (unlikely(c == PPP_FLAG)) {
-			if (unlikely(inputstate & INS_skip_frame)) {
-#ifdef CONFIG_GIGASET_DEBUG
-				if (!(inputstate & INS_have_data)) { /* 7E 7E */
-					++bcs->emptycount;
-				} else
-					gig_dbg(DEBUG_HDLC,
-					    "7e----------------------------");
 #endif
-
-				/* end of frame */
-				gigaset_isdn_rcv_err(bcs);
-				dev_kfree_skb(skb);
-			} else if (!(inputstate & INS_have_data)) { /* 7E 7E */
-#ifdef CONFIG_GIGASET_DEBUG
-				++bcs->emptycount;
-#endif
-				break;
-			} else {
+		} else if (c == PPP_FLAG) {
+			/* end of frame: process content if any */
+			if (inputstate & INS_have_data) {
 				gig_dbg(DEBUG_HDLC,
 					"7e----------------------------");
 
-				/* end of frame */
-				if (unlikely(fcs != PPP_GOODFCS)) {
+				/* check and pass received frame */
+				if (!skb) {
+					/* skipped frame */
+					gigaset_isdn_rcv_err(bcs);
+				} else if (skb->len < 2) {
+					/* frame too short for FCS */
+					dev_warn(cs->dev,
+						 "short frame (%d)\n",
+						 skb->len);
+					gigaset_isdn_rcv_err(bcs);
+					dev_kfree_skb_any(skb);
+				} else if (fcs != PPP_GOODFCS) {
+					/* frame check error */
 					dev_err(cs->dev,
 				"Checksum failed, %u bytes corrupted!\n",
 						skb->len);
 					gigaset_isdn_rcv_err(bcs);
-					dev_kfree_skb(skb);
-				} else if (likely(skb->len > 2)) {
+					dev_kfree_skb_any(skb);
+				} else {
+					/* good frame */
 					__skb_trim(skb, skb->len - 2);
 					gigaset_skb_rcvd(bcs, skb);
-				} else {
-					if (skb->len) {
-						dev_err(cs->dev,
-					"invalid packet size (%d)\n", skb->len);
-						gigaset_isdn_rcv_err(bcs);
-					}
-					dev_kfree_skb(skb);
+				}
+
+				/* prepare reception of next frame */
+				inputstate &= ~INS_have_data;
+				new_rcv_skb(bcs);
+				skb = bcs->skb;
+			} else {
+				/* empty frame (7E 7E) */
+#ifdef CONFIG_GIGASET_DEBUG
+				++bcs->emptycount;
+#endif
+				if (!skb) {
+					/* skipped (?) */
+					gigaset_isdn_rcv_err(bcs);
+					new_rcv_skb(bcs);
+					skb = bcs->skb;
 				}
 			}
 
 			fcs = PPP_INITFCS;
-			inputstate &= ~(INS_have_data | INS_skip_frame);
-			if (unlikely(bcs->ignore)) {
-				inputstate |= INS_skip_frame;
-				skb = NULL;
-			} else {
-				skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
-				if (skb != NULL) {
-					skb_reserve(skb, cs->hw_hdr_len);
-				} else {
-					dev_warn(cs->dev,
-						"could not allocate new skb\n");
-					inputstate |= INS_skip_frame;
-				}
-			}
-
-			break;
-		} else if (unlikely(muststuff(c))) {
+			continue;
+#ifdef CONFIG_GIGASET_DEBUG
+		} else if (muststuff(c)) {
 			/* Should not happen. Possible after ZDLE=1<CR><LF>. */
 			gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
+#endif
 		}
 
-		/* add character */
-
+		/* regular data byte, append to skb */
 #ifdef CONFIG_GIGASET_DEBUG
-		if (unlikely(!(inputstate & INS_have_data))) {
+		if (!(inputstate & INS_have_data)) {
 			gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
 				bcs->emptycount);
 			bcs->emptycount = 0;
 		}
 #endif
-
 		inputstate |= INS_have_data;
-
-		if (likely(!(inputstate & INS_skip_frame))) {
-			if (unlikely(skb->len == SBUFSIZE)) {
+		if (skb) {
+			if (skb->len == SBUFSIZE) {
 				dev_warn(cs->dev, "received packet too long\n");
 				dev_kfree_skb_any(skb);
-				skb = NULL;
-				inputstate |= INS_skip_frame;
-				break;
+				/* skip remainder of packet */
+				bcs->skb = skb = NULL;
+			} else {
+				*__skb_put(skb, 1) = c;
+				fcs = crc_ccitt_byte(fcs, c);
 			}
-			*__skb_put(skb, 1) = c;
-			fcs = crc_ccitt_byte(fcs, c);
-		}
-
-		if (unlikely(!numbytes))
-			break;
-		c = *src++;
-		--numbytes;
-		if (unlikely(c == DLE_FLAG &&
-			     (cs->dle ||
-			      inbuf->inputstate & INS_DLE_command))) {
-			inbuf->inputstate |= INS_DLE_char;
-			break;
 		}
 	}
+
 	bcs->inputstate = inputstate;
 	bcs->fcs = fcs;
-	bcs->skb = skb;
-	return startbytes - numbytes;
+	return procbytes;
 }
 
 /* process a block of received bytes in transparent data mode
+ * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC)
  * Invert bytes, undoing byte stuffing and watching for DLE escapes.
  * If DLE is encountered, return immediately to let the caller handle it.
  * Return value:
  *	number of processed bytes
- *	numbytes (all bytes processed) on error --FIXME
  */
-static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
-			    struct inbuf_t *inbuf)
+static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
 {
 	struct cardstate *cs = inbuf->cs;
-	struct bc_state *bcs = inbuf->bcs;
+	struct bc_state *bcs = cs->bcs;
 	int inputstate = bcs->inputstate;
 	struct sk_buff *skb = bcs->skb;
-	int startbytes = numbytes;
+	unsigned char *src = inbuf->data + inbuf->head;
+	unsigned procbytes = 0;
+	unsigned char c;
 
-	for (;;) {
-		/* add character */
-		inputstate |= INS_have_data;
+	if (!skb) {
+		/* skip this block */
+		new_rcv_skb(bcs);
+		return numbytes;
+	}
 
-		if (likely(!(inputstate & INS_skip_frame))) {
-			if (unlikely(skb->len == SBUFSIZE)) {
-				//FIXME just pass skb up and allocate a new one
-				dev_warn(cs->dev, "received packet too long\n");
-				dev_kfree_skb_any(skb);
-				skb = NULL;
-				inputstate |= INS_skip_frame;
+	while (procbytes < numbytes && skb->len < SBUFSIZE) {
+		c = *src++;
+		procbytes++;
+
+		if (c == DLE_FLAG) {
+			if (inputstate & INS_DLE_char) {
+				/* quoted DLE: clear quote flag */
+				inputstate &= ~INS_DLE_char;
+			} else if (cs->dle || (inputstate & INS_DLE_command)) {
+				/* DLE escape, pass up for handling */
+				inputstate |= INS_DLE_char;
 				break;
 			}
-			*__skb_put(skb, 1) = bitrev8(c);
 		}
 
-		if (unlikely(!numbytes))
-			break;
-		c = *src++;
-		--numbytes;
-		if (unlikely(c == DLE_FLAG &&
-			     (cs->dle ||
-			      inbuf->inputstate & INS_DLE_command))) {
-			inbuf->inputstate |= INS_DLE_char;
-			break;
-		}
+		/* regular data byte: append to current skb */
+		inputstate |= INS_have_data;
+		*__skb_put(skb, 1) = bitrev8(c);
 	}
 
 	/* pass data up */
-	if (likely(inputstate & INS_have_data)) {
-		if (likely(!(inputstate & INS_skip_frame))) {
-			gigaset_skb_rcvd(bcs, skb);
-		}
-		inputstate &= ~(INS_have_data | INS_skip_frame);
-		if (unlikely(bcs->ignore)) {
-			inputstate |= INS_skip_frame;
-			skb = NULL;
-		} else {
-			skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
-			if (skb != NULL) {
-				skb_reserve(skb, cs->hw_hdr_len);
-			} else {
-				dev_warn(cs->dev,
-					 "could not allocate new skb\n");
-				inputstate |= INS_skip_frame;
-			}
-		}
+	if (inputstate & INS_have_data) {
+		gigaset_skb_rcvd(bcs, skb);
+		inputstate &= ~INS_have_data;
+		new_rcv_skb(bcs);
 	}
 
 	bcs->inputstate = inputstate;
-	bcs->skb = skb;
-	return startbytes - numbytes;
+	return procbytes;
+}
+
+/* process DLE escapes
+ * Called whenever a DLE sequence might be encountered in the input stream.
+ * Either processes the entire DLE sequence or, if that isn't possible,
+ * notes the fact that an initial DLE has been received in the INS_DLE_char
+ * inputstate flag and resumes processing of the sequence on the next call.
+ */
+static void handle_dle(struct inbuf_t *inbuf)
+{
+	struct cardstate *cs = inbuf->cs;
+
+	if (cs->mstate == MS_LOCKED)
+		return;		/* no DLE processing in lock mode */
+
+	if (!(inbuf->inputstate & INS_DLE_char)) {
+		/* no DLE pending */
+		if (inbuf->data[inbuf->head] == DLE_FLAG &&
+		    (cs->dle || inbuf->inputstate & INS_DLE_command)) {
+			/* start of DLE sequence */
+			inbuf->head++;
+			if (inbuf->head == inbuf->tail ||
+			    inbuf->head == RBUFSIZE) {
+				/* end of buffer, save for later processing */
+				inbuf->inputstate |= INS_DLE_char;
+				return;
+			}
+		} else {
+			/* regular data byte */
+			return;
+		}
+	}
+
+	/* consume pending DLE */
+	inbuf->inputstate &= ~INS_DLE_char;
+
+	switch (inbuf->data[inbuf->head]) {
+	case 'X':	/* start of event message */
+		if (inbuf->inputstate & INS_command)
+			dev_notice(cs->dev,
+				   "received <DLE>X in command mode\n");
+		inbuf->inputstate |= INS_command | INS_DLE_command;
+		inbuf->head++;	/* byte consumed */
+		break;
+	case '.':	/* end of event message */
+		if (!(inbuf->inputstate & INS_DLE_command))
+			dev_notice(cs->dev,
+				   "received <DLE>. without <DLE>X\n");
+		inbuf->inputstate &= ~INS_DLE_command;
+		/* return to data mode if in DLE mode */
+		if (cs->dle)
+			inbuf->inputstate &= ~INS_command;
+		inbuf->head++;	/* byte consumed */
+		break;
+	case DLE_FLAG:	/* DLE in data stream */
+		/* mark as quoted */
+		inbuf->inputstate |= INS_DLE_char;
+		if (!(cs->dle || inbuf->inputstate & INS_DLE_command))
+			dev_notice(cs->dev,
+				   "received <DLE><DLE> not in DLE mode\n");
+		break;	/* quoted byte left in buffer */
+	default:
+		dev_notice(cs->dev, "received <DLE><%02x>\n",
+			   inbuf->data[inbuf->head]);
+		/* quoted byte left in buffer */
+	}
 }
 
 /**
@@ -330,94 +425,39 @@
  */
 void gigaset_m10x_input(struct inbuf_t *inbuf)
 {
-	struct cardstate *cs;
-	unsigned tail, head, numbytes;
-	unsigned char *src, c;
-	int procbytes;
+	struct cardstate *cs = inbuf->cs;
+	unsigned numbytes, procbytes;
 
-	head = inbuf->head;
-	tail = inbuf->tail;
-	gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
+	gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);
 
-	if (head != tail) {
-		cs = inbuf->cs;
-		src = inbuf->data + head;
-		numbytes = (head > tail ? RBUFSIZE : tail) - head;
+	while (inbuf->head != inbuf->tail) {
+		/* check for DLE escape */
+		handle_dle(inbuf);
+
+		/* process a contiguous block of bytes */
+		numbytes = (inbuf->head > inbuf->tail ?
+			    RBUFSIZE : inbuf->tail) - inbuf->head;
 		gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
+		/*
+		 * numbytes may be 0 if handle_dle() ate the last byte.
+		 * This does no harm, *_loop() will just return 0 immediately.
+		 */
 
-		while (numbytes) {
-			if (cs->mstate == MS_LOCKED) {
-				procbytes = lock_loop(src, numbytes, inbuf);
-				src += procbytes;
-				numbytes -= procbytes;
-			} else {
-				c = *src++;
-				--numbytes;
-				if (c == DLE_FLAG && (cs->dle ||
-				    inbuf->inputstate & INS_DLE_command)) {
-					if (!(inbuf->inputstate & INS_DLE_char)) {
-						inbuf->inputstate |= INS_DLE_char;
-						goto nextbyte;
-					}
-					/* <DLE> <DLE> => <DLE> in data stream */
-					inbuf->inputstate &= ~INS_DLE_char;
-				}
+		if (cs->mstate == MS_LOCKED)
+			procbytes = lock_loop(numbytes, inbuf);
+		else if (inbuf->inputstate & INS_command)
+			procbytes = cmd_loop(numbytes, inbuf);
+		else if (cs->bcs->proto2 == L2_HDLC)
+			procbytes = hdlc_loop(numbytes, inbuf);
+		else
+			procbytes = iraw_loop(numbytes, inbuf);
+		inbuf->head += procbytes;
 
-				if (!(inbuf->inputstate & INS_DLE_char)) {
+		/* check for buffer wraparound */
+		if (inbuf->head >= RBUFSIZE)
+			inbuf->head = 0;
 
-					/* FIXME use function pointers?  */
-					if (inbuf->inputstate & INS_command)
-						procbytes = cmd_loop(c, src, numbytes, inbuf);
-					else if (inbuf->bcs->proto2 == L2_HDLC)
-						procbytes = hdlc_loop(c, src, numbytes, inbuf);
-					else
-						procbytes = iraw_loop(c, src, numbytes, inbuf);
-
-					src += procbytes;
-					numbytes -= procbytes;
-				} else {  /* DLE char */
-					inbuf->inputstate &= ~INS_DLE_char;
-					switch (c) {
-					case 'X': /*begin of command*/
-						if (inbuf->inputstate & INS_command)
-							dev_warn(cs->dev,
-					"received <DLE> 'X' in command mode\n");
-						inbuf->inputstate |=
-							INS_command | INS_DLE_command;
-						break;
-					case '.': /*end of command*/
-						if (!(inbuf->inputstate & INS_command))
-							dev_warn(cs->dev,
-					"received <DLE> '.' in hdlc mode\n");
-						inbuf->inputstate &= cs->dle ?
-							~(INS_DLE_command|INS_command)
-							: ~INS_DLE_command;
-						break;
-					//case DLE_FLAG: /*DLE_FLAG in data stream*/ /* already handled above! */
-					default:
-						dev_err(cs->dev,
-						      "received 0x10 0x%02x!\n",
-							(int) c);
-						/* FIXME: reset driver?? */
-					}
-				}
-			}
-nextbyte:
-			if (!numbytes) {
-				/* end of buffer, check for wrap */
-				if (head > tail) {
-					head = 0;
-					src = inbuf->data;
-					numbytes = tail;
-				} else {
-					head = tail;
-					break;
-				}
-			}
-		}
-
-		gig_dbg(DEBUG_INTR, "setting head to %u", head);
-		inbuf->head = head;
+		gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
 	}
 }
 EXPORT_SYMBOL_GPL(gigaset_m10x_input);
@@ -430,11 +470,11 @@
  * opening and closing flags, preserving headroom data.
  * parameters:
  *	skb		skb containing original packet (freed upon return)
- *	headroom	number of headroom bytes to preserve
  * Return value:
  *	pointer to newly allocated skb containing the result frame
+ *	and the original link layer header, NULL on error
  */
-static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int headroom)
+static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
 {
 	struct sk_buff *hdlc_skb;
 	__u16 fcs;
@@ -456,17 +496,19 @@
 
 	/* size of new buffer: original size + number of stuffing bytes
 	 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
-	 * + room for acknowledgement header
+	 * + room for link layer header
 	 */
-	hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + headroom);
+	hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
 	if (!hdlc_skb) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NULL;
 	}
 
-	/* Copy acknowledgement header into new skb */
-	skb_reserve(hdlc_skb, headroom);
-	memcpy(hdlc_skb->head, skb->head, headroom);
+	/* Copy link layer header into new skb */
+	skb_reset_mac_header(hdlc_skb);
+	skb_reserve(hdlc_skb, skb->mac_len);
+	memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
+	hdlc_skb->mac_len = skb->mac_len;
 
 	/* Add flag sequence in front of everything.. */
 	*(skb_put(hdlc_skb, 1)) = PPP_FLAG;
@@ -497,7 +539,7 @@
 
 	*(skb_put(hdlc_skb, 1)) = PPP_FLAG;
 
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return hdlc_skb;
 }
 
@@ -506,28 +548,33 @@
  * preserving headroom data.
  * parameters:
  *	skb		skb containing original packet (freed upon return)
- *	headroom	number of headroom bytes to preserve
  * Return value:
  *	pointer to newly allocated skb containing the result frame
+ *	and the original link layer header, NULL on error
  */
-static struct sk_buff *iraw_encode(struct sk_buff *skb, int headroom)
+static struct sk_buff *iraw_encode(struct sk_buff *skb)
 {
 	struct sk_buff *iraw_skb;
 	unsigned char c;
 	unsigned char *cp;
 	int len;
 
-	/* worst case: every byte must be stuffed */
-	iraw_skb = dev_alloc_skb(2*skb->len + headroom);
+	/* size of new buffer (worst case = every byte must be stuffed):
+	 * 2 * original size + room for link layer header
+	 */
+	iraw_skb = dev_alloc_skb(2*skb->len + skb->mac_len);
 	if (!iraw_skb) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NULL;
 	}
 
-	/* Copy acknowledgement header into new skb */
-	skb_reserve(iraw_skb, headroom);
-	memcpy(iraw_skb->head, skb->head, headroom);
+	/* copy link layer header into new skb */
+	skb_reset_mac_header(iraw_skb);
+	skb_reserve(iraw_skb, skb->mac_len);
+	memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
+	iraw_skb->mac_len = skb->mac_len;
 
+	/* copy and stuff data */
 	cp = skb->data;
 	len = skb->len;
 	while (len--) {
@@ -536,7 +583,7 @@
 			*(skb_put(iraw_skb, 1)) = c;
 		*(skb_put(iraw_skb, 1)) = c;
 	}
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return iraw_skb;
 }
 
@@ -548,7 +595,7 @@
  * Called by LL to encode and queue an skb for sending, and start
  * transmission if necessary.
  * Once the payload data has been transmitted completely, gigaset_skb_sent()
- * will be called with the first cs->hw_hdr_len bytes of skb->head preserved.
+ * will be called with the skb's link layer header preserved.
  *
  * Return value:
  *	number of bytes accepted for sending (skb->len) if ok,
@@ -556,24 +603,25 @@
  */
 int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
 {
+	struct cardstate *cs = bcs->cs;
 	unsigned len = skb->len;
 	unsigned long flags;
 
 	if (bcs->proto2 == L2_HDLC)
-		skb = HDLC_Encode(skb, bcs->cs->hw_hdr_len);
+		skb = HDLC_Encode(skb);
 	else
-		skb = iraw_encode(skb, bcs->cs->hw_hdr_len);
+		skb = iraw_encode(skb);
 	if (!skb) {
-		dev_err(bcs->cs->dev,
+		dev_err(cs->dev,
 			"unable to allocate memory for encoding!\n");
 		return -ENOMEM;
 	}
 
 	skb_queue_tail(&bcs->squeue, skb);
-	spin_lock_irqsave(&bcs->cs->lock, flags);
-	if (bcs->cs->connected)
-		tasklet_schedule(&bcs->cs->write_tasklet);
-	spin_unlock_irqrestore(&bcs->cs->lock, flags);
+	spin_lock_irqsave(&cs->lock, flags);
+	if (cs->connected)
+		tasklet_schedule(&cs->write_tasklet);
+	spin_unlock_irqrestore(&cs->lock, flags);
 
 	return len;	/* ok so far */
 }
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 388e63a..9fd19db 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -57,7 +57,7 @@
 #define USB_SX353_PRODUCT_ID    0x0022
 
 /* table of devices that work with this driver */
-static const struct usb_device_id gigaset_table [] = {
+static const struct usb_device_id gigaset_table[] = {
 	{ USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) },
 	{ USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) },
 	{ USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
@@ -137,7 +137,7 @@
 #define BS_RESETTING	0x200	/* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
 
 
-static struct gigaset_driver *driver = NULL;
+static struct gigaset_driver *driver;
 
 /* usb specific object needed to register this driver with the usb subsystem */
 static struct usb_driver gigaset_usb_driver = {
@@ -601,11 +601,12 @@
 	ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
 	usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
 			     usb_rcvctrlpipe(ucs->udev, 0),
-			     (unsigned char*) & ucs->dr_cmd_in,
+			     (unsigned char *) &ucs->dr_cmd_in,
 			     ucs->rcvbuf, ucs->rcvbuf_size,
 			     read_ctrl_callback, cs->inbuf);
 
-	if ((ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC)) != 0) {
+	ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC);
+	if (ret != 0) {
 		update_basstate(ucs, 0, BS_ATRDPEND);
 		dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
 			get_usb_rcmsg(ret));
@@ -652,13 +653,11 @@
 		return;
 	case -ENODEV:			/* device removed */
 	case -ESHUTDOWN:		/* device shut down */
-		//FIXME use this as disconnect indicator?
 		gig_dbg(DEBUG_USBREQ, "%s: device disconnected", __func__);
 		return;
 	default:		/* severe trouble */
 		dev_warn(cs->dev, "interrupt read: %s\n",
 			 get_usb_statmsg(status));
-		//FIXME corrective action? resubmission always ok?
 		goto resubmit;
 	}
 
@@ -742,7 +741,8 @@
 			kfree(ucs->rcvbuf);
 			ucs->rcvbuf_size = 0;
 		}
-		if ((ucs->rcvbuf = kmalloc(l, GFP_ATOMIC)) == NULL) {
+		ucs->rcvbuf = kmalloc(l, GFP_ATOMIC);
+		if (ucs->rcvbuf == NULL) {
 			spin_unlock_irqrestore(&cs->lock, flags);
 			dev_err(cs->dev, "out of memory receiving AT data\n");
 			error_reset(cs);
@@ -750,12 +750,12 @@
 		}
 		ucs->rcvbuf_size = l;
 		ucs->retry_cmd_in = 0;
-		if ((rc = atread_submit(cs, BAS_TIMEOUT)) < 0) {
+		rc = atread_submit(cs, BAS_TIMEOUT);
+		if (rc < 0) {
 			kfree(ucs->rcvbuf);
 			ucs->rcvbuf = NULL;
 			ucs->rcvbuf_size = 0;
 			if (rc != -ENODEV) {
-				//FIXME corrective action?
 				spin_unlock_irqrestore(&cs->lock, flags);
 				error_reset(cs);
 				break;
@@ -940,7 +940,8 @@
 		}
 
 		dump_urb(DEBUG_ISO, "Initial isoc read", urb);
-		if ((rc = usb_submit_urb(urb, GFP_ATOMIC)) != 0)
+		rc = usb_submit_urb(urb, GFP_ATOMIC);
+		if (rc != 0)
 			goto error;
 	}
 
@@ -1045,7 +1046,8 @@
 
 		/* compute frame length according to flow control */
 		ifd->length = BAS_NORMFRAME;
-		if ((corrbytes = atomic_read(&ubc->corrbytes)) != 0) {
+		corrbytes = atomic_read(&ubc->corrbytes);
+		if (corrbytes != 0) {
 			gig_dbg(DEBUG_ISO, "%s: corrbytes=%d",
 				__func__, corrbytes);
 			if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
@@ -1284,7 +1286,8 @@
 	for (;;) {
 		/* retrieve URB */
 		spin_lock_irqsave(&ubc->isoinlock, flags);
-		if (!(urb = ubc->isoindone)) {
+		urb = ubc->isoindone;
+		if (!urb) {
 			spin_unlock_irqrestore(&ubc->isoinlock, flags);
 			return;
 		}
@@ -1371,7 +1374,7 @@
 				 "isochronous read: %d data bytes missing\n",
 				 totleft);
 
-	error:
+error:
 		/* URB processed, resubmit */
 		for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
 			urb->iso_frame_desc[frame].status = 0;
@@ -1568,7 +1571,7 @@
 	ucs->dr_ctrl.wLength = 0;
 	usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
 			     usb_sndctrlpipe(ucs->udev, 0),
-			     (unsigned char*) &ucs->dr_ctrl, NULL, 0,
+			     (unsigned char *) &ucs->dr_ctrl, NULL, 0,
 			     write_ctrl_callback, ucs);
 	ucs->retry_ctrl = 0;
 	ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
@@ -1621,7 +1624,8 @@
 		return -EHOSTUNREACH;
 	}
 
-	if ((ret = starturbs(bcs)) < 0) {
+	ret = starturbs(bcs);
+	if (ret < 0) {
 		dev_err(cs->dev,
 			"could not start isochronous I/O for channel B%d: %s\n",
 			bcs->channel + 1,
@@ -1633,7 +1637,8 @@
 	}
 
 	req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
-	if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) {
+	ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
+	if (ret < 0) {
 		dev_err(cs->dev, "could not open channel B%d\n",
 			bcs->channel + 1);
 		stopurbs(bcs->hw.bas);
@@ -1677,7 +1682,8 @@
 
 	/* channel running: tell device to close it */
 	req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
-	if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0)
+	ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
+	if (ret < 0)
 		dev_err(cs->dev, "closing channel B%d failed\n",
 			bcs->channel + 1);
 
@@ -1703,10 +1709,12 @@
 	gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD,
 		"write_command: sent %u bytes, %u left",
 		cs->curlen, cs->cmdbytes);
-	if ((cs->cmdbuf = cb->next) != NULL) {
+	if (cb->next != NULL) {
+		cs->cmdbuf = cb->next;
 		cs->cmdbuf->prev = NULL;
 		cs->curlen = cs->cmdbuf->len;
 	} else {
+		cs->cmdbuf = NULL;
 		cs->lastcmdbuf = NULL;
 		cs->curlen = 0;
 	}
@@ -1833,7 +1841,7 @@
 	ucs->dr_cmd_out.wLength = cpu_to_le16(len);
 	usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
 			     usb_sndctrlpipe(ucs->udev, 0),
-			     (unsigned char*) &ucs->dr_cmd_out, buf, len,
+			     (unsigned char *) &ucs->dr_cmd_out, buf, len,
 			     write_command_callback, cs);
 	rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
 	if (unlikely(rc)) {
@@ -1953,7 +1961,8 @@
 
 	if (len > IF_WRITEBUF)
 		len = IF_WRITEBUF;
-	if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
+	cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
+	if (!cb) {
 		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		rc = -ENOMEM;
 		goto notqueued;
@@ -2100,7 +2109,8 @@
 	}
 	ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
 	ubc->numsub = 0;
-	if (!(ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL))) {
+	ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL);
+	if (!ubc->isooutbuf) {
 		pr_err("out of memory\n");
 		kfree(ubc);
 		bcs->hw.bas = NULL;
@@ -2252,7 +2262,8 @@
 		gig_dbg(DEBUG_ANY,
 			"%s: wrong alternate setting %d - trying to switch",
 			__func__, hostif->desc.bAlternateSetting);
-		if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3) < 0) {
+		if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3)
+		    < 0) {
 			dev_warn(&udev->dev, "usb_set_interface failed, "
 				 "device %d interface %d altsetting %d\n",
 				 udev->devnum, hostif->desc.bInterfaceNumber,
@@ -2321,14 +2332,16 @@
 					(endpoint->bEndpointAddress) & 0x0f),
 			 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
 			 endpoint->bInterval);
-	if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) {
+	rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
+	if (rc != 0) {
 		dev_err(cs->dev, "could not submit interrupt URB: %s\n",
 			get_usb_rcmsg(rc));
 		goto error;
 	}
 
 	/* tell the device that the driver is ready */
-	if ((rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0)) != 0)
+	rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
+	if (rc != 0)
 		goto error;
 
 	/* tell common part that the device is ready */
@@ -2524,9 +2537,10 @@
 	int result;
 
 	/* allocate memory for our driver state and initialize it */
-	if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
-				       GIGASET_MODULENAME, GIGASET_DEVNAME,
-				       &gigops, THIS_MODULE)) == NULL)
+	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+				    GIGASET_MODULENAME, GIGASET_DEVNAME,
+				    &gigops, THIS_MODULE);
+	if (driver == NULL)
 		goto error;
 
 	/* register this driver with the USB subsystem */
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index c276a92..3f5cd06 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -168,14 +168,6 @@
 			 msgname, paramname);
 }
 
-static inline void ignore_cmstruct_param(struct cardstate *cs, _cmstruct param,
-				       char *msgname, char *paramname)
-{
-	if (param != CAPI_DEFAULT)
-		dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n",
-			 msgname, paramname);
-}
-
 /*
  * check for legal hex digit
  */
@@ -370,6 +362,7 @@
 	struct cardstate *cs = bcs->cs;
 	struct gigaset_capi_ctr *iif = cs->iif;
 	struct gigaset_capi_appl *ap = bcs->ap;
+	unsigned char *req = skb_mac_header(dskb);
 	struct sk_buff *cskb;
 	u16 flags;
 
@@ -388,7 +381,7 @@
 	}
 
 	/* ToDo: honor unset "delivery confirmation" bit */
-	flags = CAPIMSG_FLAGS(dskb->head);
+	flags = CAPIMSG_FLAGS(req);
 
 	/* build DATA_B3_CONF message */
 	cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
@@ -401,11 +394,11 @@
 	CAPIMSG_SETAPPID(cskb->data, ap->id);
 	CAPIMSG_SETCOMMAND(cskb->data, CAPI_DATA_B3);
 	CAPIMSG_SETSUBCOMMAND(cskb->data,  CAPI_CONF);
-	CAPIMSG_SETMSGID(cskb->data, CAPIMSG_MSGID(dskb->head));
+	CAPIMSG_SETMSGID(cskb->data, CAPIMSG_MSGID(req));
 	CAPIMSG_SETCONTROLLER(cskb->data, iif->ctr.cnr);
 	CAPIMSG_SETPLCI_PART(cskb->data, bcs->channel + 1);
 	CAPIMSG_SETNCCI_PART(cskb->data, 1);
-	CAPIMSG_SETHANDLE_CONF(cskb->data, CAPIMSG_HANDLE_REQ(dskb->head));
+	CAPIMSG_SETHANDLE_CONF(cskb->data, CAPIMSG_HANDLE_REQ(req));
 	if (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION)
 		CAPIMSG_SETINFO_CONF(cskb->data,
 				     CapiFlagsNotSupportedByProtocol);
@@ -445,7 +438,7 @@
 	/* don't send further B3 messages if disconnected */
 	if (ap->connected < APCONN_ACTIVE) {
 		gig_dbg(DEBUG_LLDATA, "disconnected, discarding data");
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return;
 	}
 
@@ -1062,6 +1055,7 @@
 			    struct sk_buff *skb)
 {
 	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
 	struct sk_buff *cskb;
 	u8 *pparam;
 	unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN;
@@ -1069,14 +1063,14 @@
 	static u8 confparam[10];	/* max. 9 octets + length byte */
 
 	/* decode message */
-	capi_message2cmsg(&iif->acmsg, skb->data);
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
 
 	/*
 	 * Facility Request Parameter is not decoded by capi_message2cmsg()
 	 * encoding depends on Facility Selector
 	 */
-	switch (iif->acmsg.FacilitySelector) {
+	switch (cmsg->FacilitySelector) {
 	case CAPI_FACILITY_DTMF:	/* ToDo */
 		info = CapiFacilityNotSupported;
 		confparam[0] = 2;	/* length */
@@ -1093,7 +1087,7 @@
 
 	case CAPI_FACILITY_SUPPSVC:
 		/* decode Function parameter */
-		pparam = iif->acmsg.FacilityRequestParameter;
+		pparam = cmsg->FacilityRequestParameter;
 		if (pparam == NULL || *pparam < 2) {
 			dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ",
 				   "Facility Request Parameter");
@@ -1141,18 +1135,18 @@
 	}
 
 	/* send FACILITY_CONF with given Info and confirmation parameter */
-	capi_cmsg_answer(&iif->acmsg);
-	iif->acmsg.Info = info;
-	iif->acmsg.FacilityConfirmationParameter = confparam;
+	capi_cmsg_answer(cmsg);
+	cmsg->Info = info;
+	cmsg->FacilityConfirmationParameter = confparam;
 	msgsize += confparam[0];	/* length */
 	cskb = alloc_skb(msgsize, GFP_ATOMIC);
 	if (!cskb) {
 		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		return;
 	}
-	capi_cmsg2message(&iif->acmsg, __skb_put(cskb, msgsize));
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-		capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
+	capi_cmsg2message(cmsg, __skb_put(cskb, msgsize));
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+	capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
 }
 
 
@@ -1207,8 +1201,8 @@
 	u16 info;
 
 	/* decode message */
-	capi_message2cmsg(&iif->acmsg, skb->data);
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
 
 	/* get free B channel & construct PLCI */
 	bcs = gigaset_get_free_channel(cs);
@@ -1261,7 +1255,7 @@
 	commands[AT_DIAL] = kmalloc(l+3, GFP_KERNEL);
 	if (!commands[AT_DIAL])
 		goto oom;
-	snprintf(commands[AT_DIAL], l+3, "D%*s\r", l, pp);
+	snprintf(commands[AT_DIAL], l+3, "D%.*s\r", l, pp);
 
 	/* encode parameter: Calling party number */
 	pp = cmsg->CallingPartyNumber;
@@ -1411,8 +1405,16 @@
 					"CONNECT_REQ", "Calling pty subaddr");
 	ignore_cstruct_param(cs, cmsg->LLC,
 					"CONNECT_REQ", "LLC");
-	ignore_cmstruct_param(cs, cmsg->AdditionalInfo,
-					"CONNECT_REQ", "Additional Info");
+	if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
+		ignore_cstruct_param(cs, cmsg->BChannelinformation,
+					"CONNECT_REQ", "B Channel Information");
+		ignore_cstruct_param(cs, cmsg->Keypadfacility,
+					"CONNECT_REQ", "Keypad Facility");
+		ignore_cstruct_param(cs, cmsg->Useruserdata,
+					"CONNECT_REQ", "User-User Data");
+		ignore_cstruct_param(cs, cmsg->Facilitydataarray,
+					"CONNECT_REQ", "Facility Data Array");
+	}
 
 	/* encode parameter: B channel to use */
 	commands[AT_ISO] = kmalloc(9, GFP_KERNEL);
@@ -1458,9 +1460,9 @@
 	int channel;
 
 	/* decode message */
-	capi_message2cmsg(&iif->acmsg, skb->data);
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-	dev_kfree_skb(skb);
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
+	dev_kfree_skb_any(skb);
 
 	/* extract and check channel number from PLCI */
 	channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
@@ -1524,8 +1526,16 @@
 					"CONNECT_RESP", "Connected Subaddress");
 		ignore_cstruct_param(cs, cmsg->LLC,
 					"CONNECT_RESP", "LLC");
-		ignore_cmstruct_param(cs, cmsg->AdditionalInfo,
-					"CONNECT_RESP", "Additional Info");
+		if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
+			ignore_cstruct_param(cs, cmsg->BChannelinformation,
+					"CONNECT_RESP", "BChannel Information");
+			ignore_cstruct_param(cs, cmsg->Keypadfacility,
+					"CONNECT_RESP", "Keypad Facility");
+			ignore_cstruct_param(cs, cmsg->Useruserdata,
+					"CONNECT_RESP", "User-User Data");
+			ignore_cstruct_param(cs, cmsg->Facilitydataarray,
+					"CONNECT_RESP", "Facility Data Array");
+		}
 
 		/* Accept call */
 		if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
@@ -1587,17 +1597,18 @@
 			      struct sk_buff *skb)
 {
 	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
 	int channel;
 
 	/* decode message */
-	capi_message2cmsg(&iif->acmsg, skb->data);
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
 
 	/* extract and check channel number from PLCI */
-	channel = (iif->acmsg.adr.adrPLCI >> 8) & 0xff;
+	channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
 	if (!channel || channel > cs->channels) {
 		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-			   "CONNECT_B3_REQ", "PLCI", iif->acmsg.adr.adrPLCI);
+			   "CONNECT_B3_REQ", "PLCI", cmsg->adr.adrPLCI);
 		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
 		return;
 	}
@@ -1606,14 +1617,12 @@
 	ap->connected = APCONN_ACTIVE;
 
 	/* build NCCI: always 1 (one B3 connection only) */
-	iif->acmsg.adr.adrNCCI |= 1 << 16;
+	cmsg->adr.adrNCCI |= 1 << 16;
 
 	/* NCPI parameter: not applicable for B3 Transparent */
-	ignore_cstruct_param(cs, iif->acmsg.NCPI,
-				"CONNECT_B3_REQ", "NCPI");
-	send_conf(iif, ap, skb,
-		  (iif->acmsg.NCPI && iif->acmsg.NCPI[0]) ?
-			CapiNcpiNotSupportedByProtocol : CapiSuccess);
+	ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
+	send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
+				CapiNcpiNotSupportedByProtocol : CapiSuccess);
 }
 
 /*
@@ -1628,27 +1637,28 @@
 			       struct sk_buff *skb)
 {
 	struct cardstate *cs = iif->ctr.driverdata;
-	struct bc_state *bcs = NULL;
+	_cmsg *cmsg = &iif->acmsg;
+	struct bc_state *bcs;
 	int channel;
 	unsigned int msgsize;
 	u8 command;
 
 	/* decode message */
-	capi_message2cmsg(&iif->acmsg, skb->data);
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
 
 	/* extract and check channel number and NCCI */
-	channel = (iif->acmsg.adr.adrNCCI >> 8) & 0xff;
+	channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
 	if (!channel || channel > cs->channels ||
-	    ((iif->acmsg.adr.adrNCCI >> 16) & 0xffff) != 1) {
+	    ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
 		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-			   "CONNECT_B3_RESP", "NCCI", iif->acmsg.adr.adrNCCI);
-		dev_kfree_skb(skb);
+			   "CONNECT_B3_RESP", "NCCI", cmsg->adr.adrNCCI);
+		dev_kfree_skb_any(skb);
 		return;
 	}
 	bcs = &cs->bcs[channel-1];
 
-	if (iif->acmsg.Reject) {
+	if (cmsg->Reject) {
 		/* Reject: clear B3 connect received flag */
 		ap->connected = APCONN_SETUP;
 
@@ -1656,7 +1666,7 @@
 		if (!gigaset_add_event(cs, &bcs->at_state,
 				       EV_HUP, NULL, 0, NULL)) {
 			dev_err(cs->dev, "%s: out of memory\n", __func__);
-			dev_kfree_skb(skb);
+			dev_kfree_skb_any(skb);
 			return;
 		}
 		gig_dbg(DEBUG_CMD, "scheduling HUP");
@@ -1673,11 +1683,11 @@
 		command = CAPI_CONNECT_B3_ACTIVE;
 		msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
 	}
-	capi_cmsg_header(&iif->acmsg, ap->id, command, CAPI_IND,
-			 ap->nextMessageNumber++, iif->acmsg.adr.adrNCCI);
+	capi_cmsg_header(cmsg, ap->id, command, CAPI_IND,
+			 ap->nextMessageNumber++, cmsg->adr.adrNCCI);
 	__skb_trim(skb, msgsize);
-	capi_cmsg2message(&iif->acmsg, skb->data);
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	capi_cmsg2message(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
 	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
 }
 
@@ -1691,28 +1701,37 @@
 			      struct sk_buff *skb)
 {
 	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
 	struct bc_state *bcs;
 	_cmsg *b3cmsg;
 	struct sk_buff *b3skb;
 	int channel;
 
 	/* decode message */
-	capi_message2cmsg(&iif->acmsg, skb->data);
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
 
 	/* extract and check channel number from PLCI */
-	channel = (iif->acmsg.adr.adrPLCI >> 8) & 0xff;
+	channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
 	if (!channel || channel > cs->channels) {
 		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-			   "DISCONNECT_REQ", "PLCI", iif->acmsg.adr.adrPLCI);
+			   "DISCONNECT_REQ", "PLCI", cmsg->adr.adrPLCI);
 		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
 		return;
 	}
 	bcs = cs->bcs + channel - 1;
 
 	/* ToDo: process parameter: Additional info */
-	ignore_cmstruct_param(cs, iif->acmsg.AdditionalInfo,
-			      "DISCONNECT_REQ", "Additional Info");
+	if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
+		ignore_cstruct_param(cs, cmsg->BChannelinformation,
+				     "DISCONNECT_REQ", "B Channel Information");
+		ignore_cstruct_param(cs, cmsg->Keypadfacility,
+				     "DISCONNECT_REQ", "Keypad Facility");
+		ignore_cstruct_param(cs, cmsg->Useruserdata,
+				     "DISCONNECT_REQ", "User-User Data");
+		ignore_cstruct_param(cs, cmsg->Facilitydataarray,
+				     "DISCONNECT_REQ", "Facility Data Array");
+	}
 
 	/* skip if DISCONNECT_IND already sent */
 	if (!ap->connected)
@@ -1733,7 +1752,7 @@
 		}
 		capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
 				 ap->nextMessageNumber++,
-				 iif->acmsg.adr.adrPLCI | (1 << 16));
+				 cmsg->adr.adrPLCI | (1 << 16));
 		b3cmsg->Reason_B3 = CapiProtocolErrorLayer1;
 		b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL);
 		if (b3skb == NULL) {
@@ -1769,18 +1788,19 @@
 				 struct sk_buff *skb)
 {
 	struct cardstate *cs = iif->ctr.driverdata;
+	_cmsg *cmsg = &iif->acmsg;
 	int channel;
 
 	/* decode message */
-	capi_message2cmsg(&iif->acmsg, skb->data);
-	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	capi_message2cmsg(cmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, cmsg);
 
 	/* extract and check channel number and NCCI */
-	channel = (iif->acmsg.adr.adrNCCI >> 8) & 0xff;
+	channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
 	if (!channel || channel > cs->channels ||
-	    ((iif->acmsg.adr.adrNCCI >> 16) & 0xffff) != 1) {
+	    ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
 		dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
-			   "DISCONNECT_B3_REQ", "NCCI", iif->acmsg.adr.adrNCCI);
+			   "DISCONNECT_B3_REQ", "NCCI", cmsg->adr.adrNCCI);
 		send_conf(iif, ap, skb, CapiIllContrPlciNcci);
 		return;
 	}
@@ -1803,11 +1823,10 @@
 	gigaset_schedule_event(cs);
 
 	/* NCPI parameter: not applicable for B3 Transparent */
-	ignore_cstruct_param(cs, iif->acmsg.NCPI,
+	ignore_cstruct_param(cs, cmsg->NCPI,
 				"DISCONNECT_B3_REQ", "NCPI");
-	send_conf(iif, ap, skb,
-		  (iif->acmsg.NCPI && iif->acmsg.NCPI[0]) ?
-			CapiNcpiNotSupportedByProtocol : CapiSuccess);
+	send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
+				CapiNcpiNotSupportedByProtocol : CapiSuccess);
 }
 
 /*
@@ -1862,12 +1881,12 @@
 		return;
 	}
 
-	/*
-	 * pull CAPI message from skb,
-	 * pass payload data to device-specific module
-	 * CAPI message will be preserved in headroom
-	 */
+	/* pull CAPI message into link layer header */
+	skb_reset_mac_header(skb);
+	skb->mac_len = msglen;
 	skb_pull(skb, msglen);
+
+	/* pass to device-specific module */
 	if (cs->ops->send_skb(&cs->bcs[channel-1], skb) < 0) {
 		send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
 		return;
@@ -1928,7 +1947,7 @@
 		capi_message2cmsg(&iif->acmsg, skb->data);
 		dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
 	}
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 }
 
 static void do_data_b3_resp(struct gigaset_capi_ctr *iif,
@@ -1936,7 +1955,7 @@
 			    struct sk_buff *skb)
 {
 	dump_rawmsg(DEBUG_LLDATA, __func__, skb->data);
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 }
 
 /* table of outgoing CAPI message handlers with lookup function */
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 1d2ae2e..c438cfc 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -108,7 +108,7 @@
 {
 	int i, r;
 
-	cs->control_state = TIOCM_RTS; //FIXME
+	cs->control_state = TIOCM_RTS;
 
 	r = setflags(cs, TIOCM_DTR, 200);
 	if (r < 0)
@@ -132,10 +132,10 @@
 
 error:
 	dev_err(cs->dev, "error %d on setuartbits\n", -r);
-	cs->control_state = TIOCM_RTS|TIOCM_DTR; // FIXME is this a good value?
+	cs->control_state = TIOCM_RTS|TIOCM_DTR;
 	cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR);
 
-	return -1; //r
+	return -1;
 }
 
 static int test_timeout(struct at_state_t *at_state)
@@ -150,10 +150,9 @@
 	}
 
 	if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
-			       at_state->timer_index, NULL)) {
-		//FIXME what should we do?
-	}
-
+			       at_state->timer_index, NULL))
+			dev_err(at_state->cs->dev, "%s: out of memory\n",
+				__func__);
 	return 1;
 }
 
@@ -393,16 +392,15 @@
 	int i;
 
 	gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
-	if (!bcs->cs->ops->freebcshw(bcs)) {
+	if (!bcs->cs->ops->freebcshw(bcs))
 		gig_dbg(DEBUG_INIT, "failed");
-	}
 
 	gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
 	clear_at_state(&bcs->at_state);
 	gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
+	dev_kfree_skb(bcs->skb);
+	bcs->skb = NULL;
 
-	if (bcs->skb)
-		dev_kfree_skb(bcs->skb);
 	for (i = 0; i < AT_NUM; ++i) {
 		kfree(bcs->commands[i]);
 		bcs->commands[i] = NULL;
@@ -503,8 +501,6 @@
 		gig_dbg(DEBUG_INIT, "clearing hw");
 		cs->ops->freecshw(cs);
 
-		//FIXME cmdbuf
-
 		/* fall through */
 	case 2: /* error in initcshw */
 		/* Deregister from LL */
@@ -560,16 +556,13 @@
 }
 
 
-static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs,
-			       struct cardstate *cs, int inputstate)
+static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
 /* inbuf->read must be allocated before! */
 {
 	inbuf->head = 0;
 	inbuf->tail = 0;
 	inbuf->cs = cs;
-	inbuf->bcs = bcs; /*base driver: NULL*/
-	inbuf->rcvbuf = NULL;
-	inbuf->inputstate = inputstate;
+	inbuf->inputstate = INS_command;
 }
 
 /**
@@ -625,7 +618,7 @@
 {
 	int i;
 
-	bcs->tx_skb = NULL; //FIXME -> hw part
+	bcs->tx_skb = NULL;
 
 	skb_queue_head_init(&bcs->squeue);
 
@@ -644,16 +637,13 @@
 	bcs->fcs = PPP_INITFCS;
 	bcs->inputstate = 0;
 	if (cs->ignoreframes) {
-		bcs->inputstate |= INS_skip_frame;
 		bcs->skb = NULL;
 	} else {
 		bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
 		if (bcs->skb != NULL)
 			skb_reserve(bcs->skb, cs->hw_hdr_len);
-		else {
+		else
 			pr_err("out of memory\n");
-			bcs->inputstate |= INS_skip_frame;
-		}
 	}
 
 	bcs->channel = channel;
@@ -674,8 +664,8 @@
 	gig_dbg(DEBUG_INIT, "  failed");
 
 	gig_dbg(DEBUG_INIT, "  freeing bcs[%d]->skb", channel);
-	if (bcs->skb)
-		dev_kfree_skb(bcs->skb);
+	dev_kfree_skb(bcs->skb);
+	bcs->skb = NULL;
 
 	return NULL;
 }
@@ -702,12 +692,13 @@
 				 int onechannel, int ignoreframes,
 				 int cidmode, const char *modulename)
 {
-	struct cardstate *cs = NULL;
+	struct cardstate *cs;
 	unsigned long flags;
 	int i;
 
 	gig_dbg(DEBUG_INIT, "allocating cs");
-	if (!(cs = alloc_cs(drv))) {
+	cs = alloc_cs(drv);
+	if (!cs) {
 		pr_err("maximum number of devices exceeded\n");
 		return NULL;
 	}
@@ -764,10 +755,7 @@
 	cs->cbytes = 0;
 
 	gig_dbg(DEBUG_INIT, "setting up inbuf");
-	if (onechannel) {			//FIXME distinction necessary?
-		gigaset_inbuf_init(cs->inbuf, cs->bcs, cs, INS_command);
-	} else
-		gigaset_inbuf_init(cs->inbuf, NULL,    cs, INS_command);
+	gigaset_inbuf_init(cs->inbuf, cs);
 
 	cs->connected = 0;
 	cs->isdn_up = 0;
@@ -854,9 +842,10 @@
 	bcs->chstate = 0;
 
 	bcs->ignore = cs->ignoreframes;
-	if (bcs->ignore)
-		bcs->inputstate |= INS_skip_frame;
-
+	if (bcs->ignore) {
+		dev_kfree_skb(bcs->skb);
+		bcs->skb = NULL;
+	}
 
 	cs->ops->reinitbcshw(bcs);
 }
@@ -877,8 +866,6 @@
 	free_strings(&cs->at_state);
 	gigaset_at_init(&cs->at_state, NULL, cs, 0);
 
-	kfree(cs->inbuf->rcvbuf);
-	cs->inbuf->rcvbuf = NULL;
 	cs->inbuf->inputstate = INS_command;
 	cs->inbuf->head = 0;
 	cs->inbuf->tail = 0;
@@ -941,15 +928,13 @@
 		cs->ops->baud_rate(cs, B115200);
 		cs->ops->set_line_ctrl(cs, CS8);
 		cs->control_state = TIOCM_DTR|TIOCM_RTS;
-	} else {
-		//FIXME use some saved values?
 	}
 
 	cs->waiting = 1;
 
 	if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
 		cs->waiting = 0;
-		//FIXME what should we do?
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		goto error;
 	}
 
@@ -989,7 +974,7 @@
 	cs->waiting = 1;
 
 	if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
-		//FIXME what should we do?
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		goto exit;
 	}
 
@@ -1020,7 +1005,7 @@
 	cs->waiting = 1;
 
 	if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) {
-		//FIXME what should we do?
+		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		goto exit;
 	}
 
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 369927f..ddeb045 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -40,8 +40,8 @@
 
 /* Possible ASCII responses */
 #define RSP_OK		0
-//#define RSP_BUSY	1
-//#define RSP_CONNECT	2
+#define RSP_BUSY	1
+#define RSP_CONNECT	2
 #define RSP_ZGCI	3
 #define RSP_RING	4
 #define RSP_ZAOC	5
@@ -68,7 +68,6 @@
 #define RSP_ZHLC	(RSP_STR + STR_ZHLC)
 #define RSP_ERROR	-1	/* ERROR              */
 #define RSP_WRONG_CID	-2	/* unknown cid in cmd */
-//#define RSP_EMPTY	-3
 #define RSP_UNKNOWN	-4	/* unknown response   */
 #define RSP_FAIL	-5	/* internal error     */
 #define RSP_INVAL	-6	/* invalid response   */
@@ -76,9 +75,9 @@
 #define RSP_NONE	-19
 #define RSP_STRING	-20
 #define RSP_NULL	-21
-//#define RSP_RETRYFAIL	-22
-//#define RSP_RETRY	-23
-//#define RSP_SKIP	-24
+#define RSP_RETRYFAIL	-22
+#define RSP_RETRY	-23
+#define RSP_SKIP	-24
 #define RSP_INIT	-27
 #define RSP_ANY		-26
 #define RSP_LAST	-28
@@ -158,229 +157,229 @@
 #define SEQ_UMMODE	11
 
 
-// 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring
+/* 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid),
+ * 400: hup, 500: reset, 600: dial, 700: ring */
 struct reply_t gigaset_tab_nocid[] =
 {
-	/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
+/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
+ * action, command */
 
-	/* initialize device, set cid mode if possible */
-	//{RSP_INIT,     -1, -1,100,                900, 0, {ACT_TEST}},
-	//{RSP_ERROR,   900,900, -1,                  0, 0, {ACT_FAILINIT}},
-	//{RSP_OK,      900,900, -1,                100, INIT_TIMEOUT,
-	//                                                  {ACT_TIMEOUT}},
+/* initialize device, set cid mode if possible */
+{RSP_INIT,	 -1,  -1, SEQ_INIT,		100,  1, {ACT_TIMEOUT} },
 
-	{RSP_INIT,     -1, -1,SEQ_INIT,           100, INIT_TIMEOUT,
-							  {ACT_TIMEOUT}},                /* wait until device is ready */
+{EV_TIMEOUT,	100, 100, -1,			101,  3, {0},	"Z\r"},
+{RSP_OK,	101, 103, -1,			120,  5, {ACT_GETSTRING},
+								"+GMR\r"},
 
-	{EV_TIMEOUT,  100,100, -1,                101, 3, {0},             "Z\r"},       /* device in transparent mode? try to initialize it. */
-	{RSP_OK,      101,103, -1,                120, 5, {ACT_GETSTRING}, "+GMR\r"},    /* get version */
+{EV_TIMEOUT,	101, 101, -1,			102,  5, {0},	"Z\r"},
+{RSP_ERROR,	101, 101, -1,			102,  5, {0},	"Z\r"},
 
-	{EV_TIMEOUT,  101,101, -1,                102, 5, {0},             "Z\r"},       /* timeout => try once again. */
-	{RSP_ERROR,   101,101, -1,                102, 5, {0},             "Z\r"},       /* error => try once again. */
+{EV_TIMEOUT,	102, 102, -1,			108,  5, {ACT_SETDLE1},
+								"^SDLE=0\r"},
+{RSP_OK,	108, 108, -1,			104, -1},
+{RSP_ZDLE,	104, 104,  0,			103,  5, {0},	"Z\r"},
+{EV_TIMEOUT,	104, 104, -1,			  0,  0, {ACT_FAILINIT} },
+{RSP_ERROR,	108, 108, -1,			  0,  0, {ACT_FAILINIT} },
 
-	{EV_TIMEOUT,  102,102, -1,                108, 5, {ACT_SETDLE1},   "^SDLE=0\r"}, /* timeout => try again in DLE mode. */
-	{RSP_OK,      108,108, -1,                104,-1},
-	{RSP_ZDLE,    104,104,  0,                103, 5, {0},             "Z\r"},
-	{EV_TIMEOUT,  104,104, -1,                  0, 0, {ACT_FAILINIT}},
-	{RSP_ERROR,   108,108, -1,                  0, 0, {ACT_FAILINIT}},
+{EV_TIMEOUT,	108, 108, -1,			105,  2, {ACT_SETDLE0,
+							  ACT_HUPMODEM,
+							  ACT_TIMEOUT} },
+{EV_TIMEOUT,	105, 105, -1,			103,  5, {0},	"Z\r"},
 
-	{EV_TIMEOUT,  108,108, -1,                105, 2, {ACT_SETDLE0,
-							   ACT_HUPMODEM,
-							   ACT_TIMEOUT}},                /* still timeout => connection in unimodem mode? */
-	{EV_TIMEOUT,  105,105, -1,                103, 5, {0},             "Z\r"},
+{RSP_ERROR,	102, 102, -1,			107,  5, {0},	"^GETPRE\r"},
+{RSP_OK,	107, 107, -1,			  0,  0, {ACT_CONFIGMODE} },
+{RSP_ERROR,	107, 107, -1,			  0,  0, {ACT_FAILINIT} },
+{EV_TIMEOUT,	107, 107, -1,			  0,  0, {ACT_FAILINIT} },
 
-	{RSP_ERROR,   102,102, -1,                107, 5, {0},             "^GETPRE\r"}, /* ERROR on ATZ => maybe in config mode? */
-	{RSP_OK,      107,107, -1,                  0, 0, {ACT_CONFIGMODE}},
-	{RSP_ERROR,   107,107, -1,                  0, 0, {ACT_FAILINIT}},
-	{EV_TIMEOUT,  107,107, -1,                  0, 0, {ACT_FAILINIT}},
+{RSP_ERROR,	103, 103, -1,			  0,  0, {ACT_FAILINIT} },
+{EV_TIMEOUT,	103, 103, -1,			  0,  0, {ACT_FAILINIT} },
 
-	{RSP_ERROR,   103,103, -1,                  0, 0, {ACT_FAILINIT}},
-	{EV_TIMEOUT,  103,103, -1,                  0, 0, {ACT_FAILINIT}},
+{RSP_STRING,	120, 120, -1,			121, -1, {ACT_SETVER} },
 
-	{RSP_STRING,  120,120, -1,                121,-1, {ACT_SETVER}},
+{EV_TIMEOUT,	120, 121, -1,			  0,  0, {ACT_FAILVER,
+							  ACT_INIT} },
+{RSP_ERROR,	120, 121, -1,			  0,  0, {ACT_FAILVER,
+							  ACT_INIT} },
+{RSP_OK,	121, 121, -1,			  0,  0, {ACT_GOTVER,
+							  ACT_INIT} },
 
-	{EV_TIMEOUT,  120,121, -1,                  0, 0, {ACT_FAILVER, ACT_INIT}},
-	{RSP_ERROR,   120,121, -1,                  0, 0, {ACT_FAILVER, ACT_INIT}},
-	{RSP_OK,      121,121, -1,                  0, 0, {ACT_GOTVER,  ACT_INIT}},
+/* leave dle mode */
+{RSP_INIT,	  0,   0, SEQ_DLE0,		201,  5, {0},	"^SDLE=0\r"},
+{RSP_OK,	201, 201, -1,			202, -1},
+{RSP_ZDLE,	202, 202,  0,			  0,  0, {ACT_DLE0} },
+{RSP_NODEV,	200, 249, -1,			  0,  0, {ACT_FAKEDLE0} },
+{RSP_ERROR,	200, 249, -1,			  0,  0, {ACT_FAILDLE0} },
+{EV_TIMEOUT,	200, 249, -1,			  0,  0, {ACT_FAILDLE0} },
 
-	/* leave dle mode */
-	{RSP_INIT,      0,  0,SEQ_DLE0,           201, 5, {0},             "^SDLE=0\r"},
-	{RSP_OK,      201,201, -1,                202,-1},
-	{RSP_ZDLE,    202,202,  0,                  0, 0, {ACT_DLE0}},
-	{RSP_NODEV,   200,249, -1,                  0, 0, {ACT_FAKEDLE0}},
-	{RSP_ERROR,   200,249, -1,                  0, 0, {ACT_FAILDLE0}},
-	{EV_TIMEOUT,  200,249, -1,                  0, 0, {ACT_FAILDLE0}},
+/* enter dle mode */
+{RSP_INIT,	  0,   0, SEQ_DLE1,		251,  5, {0},	"^SDLE=1\r"},
+{RSP_OK,	251, 251, -1,			252, -1},
+{RSP_ZDLE,	252, 252,  1,			  0,  0, {ACT_DLE1} },
+{RSP_ERROR,	250, 299, -1,			  0,  0, {ACT_FAILDLE1} },
+{EV_TIMEOUT,	250, 299, -1,			  0,  0, {ACT_FAILDLE1} },
 
-	/* enter dle mode */
-	{RSP_INIT,      0,  0,SEQ_DLE1,           251, 5, {0},             "^SDLE=1\r"},
-	{RSP_OK,      251,251, -1,                252,-1},
-	{RSP_ZDLE,    252,252,  1,                  0, 0, {ACT_DLE1}},
-	{RSP_ERROR,   250,299, -1,                  0, 0, {ACT_FAILDLE1}},
-	{EV_TIMEOUT,  250,299, -1,                  0, 0, {ACT_FAILDLE1}},
+/* incoming call */
+{RSP_RING,	 -1,  -1, -1,			 -1, -1, {ACT_RING} },
 
-	/* incoming call */
-	{RSP_RING,     -1, -1, -1,                 -1,-1, {ACT_RING}},
+/* get cid */
+{RSP_INIT,	  0,   0, SEQ_CID,		301,  5, {0},	"^SGCI?\r"},
+{RSP_OK,	301, 301, -1,			302, -1},
+{RSP_ZGCI,	302, 302, -1,			  0,  0, {ACT_CID} },
+{RSP_ERROR,	301, 349, -1,			  0,  0, {ACT_FAILCID} },
+{EV_TIMEOUT,	301, 349, -1,			  0,  0, {ACT_FAILCID} },
 
-	/* get cid */
-	//{RSP_INIT,      0,  0,300,                901, 0, {ACT_TEST}},
-	//{RSP_ERROR,   901,901, -1,                  0, 0, {ACT_FAILCID}},
-	//{RSP_OK,      901,901, -1,                301, 5, {0},             "^SGCI?\r"},
+/* enter cid mode */
+{RSP_INIT,	  0,   0, SEQ_CIDMODE,		150,  5, {0},	"^SGCI=1\r"},
+{RSP_OK,	150, 150, -1,			  0,  0, {ACT_CMODESET} },
+{RSP_ERROR,	150, 150, -1,			  0,  0, {ACT_FAILCMODE} },
+{EV_TIMEOUT,	150, 150, -1,			  0,  0, {ACT_FAILCMODE} },
 
-	{RSP_INIT,      0,  0,SEQ_CID,            301, 5, {0},             "^SGCI?\r"},
-	{RSP_OK,      301,301, -1,                302,-1},
-	{RSP_ZGCI,    302,302, -1,                  0, 0, {ACT_CID}},
-	{RSP_ERROR,   301,349, -1,                  0, 0, {ACT_FAILCID}},
-	{EV_TIMEOUT,  301,349, -1,                  0, 0, {ACT_FAILCID}},
+/* leave cid mode */
+{RSP_INIT,	  0,   0, SEQ_UMMODE,		160,  5, {0},	"Z\r"},
+{RSP_OK,	160, 160, -1,			  0,  0, {ACT_UMODESET} },
+{RSP_ERROR,	160, 160, -1,			  0,  0, {ACT_FAILUMODE} },
+{EV_TIMEOUT,	160, 160, -1,			  0,  0, {ACT_FAILUMODE} },
 
-	/* enter cid mode */
-	{RSP_INIT,      0,  0,SEQ_CIDMODE,        150, 5, {0},             "^SGCI=1\r"},
-	{RSP_OK,      150,150, -1,                  0, 0, {ACT_CMODESET}},
-	{RSP_ERROR,   150,150, -1,                  0, 0, {ACT_FAILCMODE}},
-	{EV_TIMEOUT,  150,150, -1,                  0, 0, {ACT_FAILCMODE}},
+/* abort getting cid */
+{RSP_INIT,	  0,   0, SEQ_NOCID,		  0,  0, {ACT_ABORTCID} },
 
-	/* leave cid mode */
-	//{RSP_INIT,      0,  0,SEQ_UMMODE,         160, 5, {0},             "^SGCI=0\r"},
-	{RSP_INIT,      0,  0,SEQ_UMMODE,         160, 5, {0},             "Z\r"},
-	{RSP_OK,      160,160, -1,                  0, 0, {ACT_UMODESET}},
-	{RSP_ERROR,   160,160, -1,                  0, 0, {ACT_FAILUMODE}},
-	{EV_TIMEOUT,  160,160, -1,                  0, 0, {ACT_FAILUMODE}},
+/* reset */
+{RSP_INIT,	  0,   0, SEQ_SHUTDOWN,		504,  5, {0},	"Z\r"},
+{RSP_OK,	504, 504, -1,			  0,  0, {ACT_SDOWN} },
+{RSP_ERROR,	501, 599, -1,			  0,  0, {ACT_FAILSDOWN} },
+{EV_TIMEOUT,	501, 599, -1,			  0,  0, {ACT_FAILSDOWN} },
+{RSP_NODEV,	501, 599, -1,			  0,  0, {ACT_FAKESDOWN} },
 
-	/* abort getting cid */
-	{RSP_INIT,      0,  0,SEQ_NOCID,            0, 0, {ACT_ABORTCID}},
+{EV_PROC_CIDMODE, -1, -1, -1,			 -1, -1, {ACT_PROC_CIDMODE} },
+{EV_IF_LOCK,	 -1,  -1, -1,			 -1, -1, {ACT_IF_LOCK} },
+{EV_IF_VER,	 -1,  -1, -1,			 -1, -1, {ACT_IF_VER} },
+{EV_START,	 -1,  -1, -1,			 -1, -1, {ACT_START} },
+{EV_STOP,	 -1,  -1, -1,			 -1, -1, {ACT_STOP} },
+{EV_SHUTDOWN,	 -1,  -1, -1,			 -1, -1, {ACT_SHUTDOWN} },
 
-	/* reset */
-	{RSP_INIT,      0,  0,SEQ_SHUTDOWN,       504, 5, {0},             "Z\r"},
-	{RSP_OK,      504,504, -1,                  0, 0, {ACT_SDOWN}},
-	{RSP_ERROR,   501,599, -1,                  0, 0, {ACT_FAILSDOWN}},
-	{EV_TIMEOUT,  501,599, -1,                  0, 0, {ACT_FAILSDOWN}},
-	{RSP_NODEV,   501,599, -1,                  0, 0, {ACT_FAKESDOWN}},
+/* misc. */
+{RSP_ERROR,	 -1,  -1, -1,			 -1, -1, {ACT_ERROR} },
+{RSP_ZCFGT,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZCFG,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZLOG,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZMWI,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZABINFO,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZSMLSTCHG,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
 
-	{EV_PROC_CIDMODE,-1, -1, -1,               -1,-1, {ACT_PROC_CIDMODE}}, //FIXME
-	{EV_IF_LOCK,   -1, -1, -1,                 -1,-1, {ACT_IF_LOCK}}, //FIXME
-	{EV_IF_VER,    -1, -1, -1,                 -1,-1, {ACT_IF_VER}}, //FIXME
-	{EV_START,     -1, -1, -1,                 -1,-1, {ACT_START}}, //FIXME
-	{EV_STOP,      -1, -1, -1,                 -1,-1, {ACT_STOP}}, //FIXME
-	{EV_SHUTDOWN,  -1, -1, -1,                 -1,-1, {ACT_SHUTDOWN}}, //FIXME
-
-	/* misc. */
-	{RSP_ERROR,    -1, -1, -1,                 -1, -1, {ACT_ERROR} },
-	{RSP_EMPTY,    -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZCFGT,    -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZCFG,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZLOG,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZMWI,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZABINFO,  -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZSMLSTCHG,-1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-
-	{RSP_ZCAU,     -1, -1, -1,                 -1,-1, {ACT_ZCAU}},
-	{RSP_NONE,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}},
-	{RSP_ANY,      -1, -1, -1,                 -1,-1, {ACT_WARN}},
-	{RSP_LAST}
+{RSP_ZCAU,	 -1,  -1, -1,			 -1, -1, {ACT_ZCAU} },
+{RSP_NONE,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ANY,	 -1,  -1, -1,			 -1, -1, {ACT_WARN} },
+{RSP_LAST}
 };
 
-// 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall
+/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring,
+ * 400: hup, 750: accepted icall */
 struct reply_t gigaset_tab_cid[] =
 {
-	/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
+/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
+ * action, command */
 
-	/* dial */
-	{EV_DIAL,      -1, -1, -1,                 -1,-1, {ACT_DIAL}}, //FIXME
-	{RSP_INIT,      0,  0,SEQ_DIAL,           601, 5, {ACT_CMD+AT_BC}},
-	{RSP_OK,      601,601, -1,                602, 5, {ACT_CMD+AT_HLC}},
-	{RSP_NULL,    602,602, -1,                603, 5, {ACT_CMD+AT_PROTO}},
-	{RSP_OK,      602,602, -1,                603, 5, {ACT_CMD+AT_PROTO}},
-	{RSP_OK,      603,603, -1,                604, 5, {ACT_CMD+AT_TYPE}},
-	{RSP_OK,      604,604, -1,                605, 5, {ACT_CMD+AT_MSN}},
-	{RSP_NULL,    605, 605, -1,               606, 5, {ACT_CMD+AT_CLIP} },
-	{RSP_OK,      605, 605, -1,               606, 5, {ACT_CMD+AT_CLIP} },
-	{RSP_NULL,    606, 606, -1,               607, 5, {ACT_CMD+AT_ISO} },
-	{RSP_OK,      606, 606, -1,               607, 5, {ACT_CMD+AT_ISO} },
-	{RSP_OK,      607, 607, -1,               608, 5, {0}, "+VLS=17\r"},
-	{RSP_OK,      608, 608, -1,               609, -1},
-	{RSP_ZSAU,    609, 609, ZSAU_PROCEEDING,  610, 5, {ACT_CMD+AT_DIAL} },
-	{RSP_OK,      610, 610, -1,               650, 0, {ACT_DIALING} },
+/* dial */
+{EV_DIAL,	 -1,  -1, -1,			 -1, -1, {ACT_DIAL} },
+{RSP_INIT,	  0,   0, SEQ_DIAL,		601,  5, {ACT_CMD+AT_BC} },
+{RSP_OK,	601, 601, -1,			602,  5, {ACT_CMD+AT_HLC} },
+{RSP_NULL,	602, 602, -1,			603,  5, {ACT_CMD+AT_PROTO} },
+{RSP_OK,	602, 602, -1,			603,  5, {ACT_CMD+AT_PROTO} },
+{RSP_OK,	603, 603, -1,			604,  5, {ACT_CMD+AT_TYPE} },
+{RSP_OK,	604, 604, -1,			605,  5, {ACT_CMD+AT_MSN} },
+{RSP_NULL,	605, 605, -1,			606,  5, {ACT_CMD+AT_CLIP} },
+{RSP_OK,	605, 605, -1,			606,  5, {ACT_CMD+AT_CLIP} },
+{RSP_NULL,	606, 606, -1,			607,  5, {ACT_CMD+AT_ISO} },
+{RSP_OK,	606, 606, -1,			607,  5, {ACT_CMD+AT_ISO} },
+{RSP_OK,	607, 607, -1,			608,  5, {0},	"+VLS=17\r"},
+{RSP_OK,	608, 608, -1,			609, -1},
+{RSP_ZSAU,	609, 609, ZSAU_PROCEEDING,	610,  5, {ACT_CMD+AT_DIAL} },
+{RSP_OK,	610, 610, -1,			650,  0, {ACT_DIALING} },
 
-	{RSP_ERROR,   601, 610, -1,                 0, 0, {ACT_ABORTDIAL} },
-	{EV_TIMEOUT,  601, 610, -1,                 0, 0, {ACT_ABORTDIAL} },
+{RSP_ERROR,	601, 610, -1,			  0,  0, {ACT_ABORTDIAL} },
+{EV_TIMEOUT,	601, 610, -1,			  0,  0, {ACT_ABORTDIAL} },
 
-	/* optional dialing responses */
-	{EV_BC_OPEN,  650,650, -1,                651,-1},
-	{RSP_ZVLS,    609, 651, 17,                -1, -1, {ACT_DEBUG} },
-	{RSP_ZCTP,    610, 651, -1,                -1, -1, {ACT_DEBUG} },
-	{RSP_ZCPN,    610, 651, -1,                -1, -1, {ACT_DEBUG} },
-	{RSP_ZSAU,    650,651,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}},
+/* optional dialing responses */
+{EV_BC_OPEN,	650, 650, -1,			651, -1},
+{RSP_ZVLS,	609, 651, 17,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZCTP,	610, 651, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZCPN,	610, 651, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZSAU,	650, 651, ZSAU_CALL_DELIVERED,	 -1, -1, {ACT_DEBUG} },
 
-	/* connect */
-	{RSP_ZSAU,    650,650,ZSAU_ACTIVE,        800,-1, {ACT_CONNECT}},
-	{RSP_ZSAU,    651,651,ZSAU_ACTIVE,        800,-1, {ACT_CONNECT,
-							   ACT_NOTIFY_BC_UP}},
-	{RSP_ZSAU,    750,750,ZSAU_ACTIVE,        800,-1, {ACT_CONNECT}},
-	{RSP_ZSAU,    751,751,ZSAU_ACTIVE,        800,-1, {ACT_CONNECT,
-							   ACT_NOTIFY_BC_UP}},
-	{EV_BC_OPEN,  800,800, -1,                800,-1, {ACT_NOTIFY_BC_UP}},
+/* connect */
+{RSP_ZSAU,	650, 650, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT} },
+{RSP_ZSAU,	651, 651, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT,
+							  ACT_NOTIFY_BC_UP} },
+{RSP_ZSAU,	750, 750, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT} },
+{RSP_ZSAU,	751, 751, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT,
+							  ACT_NOTIFY_BC_UP} },
+{EV_BC_OPEN,	800, 800, -1,			800, -1, {ACT_NOTIFY_BC_UP} },
 
-	/* remote hangup */
-	{RSP_ZSAU,    650,651,ZSAU_DISCONNECT_IND,  0, 0, {ACT_REMOTEREJECT}},
-	{RSP_ZSAU,    750,751,ZSAU_DISCONNECT_IND,  0, 0, {ACT_REMOTEHUP}},
-	{RSP_ZSAU,    800,800,ZSAU_DISCONNECT_IND,  0, 0, {ACT_REMOTEHUP}},
+/* remote hangup */
+{RSP_ZSAU,	650, 651, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEREJECT} },
+{RSP_ZSAU,	750, 751, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEHUP} },
+{RSP_ZSAU,	800, 800, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEHUP} },
 
-	/* hangup */
-	{EV_HUP,       -1, -1, -1,                 -1,-1, {ACT_HUP}}, //FIXME
-	{RSP_INIT,     -1, -1,SEQ_HUP,            401, 5, {0},             "+VLS=0\r"}, /* hang up */ //-1,-1?
-	{RSP_OK,      401,401, -1,                402, 5},
-	{RSP_ZVLS,    402,402,  0,                403, 5},
-	{RSP_ZSAU,    403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
-	{RSP_ZSAU,    403, 403, ZSAU_NULL,            0,  0, {ACT_DISCONNECT} },
-	{RSP_NODEV,   401, 403, -1,                   0,  0, {ACT_FAKEHUP} },
-	{RSP_ERROR,   401,401, -1,                  0, 0, {ACT_ABORTHUP}},
-	{EV_TIMEOUT,  401,403, -1,                  0, 0, {ACT_ABORTHUP}},
+/* hangup */
+{EV_HUP,	 -1,  -1, -1,			 -1, -1, {ACT_HUP} },
+{RSP_INIT,	 -1,  -1, SEQ_HUP,		401,  5, {0},	"+VLS=0\r"},
+{RSP_OK,	401, 401, -1,			402,  5},
+{RSP_ZVLS,	402, 402,  0,			403,  5},
+{RSP_ZSAU,	403, 403, ZSAU_DISCONNECT_REQ,	 -1, -1, {ACT_DEBUG} },
+{RSP_ZSAU,	403, 403, ZSAU_NULL,		  0,  0, {ACT_DISCONNECT} },
+{RSP_NODEV,	401, 403, -1,			  0,  0, {ACT_FAKEHUP} },
+{RSP_ERROR,	401, 401, -1,			  0,  0, {ACT_ABORTHUP} },
+{EV_TIMEOUT,	401, 403, -1,			  0,  0, {ACT_ABORTHUP} },
 
-	{EV_BC_CLOSED,  0,  0, -1,                  0,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME new constate + timeout
+{EV_BC_CLOSED,	  0,   0, -1,			  0, -1, {ACT_NOTIFY_BC_DOWN} },
 
-	/* ring */
-	{RSP_ZBC,     700,700, -1,                 -1,-1, {0}},
-	{RSP_ZHLC,    700,700, -1,                 -1,-1, {0}},
-	{RSP_NMBR,    700,700, -1,                 -1,-1, {0}},
-	{RSP_ZCPN,    700,700, -1,                 -1,-1, {0}},
-	{RSP_ZCTP,    700,700, -1,                 -1,-1, {0}},
-	{EV_TIMEOUT,  700,700, -1,               720,720, {ACT_ICALL}},
-	{EV_BC_CLOSED,720,720, -1,                  0,-1, {ACT_NOTIFY_BC_DOWN}},
+/* ring */
+{RSP_ZBC,	700, 700, -1,			 -1, -1, {0} },
+{RSP_ZHLC,	700, 700, -1,			 -1, -1, {0} },
+{RSP_NMBR,	700, 700, -1,			 -1, -1, {0} },
+{RSP_ZCPN,	700, 700, -1,			 -1, -1, {0} },
+{RSP_ZCTP,	700, 700, -1,			 -1, -1, {0} },
+{EV_TIMEOUT,	700, 700, -1,			720, 720, {ACT_ICALL} },
+{EV_BC_CLOSED,	720, 720, -1,			  0, -1, {ACT_NOTIFY_BC_DOWN} },
 
-	/*accept icall*/
-	{EV_ACCEPT,    -1, -1, -1,                 -1,-1, {ACT_ACCEPT}}, //FIXME
-	{RSP_INIT,    720,720,SEQ_ACCEPT,         721, 5, {ACT_CMD+AT_PROTO}},
-	{RSP_OK,      721,721, -1,                722, 5, {ACT_CMD+AT_ISO}},
-	{RSP_OK,      722,722, -1,                723, 5, {0},             "+VLS=17\r"}, /* set "Endgeraetemodus" */
-	{RSP_OK,      723,723, -1,                724, 5, {0}},
-	{RSP_ZVLS,    724,724, 17,                750,50, {ACT_ACCEPTED}},
-	{RSP_ERROR,   721,729, -1,                  0, 0, {ACT_ABORTACCEPT}},
-	{EV_TIMEOUT,  721,729, -1,                  0, 0, {ACT_ABORTACCEPT}},
-	{RSP_ZSAU,    700,729,ZSAU_NULL,            0, 0, {ACT_ABORTACCEPT}},
-	{RSP_ZSAU,    700,729,ZSAU_ACTIVE,          0, 0, {ACT_ABORTACCEPT}},
-	{RSP_ZSAU,    700,729,ZSAU_DISCONNECT_IND,  0, 0, {ACT_ABORTACCEPT}},
+/*accept icall*/
+{EV_ACCEPT,	 -1,  -1, -1,			 -1, -1, {ACT_ACCEPT} },
+{RSP_INIT,	720, 720, SEQ_ACCEPT,		721,  5, {ACT_CMD+AT_PROTO} },
+{RSP_OK,	721, 721, -1,			722,  5, {ACT_CMD+AT_ISO} },
+{RSP_OK,	722, 722, -1,			723,  5, {0},	"+VLS=17\r"},
+{RSP_OK,	723, 723, -1,			724,  5, {0} },
+{RSP_ZVLS,	724, 724, 17,			750, 50, {ACT_ACCEPTED} },
+{RSP_ERROR,	721, 729, -1,			  0,  0, {ACT_ABORTACCEPT} },
+{EV_TIMEOUT,	721, 729, -1,			  0,  0, {ACT_ABORTACCEPT} },
+{RSP_ZSAU,	700, 729, ZSAU_NULL,		  0,  0, {ACT_ABORTACCEPT} },
+{RSP_ZSAU,	700, 729, ZSAU_ACTIVE,		  0,  0, {ACT_ABORTACCEPT} },
+{RSP_ZSAU,	700, 729, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_ABORTACCEPT} },
 
-	{EV_BC_OPEN,  750,750, -1,                751,-1},
-	{EV_TIMEOUT,  750,751, -1,                  0, 0, {ACT_CONNTIMEOUT}},
+{EV_BC_OPEN,	750, 750, -1,			751, -1},
+{EV_TIMEOUT,	750, 751, -1,			  0,  0, {ACT_CONNTIMEOUT} },
 
-	/* B channel closed (general case) */
-	{EV_BC_CLOSED, -1, -1, -1,                 -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME
+/* B channel closed (general case) */
+{EV_BC_CLOSED,	 -1,  -1, -1,			 -1, -1, {ACT_NOTIFY_BC_DOWN} },
 
-	/* misc. */
-	{RSP_ZCON,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZCCR,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZAOC,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
-	{RSP_ZCSTR,    -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
+/* misc. */
+{RSP_ZCON,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZCCR,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZAOC,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ZCSTR,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
 
-	{RSP_ZCAU,     -1, -1, -1,                 -1,-1, {ACT_ZCAU}},
-	{RSP_NONE,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}},
-	{RSP_ANY,      -1, -1, -1,                 -1,-1, {ACT_WARN}},
-	{RSP_LAST}
+{RSP_ZCAU,	 -1,  -1, -1,			 -1, -1, {ACT_ZCAU} },
+{RSP_NONE,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
+{RSP_ANY,	 -1,  -1, -1,			 -1, -1, {ACT_WARN} },
+{RSP_LAST}
 };
 
 
-static const struct resp_type_t resp_type[] =
+static const struct resp_type_t {
+	unsigned char	*response;
+	int		resp_code;
+	int		type;
+} resp_type[] =
 {
-	/*{"",		RSP_EMPTY,	RT_NOTHING},*/
 	{"OK",		RSP_OK,		RT_NOTHING},
 	{"ERROR",	RSP_ERROR,	RT_NOTHING},
 	{"ZSAU",	RSP_ZSAU,	RT_ZSAU},
@@ -404,7 +403,21 @@
 	{"ZLOG",	RSP_ZLOG,	RT_NOTHING},
 	{"ZABINFO",	RSP_ZABINFO,	RT_NOTHING},
 	{"ZSMLSTCHG",	RSP_ZSMLSTCHG,	RT_NOTHING},
-	{NULL,0,0}
+	{NULL,		0,		0}
+};
+
+static const struct zsau_resp_t {
+	unsigned char	*str;
+	int		code;
+} zsau_resp[] =
+{
+	{"OUTGOING_CALL_PROCEEDING",	ZSAU_OUTGOING_CALL_PROCEEDING},
+	{"CALL_DELIVERED",		ZSAU_CALL_DELIVERED},
+	{"ACTIVE",			ZSAU_ACTIVE},
+	{"DISCONNECT_IND",		ZSAU_DISCONNECT_IND},
+	{"NULL",			ZSAU_NULL},
+	{"DISCONNECT_REQ",		ZSAU_DISCONNECT_REQ},
+	{NULL,				ZSAU_UNKNOWN}
 };
 
 /*
@@ -469,7 +482,6 @@
 	if (cid < 1 || cid > 65535)
 		return -1;	/* CID out of range */
 	return cid;
-	//FIXME is ;<digit>+ at end of non-CID response really impossible?
 }
 
 /**
@@ -486,6 +498,7 @@
 	int params;
 	int i, j;
 	const struct resp_type_t *rt;
+	const struct zsau_resp_t *zr;
 	int curarg;
 	unsigned long flags;
 	unsigned next, tail, head;
@@ -612,24 +625,14 @@
 				event->parameter = ZSAU_NONE;
 				break;
 			}
-			if (!strcmp(argv[curarg], "OUTGOING_CALL_PROCEEDING"))
-				event->parameter = ZSAU_OUTGOING_CALL_PROCEEDING;
-			else if (!strcmp(argv[curarg], "CALL_DELIVERED"))
-				event->parameter = ZSAU_CALL_DELIVERED;
-			else if (!strcmp(argv[curarg], "ACTIVE"))
-				event->parameter = ZSAU_ACTIVE;
-			else if (!strcmp(argv[curarg], "DISCONNECT_IND"))
-				event->parameter = ZSAU_DISCONNECT_IND;
-			else if (!strcmp(argv[curarg], "NULL"))
-				event->parameter = ZSAU_NULL;
-			else if (!strcmp(argv[curarg], "DISCONNECT_REQ"))
-				event->parameter = ZSAU_DISCONNECT_REQ;
-			else {
-				event->parameter = ZSAU_UNKNOWN;
+			for (zr = zsau_resp; zr->str; ++zr)
+				if (!strcmp(argv[curarg], zr->str))
+					break;
+			event->parameter = zr->code;
+			if (!zr->str)
 				dev_warn(cs->dev,
 					"%s: unknown parameter %s after ZSAU\n",
 					 __func__, argv[curarg]);
-			}
 			++curarg;
 			break;
 		case RT_STRING:
@@ -896,7 +899,8 @@
 	gigaset_isdn_connB(bcs);
 }
 
-static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_index)
+static void start_dial(struct at_state_t *at_state, void *data,
+			unsigned seq_index)
 {
 	struct bc_state *bcs = at_state->bcs;
 	struct cardstate *cs = at_state->cs;
@@ -973,8 +977,6 @@
 
 	cs->isdn_up = 1;
 	gigaset_isdn_start(cs);
-					// FIXME: not in locked mode
-					// FIXME 2: only after init sequence
 
 	cs->waiting = 0;
 	wake_up(&cs->waitqueue);
@@ -1128,7 +1130,6 @@
 
 		break;
 	case MS_LOCKED:
-		//retval = -EACCES;
 		break;
 	default:
 		return -EBUSY;
@@ -1384,7 +1385,7 @@
 		cs->cur_at_seq = SEQ_NONE;
 		break;
 
-	case ACT_ABORTACCEPT:	/* hangup/error/timeout during ICALL processing */
+	case ACT_ABORTACCEPT:	/* hangup/error/timeout during ICALL procssng */
 		disconnect(p_at_state);
 		break;
 
@@ -1458,17 +1459,6 @@
 			__func__, at_state->ConState);
 		cs->cur_at_seq = SEQ_NONE;
 		break;
-#ifdef CONFIG_GIGASET_DEBUG
-	case ACT_TEST:
-		{
-			static int count = 3; //2; //1;
-			*p_genresp = 1;
-			*p_resp_code = count ? RSP_ERROR : RSP_OK;
-			if (count > 0)
-				--count;
-		}
-		break;
-#endif
 	case ACT_DEBUG:
 		gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
 			__func__, ev->type, at_state->ConState);
@@ -1503,7 +1493,7 @@
 		do_start(cs);
 		break;
 
-	/* events from the interface */ // FIXME without ACT_xxxx?
+	/* events from the interface */
 	case ACT_IF_LOCK:
 		cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
 		cs->waiting = 0;
@@ -1522,7 +1512,7 @@
 		wake_up(&cs->waitqueue);
 		break;
 
-	/* events from the proc file system */ // FIXME without ACT_xxxx?
+	/* events from the proc file system */
 	case ACT_PROC_CIDMODE:
 		spin_lock_irqsave(&cs->lock, flags);
 		if (ev->parameter != cs->cidmode) {
@@ -1659,7 +1649,8 @@
 	for (curact = 0; curact < MAXACT; ++curact) {
 		/* The row tells us what we should do  ..
 		 */
-		do_action(rep->action[curact], cs, bcs, &at_state, &p_command, &genresp, &resp_code, ev);
+		do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
+			  &genresp, &resp_code, ev);
 		if (!at_state)
 			break; /* may be freed after disconnect */
 	}
@@ -1671,13 +1662,14 @@
 
 		if (genresp) {
 			spin_lock_irqsave(&cs->lock, flags);
-			at_state->timer_expires = 0; //FIXME
-			at_state->timer_active = 0; //FIXME
+			at_state->timer_expires = 0;
+			at_state->timer_active = 0;
 			spin_unlock_irqrestore(&cs->lock, flags);
-			gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
+			gigaset_add_event(cs, at_state, resp_code,
+					  NULL, 0, NULL);
 		} else {
 			/* Send command to modem if not NULL... */
-			if (p_command/*rep->command*/) {
+			if (p_command) {
 				if (cs->connected)
 					send_command(cs, p_command,
 						     sendcid, cs->dle,
@@ -1764,7 +1756,8 @@
 		}
 	}
 
-	/* only switch back to unimodem mode, if no commands are pending and no channels are up */
+	/* only switch back to unimodem mode if no commands are pending and
+	 * no channels are up */
 	spin_lock_irqsave(&cs->lock, flags);
 	if (cs->at_state.pending_commands == PC_UMMODE
 	    && !cs->cidmode
@@ -1823,9 +1816,8 @@
 
 	if (cs->at_state.pending_commands & PC_INIT) {
 		cs->at_state.pending_commands &= ~PC_INIT;
-		cs->dle = 0; //FIXME
+		cs->dle = 0;
 		cs->inbuf->inputstate = INS_command;
-		//FIXME reset card state (or -> LOCK0)?
 		schedule_sequence(cs, &cs->at_state, SEQ_INIT);
 		return;
 	}
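
The ZSAU parameter handling above now walks the sentinel-terminated zsau_resp[]
table instead of an open-coded if/else chain. A minimal standalone sketch of
that lookup pattern, with invented example strings and codes (illustration
only, not driver code):

#include <stdio.h>
#include <string.h>

struct str_code {
	const char	*str;
	int		code;
};

static const struct str_code tab[] = {
	{"ACTIVE",		1},
	{"DISCONNECT_IND",	2},
	{NULL,			-1},	/* sentinel entry supplies the default */
};

static int lookup(const char *s)
{
	const struct str_code *p;

	for (p = tab; p->str; ++p)	/* stop at the NULL sentinel */
		if (!strcmp(s, p->str))
			break;
	return p->code;			/* unknown strings fall through to -1 */
}

int main(void)
{
	printf("%d %d\n", lookup("ACTIVE"), lookup("bogus"));	/* prints "1 -1" */
	return 0;
}
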
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 4749ef1..e963a6c 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -34,8 +34,8 @@
 #include <linux/list.h>
 #include <asm/atomic.h>
 
-#define GIG_VERSION {0,5,0,0}
-#define GIG_COMPAT  {0,4,0,0}
+#define GIG_VERSION {0, 5, 0, 0}
+#define GIG_COMPAT  {0, 4, 0, 0}
 
 #define MAX_REC_PARAMS 10	/* Max. number of params in response string */
 #define MAX_RESP_SIZE 512	/* Max. size of a response string */
@@ -133,35 +133,32 @@
 #define OUT_VENDOR_REQ	(USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
 #define IN_VENDOR_REQ	(USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
 
-/* int-in-events 3070 */
+/* interrupt pipe messages */
 #define HD_B1_FLOW_CONTROL		0x80
 #define HD_B2_FLOW_CONTROL		0x81
-#define HD_RECEIVEATDATA_ACK		(0x35)		// 3070
-						// att: HD_RECEIVE>>AT<<DATA_ACK
-#define HD_READY_SEND_ATDATA		(0x36)		// 3070
-#define HD_OPEN_ATCHANNEL_ACK		(0x37)		// 3070
-#define HD_CLOSE_ATCHANNEL_ACK		(0x38)		// 3070
-#define HD_DEVICE_INIT_OK		(0x11)		// ISurf USB + 3070
-#define HD_OPEN_B1CHANNEL_ACK		(0x51)		// ISurf USB + 3070
-#define HD_OPEN_B2CHANNEL_ACK		(0x52)		// ISurf USB + 3070
-#define HD_CLOSE_B1CHANNEL_ACK		(0x53)		// ISurf USB + 3070
-#define HD_CLOSE_B2CHANNEL_ACK		(0x54)		// ISurf USB + 3070
-// 	 Powermangment
-#define HD_SUSPEND_END			(0x61)		// ISurf USB
-//   Configuration
-#define HD_RESET_INTERRUPT_PIPE_ACK	(0xFF)		// ISurf USB + 3070
+#define HD_RECEIVEATDATA_ACK		(0x35)		/* 3070 */
+#define HD_READY_SEND_ATDATA		(0x36)		/* 3070 */
+#define HD_OPEN_ATCHANNEL_ACK		(0x37)		/* 3070 */
+#define HD_CLOSE_ATCHANNEL_ACK		(0x38)		/* 3070 */
+#define HD_DEVICE_INIT_OK		(0x11)		/* ISurf USB + 3070 */
+#define HD_OPEN_B1CHANNEL_ACK		(0x51)		/* ISurf USB + 3070 */
+#define HD_OPEN_B2CHANNEL_ACK		(0x52)		/* ISurf USB + 3070 */
+#define HD_CLOSE_B1CHANNEL_ACK		(0x53)		/* ISurf USB + 3070 */
+#define HD_CLOSE_B2CHANNEL_ACK		(0x54)		/* ISurf USB + 3070 */
+#define HD_SUSPEND_END			(0x61)		/* ISurf USB */
+#define HD_RESET_INTERRUPT_PIPE_ACK	(0xFF)		/* ISurf USB + 3070 */
 
-/* control requests 3070 */
-#define	HD_OPEN_B1CHANNEL		(0x23)		// ISurf USB + 3070
-#define	HD_CLOSE_B1CHANNEL		(0x24)		// ISurf USB + 3070
-#define	HD_OPEN_B2CHANNEL		(0x25)		// ISurf USB + 3070
-#define	HD_CLOSE_B2CHANNEL		(0x26)		// ISurf USB + 3070
-#define HD_RESET_INTERRUPT_PIPE		(0x27)		// ISurf USB + 3070
-#define	HD_DEVICE_INIT_ACK		(0x34)		// ISurf USB + 3070
-#define	HD_WRITE_ATMESSAGE		(0x12)		// 3070
-#define	HD_READ_ATMESSAGE		(0x13)		// 3070
-#define	HD_OPEN_ATCHANNEL		(0x28)		// 3070
-#define	HD_CLOSE_ATCHANNEL		(0x29)		// 3070
+/* control requests */
+#define	HD_OPEN_B1CHANNEL		(0x23)		/* ISurf USB + 3070 */
+#define	HD_CLOSE_B1CHANNEL		(0x24)		/* ISurf USB + 3070 */
+#define	HD_OPEN_B2CHANNEL		(0x25)		/* ISurf USB + 3070 */
+#define	HD_CLOSE_B2CHANNEL		(0x26)		/* ISurf USB + 3070 */
+#define HD_RESET_INTERRUPT_PIPE		(0x27)		/* ISurf USB + 3070 */
+#define	HD_DEVICE_INIT_ACK		(0x34)		/* ISurf USB + 3070 */
+#define	HD_WRITE_ATMESSAGE		(0x12)		/* 3070 */
+#define	HD_READ_ATMESSAGE		(0x13)		/* 3070 */
+#define	HD_OPEN_ATCHANNEL		(0x28)		/* 3070 */
+#define	HD_CLOSE_ATCHANNEL		(0x29)		/* 3070 */
 
 /* number of B channels supported by base driver */
 #define BAS_CHANNELS	2
@@ -223,12 +220,11 @@
 #define EV_BC_CLOSED	-118
 
 /* input state */
-#define INS_command	0x0001
-#define INS_DLE_char	0x0002
+#define INS_command	0x0001	/* receiving messages (not payload data) */
+#define INS_DLE_char	0x0002	/* DLE flag received (in DLE mode) */
 #define INS_byte_stuff	0x0004
 #define INS_have_data	0x0008
-#define INS_skip_frame	0x0010
-#define INS_DLE_command	0x0020
+#define INS_DLE_command	0x0020	/* DLE message start (<DLE> X) received */
 #define INS_flag_hunt	0x0040
 
 /* channel state */
@@ -290,8 +286,6 @@
 extern struct reply_t gigaset_tab_nocid[];
 
 struct inbuf_t {
-	unsigned char		*rcvbuf;	/* usb-gigaset receive buffer */
-	struct bc_state		*bcs;
 	struct cardstate	*cs;
 	int			inputstate;
 	int			head, tail;
@@ -363,12 +357,6 @@
 	struct bc_state		*bcs;
 };
 
-struct resp_type_t {
-	unsigned char	*response;
-	int		resp_code;	/* RSP_XXXX */
-	int		type;		/* RT_XXXX */
-};
-
 struct event_t {
 	int type;
 	void *ptr, *arg;
@@ -483,8 +471,8 @@
 
 	struct timer_list timer;
 	int retry_count;
-	int dle;			/* !=0 if modem commands/responses are
-					   dle encoded */
+	int dle;			/* !=0 if DLE mode is active
+					   (ZDLE=1 received -- M10x only) */
 	int cur_at_seq;			/* sequence of AT commands being
 					   processed */
 	int curchannel;			/* channel those commands are meant
@@ -625,7 +613,7 @@
 
 	/* Called from LL interface to put an skb into the send-queue.
 	 * After sending is completed, gigaset_skb_sent() must be called
-	 * with the first cs->hw_hdr_len bytes of skb->head preserved. */
+	 * with the skb's link layer header preserved. */
 	int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
 
 	/* Called from ev-layer.c to process a block of data
@@ -634,7 +622,8 @@
 
 };
 
-/* = Common structures and definitions ======================================= */
+/* = Common structures and definitions =======================================
+ */
 
 /* Parser states for DLE-Event:
  * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
@@ -779,7 +768,7 @@
 				  void *ptr, int parameter, void *arg);
 
 /* Called on CONFIG1 command from frontend. */
-int gigaset_enterconfigmode(struct cardstate *cs); //0: success <0: errorcode
+int gigaset_enterconfigmode(struct cardstate *cs);
 
 /* cs->lock must not be locked */
 static inline void gigaset_schedule_event(struct cardstate *cs)
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index aca72a0..c129ee4 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -39,12 +39,12 @@
 static int writebuf_from_LL(int driverID, int channel, int ack,
 			    struct sk_buff *skb)
 {
-	struct cardstate *cs;
+	struct cardstate *cs = gigaset_get_cs_by_id(driverID);
 	struct bc_state *bcs;
+	unsigned char *ack_header;
 	unsigned len;
-	unsigned skblen;
 
-	if (!(cs = gigaset_get_cs_by_id(driverID))) {
+	if (!cs) {
 		pr_err("%s: invalid driver ID (%d)\n", __func__, driverID);
 		return -ENODEV;
 	}
@@ -78,11 +78,23 @@
 		return -EINVAL;
 	}
 
-	skblen = ack ? len : 0;
-	skb->head[0] = skblen & 0xff;
-	skb->head[1] = skblen >> 8;
-	gig_dbg(DEBUG_MCMD, "skb: len=%u, skblen=%u: %02x %02x",
-		len, skblen, (unsigned) skb->head[0], (unsigned) skb->head[1]);
+	/* set up acknowledgement header */
+	if (skb_headroom(skb) < HW_HDR_LEN) {
+		/* should never happen */
+		dev_err(cs->dev, "%s: insufficient skb headroom\n", __func__);
+		return -ENOMEM;
+	}
+	skb_set_mac_header(skb, -HW_HDR_LEN);
+	skb->mac_len = HW_HDR_LEN;
+	ack_header = skb_mac_header(skb);
+	if (ack) {
+		ack_header[0] = len & 0xff;
+		ack_header[1] = len >> 8;
+	} else {
+		ack_header[0] = ack_header[1] = 0;
+	}
+	gig_dbg(DEBUG_MCMD, "skb: len=%u, ack=%d: %02x %02x",
+		len, ack, ack_header[0], ack_header[1]);
 
 	/* pass to device-specific module */
 	return cs->ops->send_skb(bcs, skb);
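
The acknowledgement header set up above simply stores the byte count
little-endian in the two reserved bytes in front of the payload;
gigaset_skb_sent() reads it back the same way. A standalone round-trip check
of that byte arithmetic alone (not driver code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int len = 0x1234, readback;
	unsigned char ack_header[2];

	ack_header[0] = len & 0xff;	/* low byte first */
	ack_header[1] = len >> 8;	/* then the high byte */

	/* as reconstructed when the transmission is acknowledged */
	readback = ack_header[0] + ((unsigned int) ack_header[1] << 8);
	assert(readback == len);
	printf("len=0x%04x survives the round trip\n", readback);
	return 0;
}
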
@@ -99,6 +111,7 @@
 void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
 {
 	isdn_if *iif = bcs->cs->iif;
+	unsigned char *ack_header = skb_mac_header(skb);
 	unsigned len;
 	isdn_ctrl response;
 
@@ -108,8 +121,7 @@
 		dev_warn(bcs->cs->dev, "%s: skb->len==%d\n",
 			 __func__, skb->len);
 
-	len = (unsigned char) skb->head[0] |
-	      (unsigned) (unsigned char) skb->head[1] << 8;
+	len = ack_header[0] + ((unsigned) ack_header[1] << 8);
 	if (len) {
 		gig_dbg(DEBUG_MCMD, "ACKing to LL (id: %d, ch: %d, sz: %u)",
 			bcs->cs->myid, bcs->channel, len);
@@ -379,22 +391,19 @@
 
 		break;
 	case ISDN_CMD_PROCEED:
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED"); //FIXME
+		gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED");
 		break;
 	case ISDN_CMD_ALERT:
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME
+		gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT");
 		if (cntrl->arg >= cs->channels) {
 			dev_err(cs->dev,
 				"ISDN_CMD_ALERT: invalid channel (%d)\n",
 				(int) cntrl->arg);
 			return -EINVAL;
 		}
-		//bcs = cs->bcs + cntrl->arg;
-		//bcs->proto2 = -1;
-		// FIXME
 		break;
 	case ISDN_CMD_REDIR:
-		gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR"); //FIXME
+		gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR");
 		break;
 	case ISDN_CMD_PROT_IO:
 		gig_dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO");
@@ -474,7 +483,7 @@
 	/* fill ICALL structure */
 	response.parm.setup.si1 = 0;	/* default: unknown */
 	response.parm.setup.si2 = 0;
-	response.parm.setup.screen = 0;	//FIXME how to set these?
+	response.parm.setup.screen = 0;
 	response.parm.setup.plan = 0;
 	if (!at_state->str_var[STR_ZBC]) {
 		/* no BC (internal call): assume speech, A-law */
@@ -495,26 +504,24 @@
 		return ICALL_IGNORE;
 	}
 	if (at_state->str_var[STR_NMBR]) {
-		strncpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
-			sizeof response.parm.setup.phone - 1);
-		response.parm.setup.phone[sizeof response.parm.setup.phone - 1] = 0;
+		strlcpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
+			sizeof response.parm.setup.phone);
 	} else
 		response.parm.setup.phone[0] = 0;
 	if (at_state->str_var[STR_ZCPN]) {
-		strncpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
-			sizeof response.parm.setup.eazmsn - 1);
-		response.parm.setup.eazmsn[sizeof response.parm.setup.eazmsn - 1] = 0;
+		strlcpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
+			sizeof response.parm.setup.eazmsn);
 	} else
 		response.parm.setup.eazmsn[0] = 0;
 
 	if (!bcs) {
 		dev_notice(cs->dev, "no channel for incoming call\n");
 		response.command = ISDN_STAT_ICALLW;
-		response.arg = 0; //FIXME
+		response.arg = 0;
 	} else {
 		gig_dbg(DEBUG_CMD, "Sending ICALL");
 		response.command = ISDN_STAT_ICALL;
-		response.arg = bcs->channel; //FIXME
+		response.arg = bcs->channel;
 	}
 	response.driver = cs->myid;
 	retval = iif->statcallb(&response);
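
The strncpy()-to-strlcpy() change above drops the manual termination because
strlcpy() always NUL-terminates and truncates to the buffer size. A standalone
illustration of that behaviour; since strlcpy() is a BSD/kernel extension, a
local fallback with the assumed semantics is used here:

#include <stdio.h>
#include <string.h>

/* same contract as strlcpy(): copy, truncate, always NUL-terminate,
 * return the length of the source string */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char phone[8];

	my_strlcpy(phone, "01234567890", sizeof phone);
	printf("%s\n", phone);	/* prints "0123456", terminator guaranteed */
	return 0;
}
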
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 6a8e138..577809c 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -162,7 +162,7 @@
 		return -ENODEV;
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 	tty->driver_data = cs;
 
 	++cs->open_count;
@@ -171,7 +171,7 @@
 		spin_lock_irqsave(&cs->lock, flags);
 		cs->tty = tty;
 		spin_unlock_irqrestore(&cs->lock, flags);
-		tty->low_latency = 1; //FIXME test
+		tty->low_latency = 1;
 	}
 
 	mutex_unlock(&cs->mutex);
@@ -228,7 +228,7 @@
 	gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
 	if (!cs->connected) {
 		gig_dbg(DEBUG_IF, "not connected");
@@ -299,9 +299,8 @@
 	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
-	// FIXME read from device?
 	retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR);
 
 	mutex_unlock(&cs->mutex);
@@ -326,7 +325,7 @@
 		cs->minor_index, __func__, set, clear);
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
 	if (!cs->connected) {
 		gig_dbg(DEBUG_IF, "not connected");
@@ -356,7 +355,7 @@
 	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
 	if (!cs->connected) {
 		gig_dbg(DEBUG_IF, "not connected");
@@ -390,7 +389,7 @@
 	gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
 	if (!cs->connected) {
 		gig_dbg(DEBUG_IF, "not connected");
@@ -455,9 +454,8 @@
 		gig_dbg(DEBUG_IF, "not connected");	/* nothing to do */
 	else if (!cs->open_count)
 		dev_warn(cs->dev, "%s: device not opened\n", __func__);
-	else {
-		//FIXME
-	}
+	else
+		gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
 
 	mutex_unlock(&cs->mutex);
 }
@@ -480,9 +478,8 @@
 		gig_dbg(DEBUG_IF, "not connected");	/* nothing to do */
 	else if (!cs->open_count)
 		dev_warn(cs->dev, "%s: device not opened\n", __func__);
-	else {
-		//FIXME
-	}
+	else
+		gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
 
 	mutex_unlock(&cs->mutex);
 }
@@ -515,10 +512,9 @@
 		goto out;
 	}
 
-	// stolen from mct_u232.c
 	iflag = tty->termios->c_iflag;
 	cflag = tty->termios->c_cflag;
-	old_cflag = old ? old->c_cflag : cflag; //FIXME?
+	old_cflag = old ? old->c_cflag : cflag;
 	gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x",
 		cs->minor_index, iflag, cflag, old_cflag);
 
@@ -632,7 +628,8 @@
 	struct tty_struct *tty;
 
 	spin_lock_irqsave(&cs->lock, flags);
-	if ((tty = cs->tty) == NULL)
+	tty = cs->tty;
+	if (tty == NULL)
 		gig_dbg(DEBUG_ANY, "receive on closed device");
 	else {
 		tty_buffer_request_room(tty, len);
@@ -659,9 +656,9 @@
 
 	drv->have_tty = 0;
 
-	if ((drv->tty = alloc_tty_driver(minors)) == NULL)
+	drv->tty = tty = alloc_tty_driver(minors);
+	if (tty == NULL)
 		goto enomem;
-	tty = drv->tty;
 
 	tty->magic =		TTY_DRIVER_MAGIC,
 	tty->major =		GIG_MAJOR,
@@ -676,8 +673,8 @@
 
 	tty->owner =		THIS_MODULE;
 
-	tty->init_termios          = tty_std_termios; //FIXME
-	tty->init_termios.c_cflag  = B9600 | CS8 | CREAD | HUPCL | CLOCAL; //FIXME
+	tty->init_termios          = tty_std_termios;
+	tty->init_termios.c_cflag  = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
 	tty_set_operations(tty, &if_ops);
 
 	ret = tty_register_driver(tty);
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index 7dabfd3..85394a6 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -41,7 +41,8 @@
 
 	read = iwb->read;
 	write = iwb->write;
-	if ((freebytes = read - write) > 0) {
+	freebytes = read - write;
+	if (freebytes > 0) {
 		/* no wraparound: need padding space within regular area */
 		return freebytes - BAS_OUTBUFPAD;
 	} else if (read < BAS_OUTBUFPAD) {
@@ -53,29 +54,6 @@
 	}
 }
 
-/* compare two offsets within the buffer
- * The buffer is seen as circular, with the read position as start
- * returns -1/0/1 if position a </=/> position b without crossing 'read'
- */
-static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b)
-{
-	int read;
-	if (a == b)
-		return 0;
-	read = iwb->read;
-	if (a < b) {
-		if (a < read && read <= b)
-			return +1;
-		else
-			return -1;
-	} else {
-		if (b < read && read <= a)
-			return -1;
-		else
-			return +1;
-	}
-}
-
 /* start writing
  * acquire the write semaphore
  * return true if acquired, false if busy
@@ -271,7 +249,7 @@
  *        bit 14..13 = number of bits added by stuffing
  */
 static const u16 stufftab[5 * 256] = {
-// previous 1s = 0:
+/* previous 1s = 0: */
  0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
  0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
  0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
@@ -289,7 +267,7 @@
  0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
  0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
 
-// previous 1s = 1:
+/* previous 1s = 1: */
  0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
  0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
  0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
@@ -307,7 +285,7 @@
  0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
  0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
 
-// previous 1s = 2:
+/* previous 1s = 2: */
  0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
  0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
  0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
@@ -325,7 +303,7 @@
  0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
  0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
 
-// previous 1s = 3:
+/* previous 1s = 3: */
  0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
  0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
  0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
@@ -343,7 +321,7 @@
  0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
  0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
 
-// previous 1s = 4:
+/* previous 1s = 4: */
  0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
  0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
  0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
@@ -367,7 +345,8 @@
  * parameters:
  *	cin	input byte
  *	ones	number of trailing '1' bits in result before this step
- *	iwb	pointer to output buffer structure (write semaphore must be held)
+ *	iwb	pointer to output buffer structure
+ *		(write semaphore must be held)
  * return value:
  *	number of trailing '1' bits in result after this step
  */
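
hdlc_bitstuff_byte() consumes the stufftab[] entries shown further up, each of
which packs several fields into one u16. The hunk above documents the
stuffed-bit count in bits 14..13; the result bits and the trailing-'1' count
are assumed here to occupy bits 9..0 and 12..10. A small standalone decoder
for one entry (illustration only):

#include <stdio.h>

static void decode_stufftab_entry(unsigned short e)
{
	unsigned int result   = e & 0x3ff;		/* bits 9..0 (assumed) */
	unsigned int trailing = (e >> 10) & 0x7;	/* bits 12..10 (assumed) */
	unsigned int stuffed  = (e >> 13) & 0x3;	/* bits 14..13 (documented) */

	printf("result=0x%03x trailing_ones=%u stuffed_bits=%u\n",
	       result, trailing, stuffed);
}

int main(void)
{
	/* 0x201f is the entry for input byte 0x1f with no preceding '1's:
	 * five consecutive '1' bits force one stuffed '0', so the result
	 * grows to nine bits and ends in a '0' */
	decode_stufftab_entry(0x201f);
	return 0;
}
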
@@ -408,7 +387,8 @@
  * parameters:
  *	in	input buffer
  *	count	number of bytes in input buffer
- *	iwb	pointer to output buffer structure (write semaphore must be held)
+ *	iwb	pointer to output buffer structure
+ *		(write semaphore must be held)
  * return value:
  *	position of end of packet in output buffer on success,
  *	-EAGAIN if write semaphore busy or buffer full
@@ -440,7 +420,8 @@
 		fcs = crc_ccitt_byte(fcs, c);
 	}
 
-	/* bitstuff and append FCS (complemented, least significant byte first) */
+	/* bitstuff and append FCS
+	 * (complemented, least significant byte first) */
 	fcs ^= 0xffff;
 	ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
 	ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
@@ -459,7 +440,8 @@
  * parameters:
  *	in	input buffer
  *	count	number of bytes in input buffer
- *	iwb	pointer to output buffer structure (write semaphore must be held)
+ *	iwb	pointer to output buffer structure
+ *		(write semaphore must be held)
  * return value:
  *	position of end of packet in output buffer on success,
  *	-EAGAIN if write semaphore busy or buffer full
@@ -567,8 +549,8 @@
 		hdlc_flush(bcs);
 		return;
 	}
-
-	if ((procskb = bcs->skb) == NULL) {
+	procskb = bcs->skb;
+	if (procskb == NULL) {
 		/* previous error */
 		gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
 		gigaset_isdn_rcv_err(bcs);
@@ -576,12 +558,12 @@
 		dev_notice(cs->dev, "received short frame (%d octets)\n",
 			   procskb->len);
 		bcs->hw.bas->runts++;
-		dev_kfree_skb(procskb);
+		dev_kfree_skb_any(procskb);
 		gigaset_isdn_rcv_err(bcs);
 	} else if (bcs->fcs != PPP_GOODFCS) {
 		dev_notice(cs->dev, "frame check error (0x%04x)\n", bcs->fcs);
 		bcs->hw.bas->fcserrs++;
-		dev_kfree_skb(procskb);
+		dev_kfree_skb_any(procskb);
 		gigaset_isdn_rcv_err(bcs);
 	} else {
 		len = procskb->len;
@@ -646,8 +628,8 @@
 };
 
 /* hdlc_unpack
- * perform HDLC frame processing (bit unstuffing, flag detection, FCS calculation)
- * on a sequence of received data bytes (8 bits each, LSB first)
+ * perform HDLC frame processing (bit unstuffing, flag detection, FCS
+ * calculation) on a sequence of received data bytes (8 bits each, LSB first)
  * pass on successfully received, complete frames as SKBs via gigaset_skb_rcvd
  * notify of errors via gigaset_isdn_rcv_err
  * tally frames, errors etc. in BC structure counters
@@ -665,9 +647,12 @@
 
 	/* load previous state:
 	 * inputstate = set of flag bits:
-	 * - INS_flag_hunt: no complete opening flag received since connection setup or last abort
-	 * - INS_have_data: at least one complete data byte received since last flag
-	 * seqlen = number of consecutive '1' bits in last 7 input stream bits (0..7)
+	 * - INS_flag_hunt: no complete opening flag received since connection
+	 *                  setup or last abort
+	 * - INS_have_data: at least one complete data byte received since last
+	 *                  flag
+	 * seqlen = number of consecutive '1' bits in last 7 input stream bits
+	 *          (0..7)
 	 * inbyte = accumulated partial data byte (if !INS_flag_hunt)
 	 * inbits = number of valid bits in inbyte, starting at LSB (0..6)
 	 */
@@ -701,9 +686,11 @@
 				inbyte = c >> (lead1 + 1);
 				inbits = 7 - lead1;
 				if (trail1 >= 8) {
-					/* interior stuffing: omitting the MSB handles most cases */
+					/* interior stuffing:
+					 * omitting the MSB handles most cases,
+					 * correct the incorrectly handled
+					 * cases individually */
 					inbits--;
-					/* correct the incorrectly handled cases individually */
 					switch (c) {
 					case 0xbe:
 						inbyte = 0x3f;
@@ -729,13 +716,14 @@
 			hdlc_flush(bcs);
 			inputstate |= INS_flag_hunt;
 		} else if (seqlen == 6) {
-			/* closing flag, including (6 - lead1) '1's and one '0' from inbits */
+			/* closing flag, including (6 - lead1) '1's
+			 * and one '0' from inbits */
 			if (inbits > 7 - lead1) {
 				hdlc_frag(bcs, inbits + lead1 - 7);
 				inputstate &= ~INS_have_data;
 			} else {
 				if (inbits < 7 - lead1)
-					ubc->stolen0s ++;
+					ubc->stolen0s++;
 				if (inputstate & INS_have_data) {
 					hdlc_done(bcs);
 					inputstate &= ~INS_have_data;
@@ -744,7 +732,7 @@
 
 			if (c == PPP_FLAG) {
 				/* complete flag, LSB overlaps preceding flag */
-				ubc->shared0s ++;
+				ubc->shared0s++;
 				inbits = 0;
 				inbyte = 0;
 			} else if (trail1 != 7) {
@@ -752,9 +740,11 @@
 				inbyte = c >> (lead1 + 1);
 				inbits = 7 - lead1;
 				if (trail1 >= 8) {
-					/* interior stuffing: omitting the MSB handles most cases */
+					/* interior stuffing:
+					 * omitting the MSB handles most cases,
+					 * correct the incorrectly handled
+					 * cases individually */
 					inbits--;
-					/* correct the incorrectly handled cases individually */
 					switch (c) {
 					case 0xbe:
 						inbyte = 0x3f;
@@ -762,7 +752,8 @@
 					}
 				}
 			} else {
-				/* abort sequence follows, skb already empty anyway */
+				/* abort sequence follows,
+				 * skb already empty anyway */
 				ubc->aborts++;
 				inputstate |= INS_flag_hunt;
 			}
@@ -787,14 +778,17 @@
 			} else {
 				/* stuffed data */
 				if (trail1 < 7) { /* => seqlen == 5 */
-					/* stuff bit at position lead1, no interior stuffing */
+					/* stuff bit at position lead1,
+					 * no interior stuffing */
 					unsigned char mask = (1 << lead1) - 1;
 					c = (c & mask) | ((c & ~mask) >> 1);
 					inbyte |= c << inbits;
 					inbits += 7;
 				} else if (seqlen < 5) { /* trail1 >= 8 */
-					/* interior stuffing: omitting the MSB handles most cases */
-					/* correct the incorrectly handled cases individually */
+					/* interior stuffing:
+					 * omitting the MSB handles most cases,
+					 * correct the incorrectly handled
+					 * cases individually */
 					switch (c) {
 					case 0xbe:
 						c = 0x7e;
@@ -804,8 +798,9 @@
 					inbits += 7;
 				} else { /* seqlen == 5 && trail1 >= 8 */
 
-					/* stuff bit at lead1 *and* interior stuffing */
-					switch (c) {	/* unstuff individually */
+					/* stuff bit at lead1 *and* interior
+					 * stuffing -- unstuff individually */
+					switch (c) {
 					case 0x7d:
 						c = 0x3f;
 						break;
@@ -862,7 +857,8 @@
 		hdlc_flush(bcs);
 		return;
 	}
-	if (unlikely((skb = bcs->skb) == NULL)) {
+	skb = bcs->skb;
+	if (unlikely(skb == NULL)) {
 		bcs->skb = skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
 		if (!skb) {
 			dev_err(cs->dev, "could not allocate skb\n");
@@ -895,7 +891,8 @@
 	}
 }
 
-void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs)
+void gigaset_isoc_receive(unsigned char *src, unsigned count,
+			  struct bc_state *bcs)
 {
 	switch (bcs->proto2) {
 	case L2_HDLC:
@@ -985,7 +982,7 @@
  * Called by LL to queue an skb for sending, and start transmission if
  * necessary.
  * Once the payload data has been transmitted completely, gigaset_skb_sent()
- * will be called with the first cs->hw_hdr_len bytes of skb->head preserved.
+ * will be called with the skb's link layer header preserved.
  *
  * Return value:
  *	number of bytes accepted for sending (skb->len) if ok,
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index 9715aad..758a00c 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -39,7 +39,7 @@
 			return -EINVAL;
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return -ERESTARTSYS; // FIXME -EINTR?
+		return -ERESTARTSYS;
 
 	cs->waiting = 1;
 	if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 3071a52..ac3409e 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -164,9 +164,15 @@
 {
 	struct cardstate *cs = (struct cardstate *) data;
 	struct bc_state *bcs;
+	struct sk_buff *nextskb;
 	int sent = 0;
 
-	if (!cs || !(bcs = cs->bcs)) {
+	if (!cs) {
+		gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
+		return;
+	}
+	bcs = cs->bcs;
+	if (!bcs) {
 		gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
 		return;
 	}
@@ -179,9 +185,11 @@
 			return;
 
 		/* no command to send; get skb */
-		if (!(bcs->tx_skb = skb_dequeue(&bcs->squeue)))
+		nextskb = skb_dequeue(&bcs->squeue);
+		if (!nextskb)
 			/* no skb either, nothing to do */
 			return;
+		bcs->tx_skb = nextskb;
 
 		gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)",
 			(unsigned long) bcs->tx_skb);
@@ -236,19 +244,20 @@
  *	number of bytes queued, or error code < 0
  */
 static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
-                             int len, struct tasklet_struct *wake_tasklet)
+			     int len, struct tasklet_struct *wake_tasklet)
 {
 	struct cmdbuf_t *cb;
 	unsigned long flags;
 
 	gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
-	                     DEBUG_TRANSCMD : DEBUG_LOCKCMD,
-	                   "CMD Transmit", len, buf);
+				DEBUG_TRANSCMD : DEBUG_LOCKCMD,
+			   "CMD Transmit", len, buf);
 
 	if (len <= 0)
 		return 0;
 
-	if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
+	cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
+	if (!cb) {
 		dev_err(cs->dev, "%s: out of memory!\n", __func__);
 		return -ENOMEM;
 	}
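
gigaset_write_cmd() above allocates the command buffer header and its payload
in a single kmalloc(sizeof(struct cmdbuf_t) + len, ...), the usual
header-plus-trailing-data idiom. A userspace analogue with a made-up struct
(sketch only; the real cmdbuf_t carries more bookkeeping fields):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cmdbuf {
	int		len;
	unsigned char	buf[];		/* payload follows the header */
};

static struct cmdbuf *cmdbuf_new(const unsigned char *data, int len)
{
	struct cmdbuf *cb = malloc(sizeof(*cb) + len);	/* one allocation */

	if (!cb)
		return NULL;		/* caller reports out of memory */
	cb->len = len;
	memcpy(cb->buf, data, len);
	return cb;
}

int main(void)
{
	struct cmdbuf *cb = cmdbuf_new((const unsigned char *) "ATZ\r", 4);

	if (cb) {
		printf("queued %d bytes\n", cb->len);
		free(cb);	/* header and payload are freed together */
	}
	return 0;
}
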
@@ -392,7 +401,6 @@
 	struct platform_device *pdev = to_platform_device(dev);
 
 	/* adapted from platform_device_release() in drivers/base/platform.c */
-	//FIXME is this actually necessary?
 	kfree(dev->platform_data);
 	kfree(pdev->resource);
 }
@@ -404,16 +412,20 @@
 static int gigaset_initcshw(struct cardstate *cs)
 {
 	int rc;
+	struct ser_cardstate *scs;
 
-	if (!(cs->hw.ser = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL))) {
+	scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
+	if (!scs) {
 		pr_err("out of memory\n");
 		return 0;
 	}
+	cs->hw.ser = scs;
 
 	cs->hw.ser->dev.name = GIGASET_MODULENAME;
 	cs->hw.ser->dev.id = cs->minor_index;
 	cs->hw.ser->dev.dev.release = gigaset_device_release;
-	if ((rc = platform_device_register(&cs->hw.ser->dev)) != 0) {
+	rc = platform_device_register(&cs->hw.ser->dev);
+	if (rc != 0) {
 		pr_err("error %d registering platform device\n", rc);
 		kfree(cs->hw.ser);
 		cs->hw.ser = NULL;
@@ -422,7 +434,7 @@
 	dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
 
 	tasklet_init(&cs->write_tasklet,
-	             &gigaset_modem_fill, (unsigned long) cs);
+		     &gigaset_modem_fill, (unsigned long) cs);
 	return 1;
 }
 
@@ -434,7 +446,8 @@
  * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c
  * and by "if_lock" and "if_termios" in interface.c
  */
-static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, unsigned new_state)
+static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
+				  unsigned new_state)
 {
 	struct tty_struct *tty = cs->hw.ser->tty;
 	unsigned int set, clear;
@@ -520,8 +533,8 @@
 	}
 
 	/* allocate memory for our device state and initialize it */
-	if (!(cs = gigaset_initcs(driver, 1, 1, 0, cidmode,
-				  GIGASET_MODULENAME)))
+	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
+	if (!cs)
 		goto error;
 
 	cs->dev = &cs->hw.ser->dev.dev;
@@ -690,7 +703,8 @@
 
 	if (!cs)
 		return;
-	if (!(inbuf = cs->inbuf)) {
+	inbuf = cs->inbuf;
+	if (!inbuf) {
 		dev_err(cs->dev, "%s: no inbuf\n", __func__);
 		cs_put(cs);
 		return;
@@ -770,18 +784,21 @@
 	int rc;
 
 	gig_dbg(DEBUG_INIT, "%s", __func__);
-	if ((rc = platform_driver_register(&device_driver)) != 0) {
+	rc = platform_driver_register(&device_driver);
+	if (rc != 0) {
 		pr_err("error %d registering platform driver\n", rc);
 		return rc;
 	}
 
 	/* allocate memory for our driver state and initialize it */
-	if (!(driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 					  GIGASET_MODULENAME, GIGASET_DEVNAME,
-					  &ops, THIS_MODULE)))
+					  &ops, THIS_MODULE);
+	if (!driver)
 		goto error;
 
-	if ((rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc)) != 0) {
+	rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
+	if (rc != 0) {
 		pr_err("error %d registering line discipline\n", rc);
 		goto error;
 	}
@@ -808,7 +825,8 @@
 		driver = NULL;
 	}
 
-	if ((rc = tty_unregister_ldisc(N_GIGASET_M101)) != 0)
+	rc = tty_unregister_ldisc(N_GIGASET_M101);
+	if (rc != 0)
 		pr_err("error %d unregistering line discipline\n", rc);
 
 	platform_driver_unregister(&device_driver);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 4deb1ab0..f56b2a8 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -43,14 +43,14 @@
 #define GIGASET_MODULENAME "usb_gigaset"
 #define GIGASET_DEVNAME    "ttyGU"
 
-#define IF_WRITEBUF 2000 //FIXME  // WAKEUP_CHARS: 256
+#define IF_WRITEBUF 2000	/* arbitrary limit */
 
 /* Values for the Gigaset M105 Data */
 #define USB_M105_VENDOR_ID	0x0681
 #define USB_M105_PRODUCT_ID	0x0009
 
 /* table of devices that work with this driver */
-static const struct usb_device_id gigaset_table [] = {
+static const struct usb_device_id gigaset_table[] = {
 	{ USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
 	{ }					/* Terminating entry */
 };
@@ -97,8 +97,8 @@
  *       41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
  *            Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
  *            xx is usually 0x00 but was 0x7e before starting data transfer
- *            in unimodem mode. So, this might be an array of characters that need
- *            special treatment ("commit all bufferd data"?), 11=^Q, 13=^S.
+ *            in unimodem mode. So, this might be an array of characters that
+ *            need special treatment ("commit all buffered data"?), 11=^Q, 13=^S.
  *
  * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
  * flags per packet.
@@ -114,7 +114,7 @@
 static int gigaset_resume(struct usb_interface *intf);
 static int gigaset_pre_reset(struct usb_interface *intf);
 
-static struct gigaset_driver *driver = NULL;
+static struct gigaset_driver *driver;
 
 /* usb specific object needed to register this driver with the usb subsystem */
 static struct usb_driver gigaset_usb_driver = {
@@ -141,6 +141,7 @@
 	struct urb		*bulk_out_urb;
 
 	/* Input buffer */
+	unsigned char		*rcvbuf;
 	int			rcvbuf_size;
 	struct urb		*read_urb;
 	__u8			int_in_endpointAddr;
@@ -164,13 +165,11 @@
 	val = tiocm_to_gigaset(new_state);
 
 	gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
-	// don't use this in an interrupt/BH
 	r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41,
 			    (val & 0xff) | ((mask & 0xff) << 8), 0,
 			    NULL, 0, 2000 /* timeout? */);
 	if (r < 0)
 		return r;
-	//..
 	return 0;
 }
 
@@ -220,7 +219,6 @@
 	cflag &= CBAUD;
 
 	switch (cflag) {
-	//FIXME more values?
 	case    B300: rate =     300; break;
 	case    B600: rate =     600; break;
 	case   B1200: rate =    1200; break;
@@ -273,7 +271,7 @@
 	/* set the number of stop bits */
 	if (cflag & CSTOPB) {
 		if ((cflag & CSIZE) == CS5)
-			val |= 1; /* 1.5 stop bits */ //FIXME is this okay?
+			val |= 1; /* 1.5 stop bits */
 		else
 			val |= 2; /* 2 stop bits */
 	}
@@ -282,7 +280,7 @@
 }
 
 
- /*================================================================================================================*/
+/*============================================================================*/
 static int gigaset_init_bchannel(struct bc_state *bcs)
 {
 	/* nothing to do for M10x */
@@ -344,7 +342,6 @@
 			if (write_modem(cs) < 0) {
 				gig_dbg(DEBUG_OUTPUT,
 					"modem_fill: write_modem failed");
-				// FIXME should we tell the LL?
 				again = 1; /* no callback will be called! */
 			}
 		}
@@ -356,8 +353,8 @@
  */
 static void gigaset_read_int_callback(struct urb *urb)
 {
-	struct inbuf_t *inbuf = urb->context;
-	struct cardstate *cs = inbuf->cs;
+	struct cardstate *cs = urb->context;
+	struct inbuf_t *inbuf = cs->inbuf;
 	int status = urb->status;
 	int r;
 	unsigned numbytes;
@@ -368,7 +365,7 @@
 		numbytes = urb->actual_length;
 
 		if (numbytes) {
-			src = inbuf->rcvbuf;
+			src = cs->hw.usb->rcvbuf;
 			if (unlikely(*src))
 				dev_warn(cs->dev,
 				    "%s: There was no leading 0, but 0x%02x!\n",
@@ -440,7 +437,7 @@
 	struct cmdbuf_t *tcb;
 	unsigned long flags;
 	int count;
-	int status = -ENOENT; // FIXME
+	int status = -ENOENT;
 	struct usb_cardstate *ucs = cs->hw.usb;
 
 	do {
@@ -480,7 +477,9 @@
 			ucs->busy = 1;
 
 			spin_lock_irqsave(&cs->lock, flags);
-			status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV;
+			status = cs->connected ?
+				usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) :
+				-ENODEV;
 			spin_unlock_irqrestore(&cs->lock, flags);
 
 			if (status) {
@@ -510,8 +509,8 @@
 
 	if (len <= 0)
 		return 0;
-
-	if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
+	cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
+	if (!cb) {
 		dev_err(cs->dev, "%s: out of memory\n", __func__);
 		return -ENOMEM;
 	}
@@ -637,9 +636,7 @@
 		return -EINVAL;
 	}
 
-	/* Copy data to bulk out buffer and  // FIXME copying not necessary
-	 * transmit data
-	 */
+	/* Copy data to bulk out buffer and transmit data */
 	count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
 	skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
 	skb_pull(bcs->tx_skb, count);
@@ -650,7 +647,8 @@
 	if (cs->connected) {
 		usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
 				  usb_sndbulkpipe(ucs->udev,
-						  ucs->bulk_out_endpointAddr & 0x0f),
+						  ucs->bulk_out_endpointAddr &
+						  0x0f),
 				  ucs->bulk_out_buffer, count,
 				  gigaset_write_bulk_callback, cs);
 		ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
@@ -666,7 +664,7 @@
 
 	if (!bcs->tx_skb->len) {
 		/* skb sent completely */
-		gigaset_skb_sent(bcs, bcs->tx_skb); //FIXME also, when ret<0?
+		gigaset_skb_sent(bcs, bcs->tx_skb);
 
 		gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
 			(unsigned long) bcs->tx_skb);
@@ -763,8 +761,8 @@
 	buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
 	ucs->rcvbuf_size = buffer_size;
 	ucs->int_in_endpointAddr = endpoint->bEndpointAddress;
-	cs->inbuf[0].rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
-	if (!cs->inbuf[0].rcvbuf) {
+	ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!ucs->rcvbuf) {
 		dev_err(cs->dev, "Couldn't allocate rcvbuf\n");
 		retval = -ENOMEM;
 		goto error;
@@ -773,9 +771,9 @@
 	usb_fill_int_urb(ucs->read_urb, udev,
 			 usb_rcvintpipe(udev,
 					endpoint->bEndpointAddress & 0x0f),
-			 cs->inbuf[0].rcvbuf, buffer_size,
+			 ucs->rcvbuf, buffer_size,
 			 gigaset_read_int_callback,
-			 cs->inbuf + 0, endpoint->bInterval);
+			 cs, endpoint->bInterval);
 
 	retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
 	if (retval) {
@@ -789,7 +787,7 @@
 
 	if (!gigaset_start(cs)) {
 		tasklet_kill(&cs->write_tasklet);
-		retval = -ENODEV; //FIXME
+		retval = -ENODEV;
 		goto error;
 	}
 	return 0;
@@ -798,11 +796,11 @@
 	usb_kill_urb(ucs->read_urb);
 	kfree(ucs->bulk_out_buffer);
 	usb_free_urb(ucs->bulk_out_urb);
-	kfree(cs->inbuf[0].rcvbuf);
+	kfree(ucs->rcvbuf);
 	usb_free_urb(ucs->read_urb);
 	usb_set_intfdata(interface, NULL);
 	ucs->read_urb = ucs->bulk_out_urb = NULL;
-	cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
+	ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
 	usb_put_dev(ucs->udev);
 	ucs->udev = NULL;
 	ucs->interface = NULL;
@@ -831,10 +829,10 @@
 
 	kfree(ucs->bulk_out_buffer);
 	usb_free_urb(ucs->bulk_out_urb);
-	kfree(cs->inbuf[0].rcvbuf);
+	kfree(ucs->rcvbuf);
 	usb_free_urb(ucs->read_urb);
 	ucs->read_urb = ucs->bulk_out_urb = NULL;
-	cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
+	ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
 
 	usb_put_dev(ucs->udev);
 	ucs->interface = NULL;
@@ -916,9 +914,10 @@
 	int result;
 
 	/* allocate memory for our driver state and intialize it */
-	if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
-				       GIGASET_MODULENAME, GIGASET_DEVNAME,
-				       &ops, THIS_MODULE)) == NULL)
+	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
+				    GIGASET_MODULENAME, GIGASET_DEVNAME,
+				    &ops, THIS_MODULE);
+	if (driver == NULL)
 		goto error;
 
 	/* register this driver with the USB subsystem */
diff --git a/drivers/isdn/hardware/eicon/maintidi.c b/drivers/isdn/hardware/eicon/maintidi.c
index 23960cb..41c26e7 100644
--- a/drivers/isdn/hardware/eicon/maintidi.c
+++ b/drivers/isdn/hardware/eicon/maintidi.c
@@ -959,8 +959,9 @@
 	}
 	if (!strncmp("State\\Layer2 No1", path, pVar->path_length)) {
 		char* tmp = &pLib->lines[0].pInterface->Layer2[0];
-    dword l2_state;
-    diva_strace_read_uint (pVar, &l2_state);
+		dword l2_state;
+		if (diva_strace_read_uint(pVar, &l2_state))
+			return -1;
 
 		switch (l2_state) {
 			case 0:
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 27d5dd6..ae89fb8 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -2692,7 +2692,7 @@
           if (!(fax_control_bits & T30_CONTROL_BIT_MORE_DOCUMENTS)
            || (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS))
           {
-            len = (byte)(&(((T30_INFO *) 0)->universal_6));
+            len = offsetof(T30_INFO, universal_6);
             fax_info_change = false;
             if (ncpi->length >= 4)
             {
@@ -2754,7 +2754,7 @@
                     for (i = 0; i < w; i++)
                       ((T30_INFO   *)(plci->fax_connect_info_buffer))->station_id[i] = fax_parms[4].info[1+i];
                     ((T30_INFO   *)(plci->fax_connect_info_buffer))->head_line_len = 0;
-                    len = (byte)(((T30_INFO *) 0)->station_id + 20);
+                    len = offsetof(T30_INFO, station_id) + 20;
                     w = fax_parms[5].length;
                     if (w > 20)
                       w = 20;
@@ -2788,7 +2788,7 @@
                 }
                 else
                 {
-                  len = (byte)(&(((T30_INFO *) 0)->universal_6));
+                  len = offsetof(T30_INFO, universal_6);
                 }
                 fax_info_change = true;
 
@@ -2892,7 +2892,7 @@
     && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
     && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
    {
-            len = ((byte)(((T30_INFO *) 0)->station_id + 20));
+            len = offsetof(T30_INFO, station_id) + 20;
             if (plci->fax_connect_info_length < len)
             {
               ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
@@ -3802,7 +3802,7 @@
       break;
     }
     ncpi = &m_parms[1];
-    len = ((byte)(((T30_INFO *) 0)->station_id + 20));
+    len = offsetof(T30_INFO, station_id) + 20;
     if (plci->fax_connect_info_length < len)
     {
       ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
@@ -6844,7 +6844,7 @@
         if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id-1])
           & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
         {
-          i = ((word)(((T30_INFO *) 0)->station_id + 20)) + ((T30_INFO   *)plci->NL.RBuffer->P)->head_line_len;
+          i = offsetof(T30_INFO, station_id) + 20 + ((T30_INFO   *)plci->NL.RBuffer->P)->head_line_len;
           while (i < plci->NL.RBuffer->length)
             plci->ncpi_buffer[++len] = plci->NL.RBuffer->P[i++];
         }
@@ -7236,7 +7236,7 @@
     {
       plci->RData[1].P = plci->RData[0].P;
       plci->RData[1].PLength = plci->RData[0].PLength;
-      plci->RData[0].P = v120_header_buffer + (-((int) v120_header_buffer) & 3);
+      plci->RData[0].P = v120_header_buffer + (-((unsigned long)v120_header_buffer) & 3);
       if ((plci->NL.RBuffer->P[0] & V120_HEADER_EXTEND_BIT) || (plci->NL.RLength == 1))
         plci->RData[0].PLength = 1;
       else
@@ -8473,7 +8473,7 @@
             fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING;
           }
             len = nlc[0];
-          pos = ((byte)(((T30_INFO *) 0)->station_id + 20));
+          pos = offsetof(T30_INFO, station_id) + 20;
    if (pos < plci->fax_connect_info_length)
    {
      for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
@@ -8525,7 +8525,7 @@
       }
 
       PUT_WORD(&(((T30_INFO *)&nlc[1])->control_bits_low), fax_control_bits);
-      len = ((byte)(((T30_INFO *) 0)->station_id + 20));
+      len = offsetof(T30_INFO, station_id) + 20;
       for (i = 0; i < len; i++)
         plci->fax_connect_info_buffer[i] = nlc[1+i];
       ((T30_INFO   *) plci->fax_connect_info_buffer)->head_line_len = 0;
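The message.c hunks above replace the old null-pointer offset idiom, (byte)(((T30_INFO *) 0)->station_id + 20), with offsetof(). As a rough standalone illustration (the struct below is hypothetical, not the real T30_INFO), both expressions compute the same member offset, but offsetof() avoids the null-pointer arithmetic and the narrowing byte cast:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical layout for illustration only -- not the real T30_INFO */
	struct t30_like {
		unsigned char code;
		unsigned char station_id[20];
	};

	int main(void)
	{
		/* old idiom: member address taken through a null pointer */
		size_t len_old = (uintptr_t)(((struct t30_like *)0)->station_id + 20);
		/* form used by the patch: well defined, no null-pointer arithmetic */
		size_t len_new = offsetof(struct t30_like, station_id) + 20;

		printf("%zu %zu\n", len_old, len_new);	/* both print 21 here */
		return 0;
	}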
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index bf526a7a..d6fdf1f 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -594,6 +594,7 @@
 				if (cs->debug & L1_DEB_WARN)
 					debugl1(cs, "Amd7930: l1hw: l2l1 tx_skb exist this shouldn't happen");
 				skb_queue_tail(&cs->sq, skb);
+				spin_unlock_irqrestore(&cs->lock, flags);
 				break;
 			}
 			if (cs->debug & DEB_DLOG_HEX)
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 018bd29..0b0c2e5d 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -382,7 +382,7 @@
 {
 	int to = 50;
 
-	while ((!(MemReadHSCX(cs, hscx, HSCX_STAR) & 0x44) == 0x40) && to) {
+	while (((MemReadHSCX(cs, hscx, HSCX_STAR) & 0x44) != 0x40) && to) {
 		udelay(1);
 		to--;
 	}
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 9de5420..a420b64 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -817,8 +817,8 @@
 	}
 	/* we have a complete hdlc packet */
 	if (finish) {
-		if ((!fifo->skbuff->data[fifo->skbuff->len - 1])
-		    && (fifo->skbuff->len > 3)) {
+		if (fifo->skbuff->len > 3 &&
+				!fifo->skbuff->data[fifo->skbuff->len - 1]) {
 
 			if (fifon == HFCUSB_D_RX) {
 				DBG(HFCUSB_DBG_DCHANNEL,
diff --git a/drivers/isdn/hisax/hscx_irq.c b/drivers/isdn/hisax/hscx_irq.c
index 7b1ad5e..2387d76 100644
--- a/drivers/isdn/hisax/hscx_irq.c
+++ b/drivers/isdn/hisax/hscx_irq.c
@@ -32,7 +32,7 @@
 {
 	int to = 50;
 
-	while ((!(READHSCX(cs, hscx, HSCX_STAR) & 0x44) == 0x40) && to) {
+	while (((READHSCX(cs, hscx, HSCX_STAR) & 0x44) != 0x40) && to) {
 		udelay(1);
 		to--;
 	}
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index 9aba646..c80cbb8 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -468,6 +468,7 @@
 				if (cs->debug & L1_DEB_WARN)
 					debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
 				skb_queue_tail(&cs->sq, skb);
+				spin_unlock_irqrestore(&cs->lock, flags);
 				break;
 			}
 			if (cs->debug & DEB_DLOG_HEX)
diff --git a/drivers/isdn/i4l/isdn_net.h b/drivers/isdn/i4l/isdn_net.h
index 74032d0..7511f08 100644
--- a/drivers/isdn/i4l/isdn_net.h
+++ b/drivers/isdn/i4l/isdn_net.h
@@ -83,19 +83,19 @@
 
 	spin_lock_irqsave(&nd->queue_lock, flags);
 	lp = nd->queue;         /* get lp on top of queue */
-	spin_lock(&nd->queue->xmit_lock);
 	while (isdn_net_lp_busy(nd->queue)) {
-		spin_unlock(&nd->queue->xmit_lock);
 		nd->queue = nd->queue->next;
 		if (nd->queue == lp) { /* not found -- should never happen */
 			lp = NULL;
 			goto errout;
 		}
-		spin_lock(&nd->queue->xmit_lock);
 	}
 	lp = nd->queue;
 	nd->queue = nd->queue->next;
+	spin_unlock_irqrestore(&nd->queue_lock, flags);
+	spin_lock(&lp->xmit_lock);
 	local_bh_disable();
+	return lp;
 errout:
 	spin_unlock_irqrestore(&nd->queue_lock, flags);
 	return lp;
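The isdn_net.h change above reorders the locking so that the per-channel xmit_lock is taken only after queue_lock has been dropped, and the selected channel is returned with its xmit_lock held and bottom halves disabled. A simplified sketch of that pattern, with generic names rather than the driver's real types:

	#include <linux/spinlock.h>
	#include <linux/interrupt.h>

	struct chan {
		spinlock_t	xmit_lock;
		int		busy;
		struct chan	*next;			/* circular list */
	};

	struct bundle {
		spinlock_t	queue_lock;
		struct chan	*queue;
	};

	static struct chan *get_locked_chan(struct bundle *nd)
	{
		struct chan *first, *lp;
		unsigned long flags;

		spin_lock_irqsave(&nd->queue_lock, flags);
		first = nd->queue;
		while (nd->queue->busy) {
			nd->queue = nd->queue->next;
			if (nd->queue == first) {	/* nothing usable */
				spin_unlock_irqrestore(&nd->queue_lock, flags);
				return NULL;
			}
		}
		lp = nd->queue;
		nd->queue = nd->queue->next;
		spin_unlock_irqrestore(&nd->queue_lock, flags);
		/* only now take the channel's own lock, so the two locks never nest */
		spin_lock(&lp->xmit_lock);
		local_bh_disable();
		return lp;
	}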
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 28182ed..fcfe17a 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -779,7 +779,7 @@
 }
 
 static int
-mISDN_sock_create(struct net *net, struct socket *sock, int proto)
+mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
 {
 	int err = -EPROTONOSUPPORT;
 
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 3e1532a..0d05ec4 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -364,7 +364,7 @@
 static int
 st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
 {
-	if (!ch->st || ch->st->layer1)
+	if (!ch->st || !ch->st->layer1)
 		return -EINVAL;
 	return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
 }
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index df1f86b..a2ea383 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -249,5 +249,6 @@
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
+source "drivers/misc/iwmc3200top/Kconfig"
 
 endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f982d2e..e311267 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -21,5 +21,6 @@
 obj-$(CONFIG_ISL29003)		+= isl29003.o
 obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o
 obj-$(CONFIG_C2PORT)		+= c2port/
+obj-$(CONFIG_IWMC3200TOP)	+= iwmc3200top/
 obj-y				+= eeprom/
 obj-y				+= cb710/
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig
new file mode 100644
index 0000000..9e4b88f
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Kconfig
@@ -0,0 +1,20 @@
+config IWMC3200TOP
+	tristate "Intel Wireless MultiCom Top Driver"
+	depends on MMC && EXPERIMENTAL
+	select FW_LOADER
+	---help---
+	  Intel Wireless MultiCom 3200 Top driver is responsible for
+	  firmware load and enabled coms enumeration
+
+config IWMC3200TOP_DEBUG
+	bool "Enable full debug output of iwmc3200top Driver"
+	depends on IWMC3200TOP
+	---help---
+	  Enable full debug output of iwmc3200top Driver
+
+config IWMC3200TOP_DEBUGFS
+	bool "Enable Debugfs debugging interface for iwmc3200top"
+	depends on IWMC3200TOP
+	---help---
+	  Enable creation of debugfs files for iwmc3200top
+
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile
new file mode 100644
index 0000000..fbf53fb
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Makefile
@@ -0,0 +1,29 @@
+# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+# drivers/misc/iwmc3200top/Makefile
+#
+# Copyright (C) 2009 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License version
+# 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+#
+#
+# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+#  -
+#
+#
+
+obj-$(CONFIG_IWMC3200TOP)	+= iwmc3200top.o
+iwmc3200top-objs	:= main.o fw-download.o
+iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
+iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c
new file mode 100644
index 0000000..0c8ea0a1
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.c
@@ -0,0 +1,133 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/debugfs.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio.h>
+#include <linux/debugfs.h>
+
+#include "iwmc3200top.h"
+#include "fw-msg.h"
+#include "log.h"
+#include "debugfs.h"
+
+
+
+/*      Constants definition        */
+#define HEXADECIMAL_RADIX	16
+
+/*      Functions definition        */
+
+
+#define DEBUGFS_ADD(name, parent) do {					\
+	dbgfs->dbgfs_##parent##_files.file_##name =			\
+	debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv,	\
+				&iwmct_dbgfs_##name##_ops);		\
+} while (0)
+
+#define DEBUGFS_RM(name)  do {		\
+	debugfs_remove(name);		\
+	name = NULL;			\
+} while (0)
+
+#define DEBUGFS_READ_FUNC(name)						\
+ssize_t iwmct_dbgfs_##name##_read(struct file *file,			\
+				  char __user *user_buf,		\
+				  size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name)					\
+ssize_t iwmct_dbgfs_##name##_write(struct file *file,			\
+				   const char __user *user_buf,		\
+				   size_t count, loff_t *ppos);
+
+#define DEBUGFS_READ_FILE_OPS(name)					\
+	DEBUGFS_READ_FUNC(name)						\
+	static const struct file_operations iwmct_dbgfs_##name##_ops = {  \
+		.read = iwmct_dbgfs_##name##_read,			\
+		.open = iwmct_dbgfs_open_file_generic,			\
+	};
+
+#define DEBUGFS_WRITE_FILE_OPS(name)					\
+	DEBUGFS_WRITE_FUNC(name)					\
+	static const struct file_operations iwmct_dbgfs_##name##_ops = {  \
+		.write = iwmct_dbgfs_##name##_write,			\
+		.open = iwmct_dbgfs_open_file_generic,			\
+	};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
+	DEBUGFS_READ_FUNC(name)						\
+	DEBUGFS_WRITE_FUNC(name)					\
+	static const struct file_operations iwmct_dbgfs_##name##_ops = {\
+		.write = iwmct_dbgfs_##name##_write,			\
+		.read = iwmct_dbgfs_##name##_read,			\
+		.open = iwmct_dbgfs_open_file_generic,			\
+	};
+
+
+/*      Debugfs file ops definitions        */
+
+/*
+ * Create the debugfs files and directories
+ *
+ */
+void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
+{
+	struct iwmct_debugfs *dbgfs;
+
+	dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
+	if (!dbgfs) {
+		LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
+					sizeof(struct iwmct_debugfs));
+		return;
+	}
+
+	priv->dbgfs = dbgfs;
+	dbgfs->name = name;
+	dbgfs->dir_drv = debugfs_create_dir(name, NULL);
+	if (!dbgfs->dir_drv) {
+		LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
+		return;
+	}
+
+	return;
+}
+
+/**
+ * Remove the debugfs files and directories
+ *
+ */
+void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
+{
+	if (!dbgfs)
+		return;
+
+	DEBUGFS_RM(dbgfs->dir_drv);
+	kfree(dbgfs);
+	dbgfs = NULL;
+}
+
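The *_FILE_OPS and DEBUGFS_ADD macros in debugfs.c appear to be unused scaffolding in this version; iwmct_dbgfs_register() only creates dir_drv. For reference, a hypothetical read-only "status" entry (not part of this driver) would expand to roughly the following, and would additionally need a struct dentry *file_status member in dbgfs_drv_files:

	/* DEBUGFS_READ_FILE_OPS(status) expands approximately to: */
	ssize_t iwmct_dbgfs_status_read(struct file *file, char __user *user_buf,
					size_t count, loff_t *ppos);
	static const struct file_operations iwmct_dbgfs_status_ops = {
		.read = iwmct_dbgfs_status_read,
		.open = iwmct_dbgfs_open_file_generic,
	};

	/* and DEBUGFS_ADD(status, drv) would then create the file under dir_drv:
	 *
	 *	dbgfs->dbgfs_drv_files.file_status =
	 *		debugfs_create_file("status", 0644, dbgfs->dir_drv, priv,
	 *				    &iwmct_dbgfs_status_ops);
	 */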
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h
new file mode 100644
index 0000000..71d4575
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.h
@@ -0,0 +1,58 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/debugfs.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#ifndef __DEBUGFS_H__
+#define __DEBUGFS_H__
+
+
+#ifdef CONFIG_IWMC3200TOP_DEBUGFS
+
+struct iwmct_debugfs {
+	const char *name;
+	struct dentry *dir_drv;
+	struct dir_drv_files {
+	} dbgfs_drv_files;
+};
+
+void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
+void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);
+
+#else /* CONFIG_IWMC3200TOP_DEBUGFS */
+
+struct iwmct_debugfs;
+
+static inline void
+iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
+{}
+
+static inline void
+iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
+{}
+
+#endif /* CONFIG_IWMC3200TOP_DEBUGFS */
+
+#endif /* __DEBUGFS_H__ */
+
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
new file mode 100644
index 0000000..33cb693
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -0,0 +1,359 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/fw-download.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/mmc/sdio_func.h>
+#include <asm/unaligned.h>
+
+#include "iwmc3200top.h"
+#include "log.h"
+#include "fw-msg.h"
+
+#define CHECKSUM_BYTES_NUM sizeof(u32)
+
+/*
+ * init parser struct with file
+ */
+static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
+			      size_t file_size, size_t block_size)
+{
+	struct iwmct_parser *parser = &priv->parser;
+	struct iwmct_fw_hdr *fw_hdr = &parser->versions;
+
+	LOG_INFOEX(priv, INIT, "-->\n");
+
+	LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
+
+	parser->file = file;
+	parser->file_size = file_size;
+	parser->cur_pos = 0;
+	parser->buf = NULL;
+
+	parser->buf = kzalloc(block_size, GFP_KERNEL);
+	if (!parser->buf) {
+		LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
+		return -ENOMEM;
+	}
+	parser->buf_size = block_size;
+
+	/* extract fw versions */
+	memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
+	LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
+		"top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
+		fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
+		fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
+		fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
+		fw_hdr->tic_name);
+
+	parser->cur_pos += sizeof(struct iwmct_fw_hdr);
+
+	LOG_INFOEX(priv, INIT, "<--\n");
+	return 0;
+}
+
+static bool iwmct_checksum(struct iwmct_priv *priv)
+{
+	struct iwmct_parser *parser = &priv->parser;
+	__le32 *file = (__le32 *)parser->file;
+	int i, pad, steps;
+	u32 accum = 0;
+	u32 checksum;
+	u32 mask = 0xffffffff;
+
+	pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
+	steps =  (parser->file_size - CHECKSUM_BYTES_NUM) / 4;
+
+	LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);
+
+	for (i = 0; i < steps; i++)
+		accum += le32_to_cpu(file[i]);
+
+	if (pad) {
+		mask <<= 8 * (4 - pad);
+		accum += le32_to_cpu(file[steps]) & mask;
+	}
+
+	checksum = get_unaligned_le32((__le32 *)(parser->file +
+			parser->file_size - CHECKSUM_BYTES_NUM));
+
+	LOG_INFO(priv, FW_DOWNLOAD,
+		"compare checksum accum=0x%x to checksum=0x%x\n",
+		accum, checksum);
+
+	return checksum == accum;
+}
+
+static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
+				  size_t *sec_size, __le32 *sec_addr)
+{
+	struct iwmct_parser *parser = &priv->parser;
+	struct iwmct_dbg *dbg = &priv->dbg;
+	struct iwmct_fw_sec_hdr *sec_hdr;
+
+	LOG_INFOEX(priv, INIT, "-->\n");
+
+	while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
+		<= parser->file_size) {
+
+		sec_hdr = (struct iwmct_fw_sec_hdr *)
+				(parser->file + parser->cur_pos);
+		parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
+
+		LOG_INFO(priv, FW_DOWNLOAD,
+			"sec hdr: type=%s addr=0x%x size=%d\n",
+			sec_hdr->type, sec_hdr->target_addr,
+			sec_hdr->data_size);
+
+		if (strcmp(sec_hdr->type, "ENT") == 0)
+			parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
+		else if (strcmp(sec_hdr->type, "LBL") == 0)
+			strcpy(dbg->label_fw, parser->file + parser->cur_pos);
+		else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
+			  (priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
+			 ((strcmp(sec_hdr->type, "GPS") == 0) &&
+			  (priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
+			 ((strcmp(sec_hdr->type, "BTH") == 0) &&
+			  (priv->barker & BARKER_DNLOAD_BT_MSK))) {
+			*sec_addr = sec_hdr->target_addr;
+			*sec_size = le32_to_cpu(sec_hdr->data_size);
+			*p_sec = parser->file + parser->cur_pos;
+			parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
+			return 1;
+		} else if (strcmp(sec_hdr->type, "LOG") != 0)
+			LOG_WARNING(priv, FW_DOWNLOAD,
+				    "skipping section type %s\n",
+				    sec_hdr->type);
+
+		parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
+		LOG_INFO(priv, FW_DOWNLOAD,
+			"finished with section cur_pos=%zd\n", parser->cur_pos);
+	}
+
+	LOG_INFOEX(priv, INIT, "<--\n");
+	return 0;
+}
+
+static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
+				size_t sec_size, __le32 addr)
+{
+	struct iwmct_parser *parser = &priv->parser;
+	struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
+	const u8 *cur_block = p_sec;
+	size_t sent = 0;
+	int cnt = 0;
+	int ret = 0;
+	u32 cmd = 0;
+
+	LOG_INFOEX(priv, INIT, "-->\n");
+	LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
+				addr, sec_size);
+
+	while (sent < sec_size) {
+		int i;
+		u32 chksm = 0;
+		u32 reset = atomic_read(&priv->reset);
+		/* actual FW data */
+		u32 data_size = min(parser->buf_size - sizeof(*hdr),
+				    sec_size - sent);
+		/* Pad to block size */
+		u32 trans_size = (data_size + sizeof(*hdr) +
+				  IWMC_SDIO_BLK_SIZE - 1) &
+				  ~(IWMC_SDIO_BLK_SIZE - 1);
+		++cnt;
+
+		/* in case of reset, interrupt FW DOWNLOAD */
+		if (reset) {
+			LOG_INFO(priv, FW_DOWNLOAD,
+				 "Reset detected. Abort FW download!!!");
+			ret = -ECANCELED;
+			goto exit;
+		}
+
+		memset(parser->buf, 0, parser->buf_size);
+		cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
+		cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
+		cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
+		cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
+		hdr->data_size = cpu_to_le32(data_size);
+		hdr->target_addr = addr;
+
+		/* checksum is allowed for sizes divisible by 4 */
+		if (data_size & 0x3)
+			cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;
+
+		memcpy(hdr->data, cur_block, data_size);
+
+
+		if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {
+
+			chksm = data_size + le32_to_cpu(addr) + cmd;
+			for (i = 0; i < data_size >> 2; i++)
+				chksm += ((u32 *)cur_block)[i];
+
+			hdr->block_chksm = cpu_to_le32(chksm);
+			LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
+				 hdr->block_chksm);
+		}
+
+		LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
+				"sec_size=%zd, startAddress 0x%X\n",
+				cnt, trans_size, sent, sec_size, addr);
+
+		if (priv->dbg.dump)
+			LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);
+
+
+		hdr->cmd = cpu_to_le32(cmd);
+		/* send it down */
+		/* TODO: add more proper sending and error checking */
+		ret = iwmct_tx(priv, 0, parser->buf, trans_size);
+		if (ret != 0) {
+			LOG_INFO(priv, FW_DOWNLOAD,
+				"iwmct_tx returned %d\n", ret);
+			goto exit;
+		}
+
+		addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
+		sent += data_size;
+		cur_block = p_sec + sent;
+
+		if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
+			LOG_INFO(priv, FW_DOWNLOAD,
+				"Block number limit is reached [%d]\n",
+				priv->dbg.blocks);
+			break;
+		}
+	}
+
+	if (sent < sec_size)
+		ret = -EINVAL;
+exit:
+	LOG_INFOEX(priv, INIT, "<--\n");
+	return ret;
+}
+
+static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
+{
+	struct iwmct_parser *parser = &priv->parser;
+	struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
+	int ret;
+	u32 cmd;
+
+	LOG_INFOEX(priv, INIT, "-->\n");
+
+	memset(parser->buf, 0, parser->buf_size);
+	cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
+	if (jump) {
+		cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
+		hdr->target_addr = cpu_to_le32(parser->entry_point);
+		LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
+				parser->entry_point);
+	} else {
+		cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
+		LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
+	}
+
+	hdr->cmd = cpu_to_le32(cmd);
+
+	LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
+	/* send it down */
+	/* TODO: add more proper sending and error checking */
+	ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE);
+	if (ret)
+		LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
+
+	LOG_INFOEX(priv, INIT, "<--\n");
+	return 0;
+}
+
+int iwmct_fw_load(struct iwmct_priv *priv)
+{
+	const struct firmware *raw = NULL;
+	__le32 addr;
+	size_t len;
+	const u8 *pdata;
+	const u8 *name = "iwmc3200top.1.fw";
+	int ret = 0;
+
+	/* clear parser struct */
+	memset(&priv->parser, 0, sizeof(struct iwmct_parser));
+	if (!name) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* get the firmware */
+	ret = request_firmware(&raw, name, &priv->func->dev);
+	if (ret < 0) {
+		LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
+			  name, ret);
+		goto exit;
+	}
+
+	if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
+		LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller than (%zd) (%zd)\n",
+			  name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
+		goto exit;
+	}
+
+	LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", name);
+
+	ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
+	if (ret < 0) {
+		LOG_ERROR(priv, FW_DOWNLOAD,
+			  "iwmct_parser_init failed: Reason %d\n", ret);
+		goto exit;
+	}
+
+	/* checksum  */
+	if (!iwmct_checksum(priv)) {
+		LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* download firmware to device */
+	while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
+		if (iwmct_download_section(priv, pdata, len, addr)) {
+			LOG_ERROR(priv, FW_DOWNLOAD,
+				  "%s download section failed\n", name);
+			ret = -EIO;
+			goto exit;
+		}
+	}
+
+	iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
+
+exit:
+	kfree(priv->parser.buf);
+
+	if (raw)
+		release_firmware(raw);
+
+	raw = NULL;
+
+	return ret;
+}
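For anyone who wants to sanity-check a firmware image on the host, the accumulation done by iwmct_checksum() above can be mirrored in plain C along these lines. This is a sketch only; it assumes the same layout, with the 32-bit little-endian checksum stored in the last four bytes of the file and a partial trailing word masked exactly as the driver does:

	#include <stddef.h>
	#include <stdint.h>

	/* read a 32-bit little-endian word from an arbitrary byte offset */
	static uint32_t le32(const uint8_t *p)
	{
		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}

	/* returns nonzero if the trailing checksum matches the accumulated sum */
	static int image_checksum_ok(const uint8_t *img, size_t size)
	{
		size_t body, pad, steps, i;
		uint32_t accum = 0;

		if (size < 4)
			return 0;
		body = size - 4;		/* last 4 bytes hold the checksum */
		pad = body % 4;
		steps = body / 4;

		for (i = 0; i < steps; i++)
			accum += le32(img + 4 * i);
		if (pad)			/* partial last word, masked as in the driver */
			accum += le32(img + 4 * steps) &
				 (0xffffffffu << 8 * (4 - pad));

		return accum == le32(img + body);
	}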
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h
new file mode 100644
index 0000000..9e26b75
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-msg.h
@@ -0,0 +1,113 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/fw-msg.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#ifndef __FWMSG_H__
+#define __FWMSG_H__
+
+#define COMM_TYPE_D2H           	0xFF
+#define COMM_TYPE_H2D           	0xEE
+
+#define COMM_CATEGORY_OPERATIONAL      	0x00
+#define COMM_CATEGORY_DEBUG            	0x01
+#define COMM_CATEGORY_TESTABILITY      	0x02
+#define COMM_CATEGORY_DIAGNOSTICS      	0x03
+
+#define OP_DBG_ZSTR_MSG			cpu_to_le16(0x1A)
+
+#define FW_LOG_SRC_MAX			32
+#define FW_LOG_SRC_ALL			255
+
+#define FW_STRING_TABLE_ADDR		cpu_to_le32(0x0C000000)
+
+#define CMD_DBG_LOG_LEVEL		cpu_to_le16(0x0001)
+#define CMD_TST_DEV_RESET		cpu_to_le16(0x0060)
+#define CMD_TST_FUNC_RESET		cpu_to_le16(0x0062)
+#define CMD_TST_IFACE_RESET		cpu_to_le16(0x0064)
+#define CMD_TST_CPU_UTILIZATION		cpu_to_le16(0x0065)
+#define CMD_TST_TOP_DEEP_SLEEP		cpu_to_le16(0x0080)
+#define CMD_TST_WAKEUP			cpu_to_le16(0x0081)
+#define CMD_TST_FUNC_WAKEUP		cpu_to_le16(0x0082)
+#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST	cpu_to_le16(0x0083)
+#define CMD_TST_GET_MEM_DUMP		cpu_to_le16(0x0096)
+
+#define OP_OPR_ALIVE			cpu_to_le16(0x0010)
+#define OP_OPR_CMD_ACK			cpu_to_le16(0x001F)
+#define OP_OPR_CMD_NACK			cpu_to_le16(0x0020)
+#define OP_TST_MEM_DUMP			cpu_to_le16(0x0043)
+
+#define CMD_FLAG_PADDING_256		0x80
+
+#define FW_HCMD_BLOCK_SIZE      	256
+
+struct msg_hdr {
+	u8 type;
+	u8 category;
+	__le16 opcode;
+	u8 seqnum;
+	u8 flags;
+	__le16 length;
+} __attribute__((__packed__));
+
+struct log_hdr {
+	__le32 timestamp;
+	u8 severity;
+	u8 logsource;
+	__le16 reserved;
+} __attribute__((__packed__));
+
+struct mdump_hdr {
+	u8 dmpid;
+	u8 frag;
+	__le16 size;
+	__le32 addr;
+} __attribute__((__packed__));
+
+struct top_msg {
+	struct msg_hdr hdr;
+	union {
+		/* D2H messages */
+		struct {
+			struct log_hdr log_hdr;
+			u8 data[1];
+		} __attribute__((__packed__)) log;
+
+		struct {
+			struct log_hdr log_hdr;
+			struct mdump_hdr md_hdr;
+			u8 data[1];
+		} __attribute__((__packed__)) mdump;
+
+		/* H2D messages */
+		struct {
+			u8 logsource;
+			u8 sevmask;
+		} __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
+		struct mdump_hdr mdump_req;
+	} u;
+} __attribute__((__packed__));
+
+
+#endif /* __FWMSG_H__ */
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
new file mode 100644
index 0000000..f572fcf
--- /dev/null
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -0,0 +1,206 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/iwmc3200top.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#ifndef __IWMC3200TOP_H__
+#define __IWMC3200TOP_H__
+
+#include <linux/workqueue.h>
+
+#define DRV_NAME "iwmc3200top"
+
+#define IWMC_SDIO_BLK_SIZE			256
+#define IWMC_DEFAULT_TR_BLK			64
+#define IWMC_SDIO_DATA_ADDR			0x0
+#define IWMC_SDIO_INTR_ENABLE_ADDR		0x14
+#define IWMC_SDIO_INTR_STATUS_ADDR		0x13
+#define IWMC_SDIO_INTR_CLEAR_ADDR		0x13
+#define IWMC_SDIO_INTR_GET_SIZE_ADDR		0x2C
+
+#define COMM_HUB_HEADER_LENGTH 16
+#define LOGGER_HEADER_LENGTH   10
+
+
+#define BARKER_DNLOAD_BT_POS		0
+#define BARKER_DNLOAD_BT_MSK		BIT(BARKER_DNLOAD_BT_POS)
+#define BARKER_DNLOAD_GPS_POS		1
+#define BARKER_DNLOAD_GPS_MSK		BIT(BARKER_DNLOAD_GPS_POS)
+#define BARKER_DNLOAD_TOP_POS		2
+#define BARKER_DNLOAD_TOP_MSK		BIT(BARKER_DNLOAD_TOP_POS)
+#define BARKER_DNLOAD_RESERVED1_POS	3
+#define BARKER_DNLOAD_RESERVED1_MSK	BIT(BARKER_DNLOAD_RESERVED1_POS)
+#define BARKER_DNLOAD_JUMP_POS		4
+#define BARKER_DNLOAD_JUMP_MSK		BIT(BARKER_DNLOAD_JUMP_POS)
+#define BARKER_DNLOAD_SYNC_POS		5
+#define BARKER_DNLOAD_SYNC_MSK		BIT(BARKER_DNLOAD_SYNC_POS)
+#define BARKER_DNLOAD_RESERVED2_POS	6
+#define BARKER_DNLOAD_RESERVED2_MSK	(0x3 << BARKER_DNLOAD_RESERVED2_POS)
+#define BARKER_DNLOAD_BARKER_POS	8
+#define BARKER_DNLOAD_BARKER_MSK	(0xffffff << BARKER_DNLOAD_BARKER_POS)
+
+#define IWMC_BARKER_REBOOT 	(0xdeadbe << BARKER_DNLOAD_BARKER_POS)
+/* whole field barker */
+#define IWMC_BARKER_ACK 	0xfeedbabe
+
+#define IWMC_CMD_SIGNATURE 	0xcbbc
+
+#define CMD_HDR_OPCODE_POS		0
+#define CMD_HDR_OPCODE_MSK		(0xf << CMD_HDR_OPCODE_POS)
+#define CMD_HDR_RESPONSE_CODE_POS	4
+#define CMD_HDR_RESPONSE_CODE_MSK	(0xf << CMD_HDR_RESPONSE_CODE_POS)
+#define CMD_HDR_USE_CHECKSUM_POS	8
+#define CMD_HDR_USE_CHECKSUM_MSK	BIT(CMD_HDR_USE_CHECKSUM_POS)
+#define CMD_HDR_RESPONSE_REQUIRED_POS	9
+#define CMD_HDR_RESPONSE_REQUIRED_MSK	BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
+#define CMD_HDR_DIRECT_ACCESS_POS	10
+#define CMD_HDR_DIRECT_ACCESS_MSK	BIT(CMD_HDR_DIRECT_ACCESS_POS)
+#define CMD_HDR_RESERVED_POS		11
+#define CMD_HDR_RESERVED_MSK		(0x1f << CMD_HDR_RESERVED_POS)
+#define CMD_HDR_SIGNATURE_POS		16
+#define CMD_HDR_SIGNATURE_MSK		(0xffff << CMD_HDR_SIGNATURE_POS)
+
+enum {
+	IWMC_OPCODE_PING = 0,
+	IWMC_OPCODE_READ = 1,
+	IWMC_OPCODE_WRITE = 2,
+	IWMC_OPCODE_JUMP = 3,
+	IWMC_OPCODE_REBOOT = 4,
+	IWMC_OPCODE_PERSISTENT_WRITE = 5,
+	IWMC_OPCODE_PERSISTENT_READ = 6,
+	IWMC_OPCODE_READ_MODIFY_WRITE = 7,
+	IWMC_OPCODE_LAST_COMMAND = 15
+};
+
+struct iwmct_fw_load_hdr {
+	__le32 cmd;
+	__le32 target_addr;
+	__le32 data_size;
+	__le32 block_chksm;
+	u8 data[0];
+};
+
+/**
+ * struct iwmct_fw_hdr
+ * holds all sw components versions
+ */
+struct iwmct_fw_hdr {
+	u8 top_major;
+	u8 top_minor;
+	u8 top_revision;
+	u8 gps_major;
+	u8 gps_minor;
+	u8 gps_revision;
+	u8 bt_major;
+	u8 bt_minor;
+	u8 bt_revision;
+	u8 tic_name[31];
+};
+
+/**
+ * struct iwmct_fw_sec_hdr
+ * @type: function type
+ * @data_size: section's data size
+ * @target_addr: download address
+ */
+struct iwmct_fw_sec_hdr {
+	u8 type[4];
+	__le32 data_size;
+	__le32 target_addr;
+};
+
+/**
+ * struct iwmct_parser
+ * @file: fw image
+ * @file_size: fw size
+ * @cur_pos: position in file
+ * @buf: temp buf for download
+ * @buf_size: size of buf
+ * @entry_point: address to jump in fw kick-off
+ */
+struct iwmct_parser {
+	const u8 *file;
+	size_t file_size;
+	size_t cur_pos;
+	u8 *buf;
+	size_t buf_size;
+	u32 entry_point;
+	struct iwmct_fw_hdr versions;
+};
+
+
+struct iwmct_work_struct {
+	struct list_head list;
+	ssize_t iosize;
+};
+
+struct iwmct_dbg {
+	int blocks;
+	bool dump;
+	bool jump;
+	bool direct;
+	bool checksum;
+	bool fw_download;
+	int block_size;
+	int download_trans_blks;
+
+	char label_fw[256];
+};
+
+struct iwmct_debugfs;
+
+struct iwmct_priv {
+	struct sdio_func *func;
+	struct iwmct_debugfs *dbgfs;
+	struct iwmct_parser parser;
+	atomic_t reset;
+	atomic_t dev_sync;
+	u32 trans_len;
+	u32 barker;
+	struct iwmct_dbg dbg;
+
+	/* drivers work queue */
+	struct workqueue_struct *wq;
+	struct workqueue_struct *bus_rescan_wq;
+	struct work_struct bus_rescan_worker;
+	struct work_struct isr_worker;
+
+	/* drivers wait queue */
+	wait_queue_head_t wait_q;
+
+	/* rx request list */
+	struct list_head read_req_list;
+};
+
+extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
+		void *src, int count);
+
+extern int iwmct_fw_load(struct iwmct_priv *priv);
+
+extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
+extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
+extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
+extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);
+
+#endif  /*  __IWMC3200TOP_H__  */
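The BARKER_DNLOAD_* definitions above describe the barker word the device sends to request a download; the fw-download.c code above tests only the TOP/GPS/BT and JUMP bits. A small sketch of decoding a made-up barker value with these masks (assumes this header and <linux/kernel.h> are included):

	static void decode_barker_example(u32 barker)
	{
		bool load_top = barker & BARKER_DNLOAD_TOP_MSK;	/* "TOP" sections */
		bool load_gps = barker & BARKER_DNLOAD_GPS_MSK;	/* "GPS" sections */
		bool load_bt  = barker & BARKER_DNLOAD_BT_MSK;	/* "BTH" sections */
		bool do_jump  = barker & BARKER_DNLOAD_JUMP_MSK;	/* jump after load */
		u32 signature = (barker & BARKER_DNLOAD_BARKER_MSK) >>
				BARKER_DNLOAD_BARKER_POS;

		/* IWMC_BARKER_REBOOT carries the signature 0xdeadbe */
		pr_debug("top=%d gps=%d bt=%d jump=%d sig=0x%x\n",
			 load_top, load_gps, load_bt, do_jump, signature);
	}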
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c
new file mode 100644
index 0000000..d569279
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.c
@@ -0,0 +1,347 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/log.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/ctype.h>
+#include "fw-msg.h"
+#include "iwmc3200top.h"
+#include "log.h"
+
+/* Maximal hexadecimal string size of the FW memdump message */
+#define LOG_MSG_SIZE_MAX		12400
+
+/* iwmct_logdefs is a global used by log macros */
+u8 iwmct_logdefs[LOG_SRC_MAX];
+static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
+
+
+static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
+{
+	int i;
+
+	if (src < size)
+		logdefs[src] = logmask;
+	else if (src == LOG_SRC_ALL)
+		for (i = 0; i < size; i++)
+			logdefs[i] = logmask;
+	else
+		return -1;
+
+	return 0;
+}
+
+
+int iwmct_log_set_filter(u8 src, u8 logmask)
+{
+	return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
+}
+
+
+int iwmct_log_set_fw_filter(u8 src, u8 logmask)
+{
+	return _log_set_log_filter(iwmct_fw_logdefs,
+				   FW_LOG_SRC_MAX, src, logmask);
+}
+
+
+static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
+			      int ilen, char *pref)
+{
+	int pos = 0;
+	int i;
+	int len;
+
+	for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
+		str[pos] = pref[i];
+
+	for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
+		len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);
+
+	if (i < ilen)
+		return -1;
+
+	return 0;
+}
+
+/* NOTE: This function is not thread safe.
+ * Currently it's called only from the sdio rx worker - no race there.
+ */
+void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
+{
+	struct top_msg *msg;
+	static char logbuf[LOG_MSG_SIZE_MAX];
+
+	msg = (struct top_msg *)buf;
+
+	if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
+		LOG_ERROR(priv, FW_MSG, "Log message from TOP "
+			  "is too short %d (expected %zd)\n",
+			  len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
+		return;
+	}
+
+	if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
+		BIT(msg->u.log.log_hdr.severity)) ||
+	    !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
+		return;
+
+	switch (msg->hdr.category) {
+	case COMM_CATEGORY_TESTABILITY:
+		if (!(iwmct_logdefs[LOG_SRC_TST] &
+		      BIT(msg->u.log.log_hdr.severity)))
+			return;
+		if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
+				       le16_to_cpu(msg->hdr.length) +
+				       sizeof(msg->hdr), "<TST>"))
+			LOG_WARNING(priv, TST,
+				  "TOP TST message is too long, truncating...");
+		LOG_WARNING(priv, TST, "%s\n", logbuf);
+		break;
+	case COMM_CATEGORY_DEBUG:
+		if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
+			LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
+				       ((u8 *)msg) + sizeof(msg->hdr)
+					+ sizeof(msg->u.log.log_hdr));
+		else {
+			if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
+					le16_to_cpu(msg->hdr.length)
+						+ sizeof(msg->hdr),
+					"<DBG>"))
+				LOG_WARNING(priv, FW_MSG,
+					"TOP DBG message is too long, "
+					"truncating...");
+			LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
+{
+	int i, pos, len;
+	for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
+		len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
+				i, logdefs[i]);
+		pos += len;
+	}
+	buf[pos-1] = '\n';
+	buf[pos] = '\0';
+
+	if (i < logdefsz)
+		return -1;
+	return 0;
+}
+
+int log_get_filter_str(char *buf, int size)
+{
+	return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
+}
+
+int log_get_fw_filter_str(char *buf, int size)
+{
+	return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
+}
+
+#define HEXADECIMAL_RADIX	16
+#define LOG_SRC_FORMAT		7 /* log level entries use the format "0xXXXX," */
+
+ssize_t show_iwmct_log_level(struct device *d,
+				struct device_attribute *attr, char *buf)
+{
+	struct iwmct_priv *priv = dev_get_drvdata(d);
+	char *str_buf;
+	int buf_size;
+	ssize_t ret;
+
+	buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
+	str_buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!str_buf) {
+		LOG_ERROR(priv, DEBUGFS,
+			"failed to allocate %d bytes\n", buf_size);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (log_get_filter_str(str_buf, buf_size) < 0) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = sprintf(buf, "%s", str_buf);
+
+exit:
+	kfree(str_buf);
+	return ret;
+}
+
+ssize_t store_iwmct_log_level(struct device *d,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct iwmct_priv *priv = dev_get_drvdata(d);
+	char *token, *str_buf = NULL;
+	long val;
+	ssize_t ret = count;
+	u8 src, mask;
+
+	if (!count)
+		goto exit;
+
+	str_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!str_buf) {
+		LOG_ERROR(priv, DEBUGFS,
+			"failed to allocate %zd bytes\n", count);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	memcpy(str_buf, buf, count);
+
+	while ((token = strsep(&str_buf, ",")) != NULL) {
+		while (isspace(*token))
+			++token;
+		if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
+			LOG_ERROR(priv, DEBUGFS,
+				  "failed to convert string to long %s\n",
+				  token);
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		mask  = val & 0xFF;
+		src = (val & 0XFF00) >> 8;
+		iwmct_log_set_filter(src, mask);
+	}
+
+exit:
+	kfree(str_buf);
+	return ret;
+}
+
+ssize_t show_iwmct_log_level_fw(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	struct iwmct_priv *priv = dev_get_drvdata(d);
+	char *str_buf;
+	int buf_size;
+	ssize_t ret;
+
+	buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;
+
+	str_buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!str_buf) {
+		LOG_ERROR(priv, DEBUGFS,
+			"failed to allocate %d bytes\n", buf_size);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = sprintf(buf, "%s", str_buf);
+
+exit:
+	kfree(str_buf);
+	return ret;
+}
+
+ssize_t store_iwmct_log_level_fw(struct device *d,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct iwmct_priv *priv = dev_get_drvdata(d);
+	struct top_msg cmd;
+	char *token, *str_buf = NULL;
+	ssize_t ret = count;
+	u16 cmdlen = 0;
+	int i;
+	long val;
+	u8 src, mask;
+
+	if (!count)
+		goto exit;
+
+	str_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!str_buf) {
+		LOG_ERROR(priv, DEBUGFS,
+			"failed to allocate %zd bytes\n", count);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	memcpy(str_buf, buf, count);
+
+	cmd.hdr.type = COMM_TYPE_H2D;
+	cmd.hdr.category = COMM_CATEGORY_DEBUG;
+	cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
+
+	for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) &&
+		     (i < FW_LOG_SRC_MAX); i++) {
+
+		while (isspace(*token))
+			++token;
+
+		if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
+			LOG_ERROR(priv, DEBUGFS,
+				  "failed to convert string to long %s\n",
+				  token);
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		mask  = val & 0xFF; /* LSB */
+		src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
+		iwmct_log_set_fw_filter(src, mask);
+
+		cmd.u.logdefs[i].logsource = src;
+		cmd.u.logdefs[i].sevmask = mask;
+	}
+
+	cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
+	cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
+
+	ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
+	if (ret) {
+		LOG_ERROR(priv, DEBUGFS,
+			  "Failed to send %d bytes of fwcmd, ret=%zd\n",
+			  cmdlen, ret);
+		goto exit;
+	} else
+		LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
+
+	ret = count;
+
+exit:
+	kfree(str_buf);
+	return ret;
+}
+
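The store_iwmct_log_level*() handlers above parse a comma-separated list of hex tokens in which the high byte selects a log source and the low byte is a severity bit mask (both defined in log.h). A sketch of composing one token in that encoding (source and severity values taken from log.h; the sysfs attribute wiring lives in main.c):

	/* assumes "log.h" and <linux/bitops.h>; LOG_SRC_ALL (0xff) targets every source */
	u8 src = LOG_SRC_FW_DOWNLOAD;				/* source id 2 */
	u8 mask = BIT(LOG_SEV_ERROR) | BIT(LOG_SEV_WARNING);	/* 0x06 */
	u16 token = (src << 8) | mask;				/* written as "0x0206" */

Several such tokens can be written at once, separated by commas; store_iwmct_log_level_fw() uses the same encoding with the firmware source ids and also forwards the settings to the device through a CMD_DBG_LOG_LEVEL command.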
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
new file mode 100644
index 0000000..aba8121
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.h
@@ -0,0 +1,158 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/log.h
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#ifndef __LOG_H__
+#define __LOG_H__
+
+
+/* log severity:
+ * The log levels here match FW log levels
+ * so values need to stay as is */
+#define LOG_SEV_CRITICAL		0
+#define LOG_SEV_ERROR			1
+#define LOG_SEV_WARNING			2
+#define LOG_SEV_INFO			3
+#define LOG_SEV_INFOEX			4
+
+#define LOG_SEV_FILTER_ALL		\
+	(BIT(LOG_SEV_CRITICAL) |	\
+	 BIT(LOG_SEV_ERROR)    |	\
+	 BIT(LOG_SEV_WARNING)  | 	\
+	 BIT(LOG_SEV_INFO)     |	\
+	 BIT(LOG_SEV_INFOEX))
+
+/* log source */
+#define LOG_SRC_INIT			0
+#define LOG_SRC_DEBUGFS			1
+#define LOG_SRC_FW_DOWNLOAD		2
+#define LOG_SRC_FW_MSG			3
+#define LOG_SRC_TST			4
+#define LOG_SRC_IRQ			5
+
+#define	LOG_SRC_MAX			6
+#define	LOG_SRC_ALL			0xFF
+
+/**
+ * Default runtime log level set at initialization
+ */
+#ifndef LOG_SEV_FILTER_RUNTIME
+#define LOG_SEV_FILTER_RUNTIME			\
+	(BIT(LOG_SEV_CRITICAL)	|		\
+	 BIT(LOG_SEV_ERROR)	|		\
+	 BIT(LOG_SEV_WARNING))
+#endif
+
+#ifndef FW_LOG_SEV_FILTER_RUNTIME
+#define FW_LOG_SEV_FILTER_RUNTIME	LOG_SEV_FILTER_ALL
+#endif
+
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+/**
+ * Log macros
+ */
+
+#define priv2dev(priv) (&(priv->func)->dev)
+
+#define LOG_CRITICAL(priv, src, fmt, args...)				\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL))	\
+		dev_crit(priv2dev(priv), "%s %d: " fmt,			\
+			__func__, __LINE__, ##args);			\
+} while (0)
+
+#define LOG_ERROR(priv, src, fmt, args...)				\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR))	\
+		dev_err(priv2dev(priv), "%s %d: " fmt,			\
+			__func__, __LINE__, ##args);			\
+} while (0)
+
+#define LOG_WARNING(priv, src, fmt, args...)				\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING))	\
+		dev_warn(priv2dev(priv), "%s %d: " fmt,			\
+			 __func__, __LINE__, ##args);			\
+} while (0)
+
+#define LOG_INFO(priv, src, fmt, args...)				\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO))		\
+		dev_info(priv2dev(priv), "%s %d: " fmt,			\
+			 __func__, __LINE__, ##args);			\
+} while (0)
+
+#define LOG_INFOEX(priv, src, fmt, args...)				\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX))	\
+		dev_dbg(priv2dev(priv), "%s %d: " fmt,			\
+			 __func__, __LINE__, ##args);			\
+} while (0)
+
+#define LOG_HEXDUMP(src, ptr, len)					\
+do {									\
+	if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX))	\
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,	\
+				16, 1, ptr, len, false);		\
+} while (0)
+
+void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
+
+extern u8 iwmct_logdefs[];
+
+int iwmct_log_set_filter(u8 src, u8 logmask);
+int iwmct_log_set_fw_filter(u8 src, u8 logmask);
+
+ssize_t show_iwmct_log_level(struct device *d,
+			struct device_attribute *attr, char *buf);
+ssize_t store_iwmct_log_level(struct device *d,
+			struct device_attribute *attr,
+			const char *buf, size_t count);
+ssize_t show_iwmct_log_level_fw(struct device *d,
+			struct device_attribute *attr, char *buf);
+ssize_t store_iwmct_log_level_fw(struct device *d,
+			struct device_attribute *attr,
+			const char *buf, size_t count);
+
+#else
+
+#define LOG_CRITICAL(priv, src, fmt, args...)
+#define LOG_ERROR(priv, src, fmt, args...)
+#define LOG_WARNING(priv, src, fmt, args...)
+#define LOG_INFO(priv, src, fmt, args...)
+#define LOG_INFOEX(priv, src, fmt, args...)
+#define LOG_HEXDUMP(src, ptr, len)
+
+static inline void iwmct_log_top_message(struct iwmct_priv *priv,
+					 u8 *buf, int len) {}
+static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
+static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
+
+#endif /* CONFIG_IWMC3200TOP_DEBUG */
+
+int log_get_filter_str(char *buf, int size);
+int log_get_fw_filter_str(char *buf, int size);
+
+#endif /* __LOG_H__ */
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
new file mode 100644
index 0000000..6e4e491
--- /dev/null
+++ b/drivers/misc/iwmc3200top/main.c
@@ -0,0 +1,699 @@
+/*
+ * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
+ * drivers/misc/iwmc3200top/main.c
+ *
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
+ *  -
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio.h>
+
+#include "iwmc3200top.h"
+#include "log.h"
+#include "fw-msg.h"
+#include "debugfs.h"
+
+
+#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
+#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
+
+#define IWMCT_VERSION "0.1.62"
+
+#ifdef REPOSITORY_LABEL
+#define RL REPOSITORY_LABEL
+#else
+#define RL local
+#endif
+
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+#define VD "-d"
+#else
+#define VD
+#endif
+
+#define DRIVER_VERSION IWMCT_VERSION "-"  __stringify(RL) VD
+
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(DRIVER_COPYRIGHT);
+
+
+/* FIXME: These can be found in sdio_ids.h in newer kernels */
+#ifndef SDIO_INTEL_VENDOR_ID
+#define SDIO_INTEL_VENDOR_ID			0x0089
+#endif
+#ifndef SDIO_DEVICE_ID_INTEL_IWMC3200TOP
+#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP	0x1404
+#endif
+
+/*
+ * This worker's main task is to wait for OP_OPR_ALIVE
+ * from the TOP FW until the ALIVE_MSG_TIMOUT timeout elapses.
+ * When OP_OPR_ALIVE is received it issues
+ * a call to "bus_rescan_devices".
+ */
+static void iwmct_rescan_worker(struct work_struct *ws)
+{
+	struct iwmct_priv *priv;
+	int ret;
+
+	priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
+
+	LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
+
+	ret = bus_rescan_devices(priv->func->dev.bus);
+	if (ret < 0)
+		LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n");
+}
+
+static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
+{
+	switch (msg->hdr.opcode) {
+	case OP_OPR_ALIVE:
+		LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
+		queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker);
+		break;
+	default:
+		LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
+			msg->hdr.opcode);
+		break;
+	}
+}
+
+
+static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
+{
+	struct top_msg *msg;
+
+	msg = (struct top_msg *)buf;
+
+	if (len < sizeof(msg->hdr)) {
+		LOG_ERROR(priv, FW_MSG,
+			"Message from TOP is too short for a message header: "
+			"received %d bytes, expected at least %zd bytes\n",
+			len, sizeof(msg->hdr));
+		return;
+	}
+
+	if (msg->hdr.type != COMM_TYPE_D2H) {
+		LOG_ERROR(priv, FW_MSG,
+			"Message from TOP with invalid message type 0x%X\n",
+			msg->hdr.type);
+		return;
+	}
+
+	if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
+		LOG_ERROR(priv, FW_MSG,
+			"Message length (%d bytes) is shorter than "
+			"in header (%d bytes)\n",
+			len, le16_to_cpu(msg->hdr.length));
+		return;
+	}
+
+	switch (msg->hdr.category) {
+	case COMM_CATEGORY_OPERATIONAL:
+		op_top_message(priv, (struct top_msg *)buf);
+		break;
+
+	case COMM_CATEGORY_DEBUG:
+	case COMM_CATEGORY_TESTABILITY:
+	case COMM_CATEGORY_DIAGNOSTICS:
+		iwmct_log_top_message(priv, buf, len);
+		break;
+
+	default:
+		LOG_ERROR(priv, FW_MSG,
+			"Message from TOP with unknown category 0x%X\n",
+			msg->hdr.category);
+		break;
+	}
+}
+
+int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
+{
+	int ret;
+	u8 *buf;
+
+	LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n");
+
+	/* add padding to 256 for IWMC */
+	((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
+
+	LOG_HEXDUMP(FW_MSG, cmd, len);
+
+	if (len > FW_HCMD_BLOCK_SIZE) {
+		LOG_ERROR(priv, FW_MSG, "size %d exceeds hcmd max size %d\n",
+			  len, FW_HCMD_BLOCK_SIZE);
+		return -EINVAL;
+	}
+
+	buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
+	if (!buf) {
+		LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
+			  FW_HCMD_BLOCK_SIZE);
+		return -ENOMEM;
+	}
+
+	memcpy(buf, cmd, len);
+
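+	/* The IWMC expects a full block, so send the zero-padded copy
+	 * rather than only the command bytes. */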
+	sdio_claim_host(priv->func);
+	ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf,
+			       FW_HCMD_BLOCK_SIZE);
+	sdio_release_host(priv->func);
+
+	kfree(buf);
+	return ret;
+}
+
+int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
+	void *src, int count)
+{
+	int ret;
+
+	sdio_claim_host(priv->func);
+	ret = sdio_memcpy_toio(priv->func, addr, src, count);
+	sdio_release_host(priv->func);
+
+	return ret;
+}
+
+static void iwmct_irq_read_worker(struct work_struct *ws)
+{
+	struct iwmct_priv *priv;
+	struct iwmct_work_struct *read_req;
+	__le32 *buf = NULL;
+	int ret;
+	int iosize;
+	u32 barker;
+	bool is_barker;
+
+	priv = container_of(ws, struct iwmct_priv, isr_worker);
+
+	LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
+
+	/* --------------------- Handshake with device -------------------- */
+	sdio_claim_host(priv->func);
+
+	/* all list manipulations have to be protected by
+	 * sdio_claim_host/sdio_release_host */
+	if (list_empty(&priv->read_req_list)) {
+		LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
+		goto exit_release;
+	}
+
+	read_req = list_entry(priv->read_req_list.next,
+			      struct iwmct_work_struct, list);
+
+	list_del(&read_req->list);
+	iosize = read_req->iosize;
+	kfree(read_req);
+
+	buf = kzalloc(iosize, GFP_KERNEL);
+	if (!buf) {
+		LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
+		goto exit_release;
+	}
+
+	LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
+				iosize, buf, priv->func->num);
+
+	/* read from device */
+	ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
+	if (ret) {
+		LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
+		goto exit_release;
+	}
+
+	LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);
+
+	barker = le32_to_cpu(buf[0]);
+
+	/* Verify whether it's a barker and if not - treat as regular Rx */
+	if (barker == IWMC_BARKER_ACK ||
+	    (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {
+
+		/* A valid barker repeats the same value in the first 4 dwords */
+		is_barker = (buf[1] == buf[0]) &&
+			    (buf[2] == buf[0]) &&
+			    (buf[3] == buf[0]);
+
+		if (!is_barker) {
+			LOG_WARNING(priv, IRQ,
+				"Potentially inconsistent barker "
+				"%08X_%08X_%08X_%08X\n",
+				le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
+				le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
+		}
+	} else {
+		is_barker = false;
+	}
+
+	/* Handle Top CommHub message */
+	if (!is_barker) {
+		sdio_release_host(priv->func);
+		handle_top_message(priv, (u8 *)buf, iosize);
+		goto exit;
+	} else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
+		if (atomic_read(&priv->dev_sync) == 0) {
+			LOG_ERROR(priv, IRQ,
+				  "ACK barker arrived out-of-sync\n");
+			goto exit_release;
+		}
+
+		/* Continuing to FW download (after sync is completed) */
+		atomic_set(&priv->dev_sync, 0);
+		LOG_INFO(priv, IRQ, "ACK barker arrived "
+				"- starting FW download\n");
+	} else { /* REBOOT barker */
+		LOG_INFO(priv, IRQ, "Recieved reboot barker: %x\n", barker);
+		priv->barker = barker;
+
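+		/*
+		 * A reboot barker with the SYNC flag set is echoed back to
+		 * the device; FW download then starts only after the ACK
+		 * barker arrives (handled above).
+		 */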
+		if (barker & BARKER_DNLOAD_SYNC_MSK) {
+			/* Send the same barker back */
+			ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR,
+					       buf, iosize);
+			if (ret) {
+				LOG_ERROR(priv, IRQ,
+					 "error %d echoing barker\n", ret);
+				goto exit_release;
+			}
+			LOG_INFO(priv, IRQ, "Echoing barker to device\n");
+			atomic_set(&priv->dev_sync, 1);
+			goto exit_release;
+		}
+
+		/* Continuing to FW download (without Sync) */
+		LOG_INFO(priv, IRQ, "No sync requested "
+				    "- starting FW download\n");
+	}
+
+	sdio_release_host(priv->func);
+
+
+	LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker);
+	LOG_INFO(priv, IRQ, "*******  Top FW %s requested ********\n",
+			(priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
+	LOG_INFO(priv, IRQ, "*******  GPS FW %s requested ********\n",
+			(priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
+	LOG_INFO(priv, IRQ, "*******  BT FW %s requested ********\n",
+			(priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
+
+	if (priv->dbg.fw_download)
+		iwmct_fw_load(priv);
+	else
+		LOG_ERROR(priv, IRQ, "FW download not allowed\n");
+
+	goto exit;
+
+exit_release:
+	sdio_release_host(priv->func);
+exit:
+	kfree(buf);
+	LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n");
+}
+
+static void iwmct_irq(struct sdio_func *func)
+{
+	struct iwmct_priv *priv;
+	int val, ret;
+	int iosize;
+	int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
+	struct iwmct_work_struct *read_req;
+
+	priv = sdio_get_drvdata(func);
+
+	LOG_INFO(priv, IRQ, "enter iwmct_irq\n");
+
+	/* read the function's status register */
+	val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
+
+	LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
+
+	if (!val) {
+		LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
+		goto exit_clear_intr;
+	}
+
+
+	/*
+	 * read 2 bytes of the transaction size
+	 * IMPORTANT: sdio transaction size has to be read before clearing
+	 * sdio interrupt!!!
+	 */
+	val = sdio_readb(priv->func, addr++, &ret);
+	iosize = val;
+	val = sdio_readb(priv->func, addr++, &ret);
+	iosize += val << 8;
+
+	LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
+
+	if (iosize == 0) {
+		LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
+		goto exit_clear_intr;
+	}
+
+	/* allocate a work structure to pass iosize to the worker */
+	read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
+	if (!read_req) {
+		LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
+		goto exit_clear_intr;
+	}
+
+	INIT_LIST_HEAD(&read_req->list);
+	read_req->iosize = iosize;
+
+	list_add_tail(&read_req->list, &priv->read_req_list);
+
+	/* clear the function's interrupt request bit (write 1 to clear) */
+	sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
+
+	queue_work(priv->wq, &priv->isr_worker);
+
+	LOG_INFO(priv, IRQ, "exit iwmct_irq\n");
+
+	return;
+
+exit_clear_intr:
+	/* clear the function's interrupt request bit (write 1 to clear) */
+	sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
+}
+
+
+static int blocks;
+module_param(blocks, int, 0604);
+MODULE_PARM_DESC(blocks, "max_blocks_to_send");
+
+static int dump;
+module_param(dump, bool, 0604);
+MODULE_PARM_DESC(dump, "dump_hex_content");
+
+static int jump = 1;
+module_param(jump, bool, 0604);
+
+static int direct = 1;
+module_param(direct, bool, 0604);
+
+static int checksum = 1;
+module_param(checksum, bool, 0604);
+
+static int fw_download = 1;
+module_param(fw_download, bool, 0604);
+
+static int block_size = IWMC_SDIO_BLK_SIZE;
+module_param(block_size, int, 0404);
+
+static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
+module_param(download_trans_blks, int, 0604);
+
+static int rubbish_barker;
+module_param(rubbish_barker, bool, 0604);
+
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+static int log_level[LOG_SRC_MAX];
+static unsigned int log_level_argc;
+module_param_array(log_level, int, &log_level_argc, 0604);
+MODULE_PARM_DESC(log_level, "log_level");
+
+static int log_level_fw[FW_LOG_SRC_MAX];
+static unsigned int log_level_fw_argc;
+module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
+MODULE_PARM_DESC(log_level_fw, "log_level_fw");
+#endif
+
+void iwmct_dbg_init_params(struct iwmct_priv *priv)
+{
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+	int i;
+
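+	/*
+	 * Each log_level/log_level_fw array element packs the log source in
+	 * its high byte and the severity mask in its low byte.
+	 */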
+	for (i = 0; i < log_level_argc; i++) {
+		dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
+						i, log_level[i]);
+		iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
+			       log_level[i] & 0xFF);
+	}
+	for (i = 0; i < log_level_fw_argc; i++) {
+		dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
+						i, log_level_fw[i]);
+		iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
+				  log_level_fw[i] & 0xFF);
+	}
+#endif
+
+	priv->dbg.blocks = blocks;
+	LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
+	priv->dbg.dump = (bool)dump;
+	LOG_INFO(priv, INIT, "dump=%d\n", dump);
+	priv->dbg.jump = (bool)jump;
+	LOG_INFO(priv, INIT, "jump=%d\n", jump);
+	priv->dbg.direct = (bool)direct;
+	LOG_INFO(priv, INIT, "direct=%d\n", direct);
+	priv->dbg.checksum = (bool)checksum;
+	LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
+	priv->dbg.fw_download = (bool)fw_download;
+	LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
+	priv->dbg.block_size = block_size;
+	LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
+	priv->dbg.download_trans_blks = download_trans_blks;
+	LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+static ssize_t show_iwmct_fw_version(struct device *d,
+				  struct device_attribute *attr, char *buf)
+{
+	struct iwmct_priv *priv = dev_get_drvdata(d);
+	return sprintf(buf, "%s\n", priv->dbg.label_fw);
+}
+static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);
+
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
+		   show_iwmct_log_level, store_iwmct_log_level);
+static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
+		   show_iwmct_log_level_fw, store_iwmct_log_level_fw);
+#endif
+
+static struct attribute *iwmct_sysfs_entries[] = {
+	&dev_attr_cc_label_fw.attr,
+#ifdef CONFIG_IWMC3200TOP_DEBUG
+	&dev_attr_log_level.attr,
+	&dev_attr_log_level_fw.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group iwmct_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = iwmct_sysfs_entries,
+};
+
+
+static int iwmct_probe(struct sdio_func *func,
+			   const struct sdio_device_id *id)
+{
+	struct iwmct_priv *priv;
+	int ret;
+	int val = 1;
+	int addr = IWMC_SDIO_INTR_ENABLE_ADDR;
+
+	dev_dbg(&func->dev, "enter iwmct_probe\n");
+
+	dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n",
+		jiffies_to_msecs(2147483647), HZ);
+
+	priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(&func->dev, "kzalloc error\n");
+		return -ENOMEM;
+	}
+	priv->func = func;
+	sdio_set_drvdata(func, priv);
+
+
+	/* create drivers work queue */
+	priv->wq = create_workqueue(DRV_NAME "_wq");
+	priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
+	INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
+	INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
+
+	init_waitqueue_head(&priv->wait_q);
+
+	sdio_claim_host(func);
+	/* FIXME: Remove after it is fixed in the Boot ROM upgrade */
+	func->enable_timeout = 10;
+
+	/* In our HW, setting the block size also wakes up the boot rom. */
+	ret = sdio_set_block_size(func, priv->dbg.block_size);
+	if (ret) {
+		LOG_ERROR(priv, INIT,
+			"sdio_set_block_size() failure: %d\n", ret);
+		goto error_sdio_enable;
+	}
+
+	ret = sdio_enable_func(func);
+	if (ret) {
+		LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
+		goto error_sdio_enable;
+	}
+
+	/* init reset and dev_sync states */
+	atomic_set(&priv->reset, 0);
+	atomic_set(&priv->dev_sync, 0);
+
+	/* init read req queue */
+	INIT_LIST_HEAD(&priv->read_req_list);
+
+	/* process configurable parameters */
+	iwmct_dbg_init_params(priv);
+	ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
+	if (ret) {
+		LOG_ERROR(priv, INIT, "Failed to register attributes and "
+			 "initialize module_params\n");
+		goto error_dev_attrs;
+	}
+
+	iwmct_dbgfs_register(priv, DRV_NAME);
+
+	if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
+		LOG_INFO(priv, INIT,
+			 "Reducing transaction to 8 blocks = 2K (from %d)\n",
+			 priv->dbg.download_trans_blks);
+		priv->dbg.download_trans_blks = 8;
+	}
+	priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
+	LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);
+
+	ret = sdio_claim_irq(func, iwmct_irq);
+	if (ret) {
+		LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
+		goto error_claim_irq;
+	}
+
+
+	/* Enable function's interrupt */
+	sdio_writeb(priv->func, val, addr, &ret);
+	if (ret) {
+		LOG_ERROR(priv, INIT, "Failure writing to "
+			  "Interrupt Enable Register (%d): %d\n", addr, ret);
+		goto error_enable_int;
+	}
+
+	sdio_release_host(func);
+
+	LOG_INFO(priv, INIT, "exit iwmct_probe\n");
+
+	return ret;
+
+error_enable_int:
+	sdio_release_irq(func);
+error_claim_irq:
+	sdio_disable_func(func);
+error_dev_attrs:
+	iwmct_dbgfs_unregister(priv->dbgfs);
+	sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
+error_sdio_enable:
+	sdio_release_host(func);
+	return ret;
+}
+
+static void iwmct_remove(struct sdio_func *func)
+{
+	struct iwmct_work_struct *read_req;
+	struct iwmct_priv *priv = sdio_get_drvdata(func);
+
+	LOG_INFO(priv, INIT, "enter\n");
+
+	sdio_claim_host(func);
+	sdio_release_irq(func);
+	sdio_release_host(func);
+
+	/* Safely destroy the driver's workqueues */
+	destroy_workqueue(priv->bus_rescan_wq);
+	destroy_workqueue(priv->wq);
+
+	sdio_claim_host(func);
+	sdio_disable_func(func);
+	sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
+	iwmct_dbgfs_unregister(priv->dbgfs);
+	sdio_release_host(func);
+
+	/* free read requests */
+	while (!list_empty(&priv->read_req_list)) {
+		read_req = list_entry(priv->read_req_list.next,
+			struct iwmct_work_struct, list);
+
+		list_del(&read_req->list);
+		kfree(read_req);
+	}
+
+	kfree(priv);
+}
+
+
+static const struct sdio_device_id iwmct_ids[] = {
+	{ SDIO_DEVICE(SDIO_INTEL_VENDOR_ID, SDIO_DEVICE_ID_INTEL_IWMC3200TOP)},
+	{ /* end: all zeroes */	},
+};
+
+MODULE_DEVICE_TABLE(sdio, iwmct_ids);
+
+static struct sdio_driver iwmct_driver = {
+	.probe		= iwmct_probe,
+	.remove		= iwmct_remove,
+	.name		= DRV_NAME,
+	.id_table	= iwmct_ids,
+};
+
+static int __init iwmct_init(void)
+{
+	int rc;
+
+	/* Default log filter settings */
+	iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
+	iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL);
+	iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
+
+	rc = sdio_register_driver(&iwmct_driver);
+
+	return rc;
+}
+
+static void __exit iwmct_exit(void)
+{
+	sdio_unregister_driver(&iwmct_driver);
+}
+
+module_init(iwmct_init);
+module_exit(iwmct_exit);
+
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 04fb8b0..e012c2e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1741,6 +1741,7 @@
 config KS8851_MLL
 	tristate "Micrel KS8851 MLL"
 	depends on HAS_IOMEM
+	select MII
 	help
 	  This platform driver is for Micrel KS8851 Address/data bus
 	  multiplexed network chip.
@@ -2482,6 +2483,8 @@
 	  To compile this driver as a module, choose M here. The module
 	  will be called s6gmac.
 
+source "drivers/net/stmmac/Kconfig"
+
 endif # NETDEV_1000
 
 #
@@ -3232,7 +3235,7 @@
 
 config VMXNET3
        tristate "VMware VMXNET3 ethernet driver"
-       depends on PCI && X86
+       depends on PCI && X86 && INET
        help
          This driver supports VMware's vmxnet3 virtual ethernet NIC.
          To compile this driver as a module, choose M here: the
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fc6c8bb..246323d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -100,6 +100,7 @@
 obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
 obj-$(CONFIG_RIONET) += rionet.o
 obj-$(CONFIG_SH_ETH) += sh_eth.o
+obj-$(CONFIG_STMMAC_ETH) += stmmac/
 
 #
 # end link order section
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 2a7b774..0073d19 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -35,11 +35,13 @@
 
 #include <mach/regs-switch.h>
 #include <mach/regs-misc.h>
+#include <asm/mach/irq.h>
+#include <mach/regs-irq.h>
 
 #include "ks8695net.h"
 
 #define MODULENAME	"ks8695_ether"
-#define MODULEVERSION	"1.01"
+#define MODULEVERSION	"1.02"
 
 /*
  * Transmit and device reset timeout, default 5 seconds.
@@ -95,6 +97,9 @@
 #define MAX_RX_DESC 16
 #define MAX_RX_DESC_MASK 0xf
 
+/* NAPI weight should be larger than the number of RX DMA buffers */
+#define NAPI_WEIGHT   64
+
 #define MAX_RXBUF_SIZE 0x700
 
 #define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
@@ -120,6 +125,7 @@
  *	@dev: The platform device object for this interface
  *	@dtype: The type of this device
  *	@io_regs: The ioremapped registers for this interface
+ *	@napi: The NAPI structure used for RX polling
  *	@rx_irq_name: The textual name of the RX IRQ from the platform data
  *	@tx_irq_name: The textual name of the TX IRQ from the platform data
  *	@link_irq_name: The textual name of the link IRQ from the
@@ -143,6 +149,7 @@
  *	@rx_ring_dma: The DMA mapped equivalent of rx_ring
  *	@rx_buffers: The sk_buff mappings for the RX ring
  *	@next_rx_desc_read: The next RX descriptor to read from on IRQ
+ *	@rx_lock: A lock protecting the RX IRQ handler and poll routine
  *	@msg_enable: The flags for which messages to emit
  */
 struct ks8695_priv {
@@ -152,6 +159,8 @@
 	enum ks8695_dtype dtype;
 	void __iomem *io_regs;
 
+	struct napi_struct	napi;
+
 	const char *rx_irq_name, *tx_irq_name, *link_irq_name;
 	int rx_irq, tx_irq, link_irq;
 
@@ -172,6 +181,7 @@
 	dma_addr_t rx_ring_dma;
 	struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
 	int next_rx_desc_read;
+	spinlock_t rx_lock;
 
 	int msg_enable;
 };
@@ -392,29 +402,82 @@
 }
 
 /**
+ *	ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
+ *	@ksp: Private data for the KS8695 Ethernet
+ *
+ *    Per the KS8695 documentation:
+ *    Interrupt Enable Register (offset 0xE204)
+ *        Bit 29: WAN MAC Receive Interrupt Enable
+ *        Bit 16: LAN MAC Receive Interrupt Enable
+ *    Interrupt Status Register (offset 0xF208)
+ *        Bit 29: WAN MAC Receive Status
+ *        Bit 16: LAN MAC Receive Status
+ *    So the RX interrupt enable/status bit number is the same
+ *    as the RX IRQ number.
+ */
+static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
+{
+	return ksp->rx_irq;
+}
+
+/**
  *	ks8695_rx_irq - Receive IRQ handler
  *	@irq: The IRQ which went off (ignored)
  *	@dev_id: The net_device for the interrupt
  *
- *	Process the RX ring, passing any received packets up to the
- *	host.  If we received anything other than errors, we then
- *	refill the ring.
+ *	Inform NAPI that packet reception needs to be scheduled
  */
+
 static irqreturn_t
 ks8695_rx_irq(int irq, void *dev_id)
 {
 	struct net_device *ndev = (struct net_device *)dev_id;
 	struct ks8695_priv *ksp = netdev_priv(ndev);
+	unsigned long status;
+
+	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+
+	spin_lock(&ksp->rx_lock);
+
+	status = readl(KS8695_IRQ_VA + KS8695_INTST);
+
+	/* clear the RX status bit */
+	writel(status | mask_bit, KS8695_IRQ_VA + KS8695_INTST);
+
+	if (status & mask_bit) {
+		if (napi_schedule_prep(&ksp->napi)) {
+			/* disable the RX interrupt until the poll completes */
+			writel(readl(KS8695_IRQ_VA + KS8695_INTEN) & ~mask_bit,
+			       KS8695_IRQ_VA + KS8695_INTEN);
+			__napi_schedule(&ksp->napi);
+		}
+	}
+
+	spin_unlock(&ksp->rx_lock);
+	return IRQ_HANDLED;
+}
+
+/**
+ *	ks8695_rx - Receive packets, called from the NAPI poll method
+ *	@ksp: Private data for the KS8695 Ethernet
+ *	@budget: The maximum number of packets to receive
+ */
+
+static int ks8695_rx(struct ks8695_priv *ksp, int budget)
+{
+	struct net_device *ndev = ksp->ndev;
 	struct sk_buff *skb;
 	int buff_n;
 	u32 flags;
 	int pktlen;
 	int last_rx_processed = -1;
+	int received = 0;
 
 	buff_n = ksp->next_rx_desc_read;
-	do {
-		if (ksp->rx_buffers[buff_n].skb &&
-		    !(ksp->rx_ring[buff_n].status & cpu_to_le32(RDES_OWN))) {
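+	/*
+	 * Walk the RX ring from the last position read, stopping when the
+	 * budget is exhausted, a ring slot has no SKB, or the descriptor is
+	 * still owned by the hardware (RDES_OWN set).
+	 */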
+	while (received < budget
+			&& ksp->rx_buffers[buff_n].skb
+			&& (!(ksp->rx_ring[buff_n].status &
+					cpu_to_le32(RDES_OWN)))) {
 			rmb();
 			flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
 			/* Found an SKB which we own, this means we
@@ -464,7 +527,7 @@
 			/* Relinquish the SKB to the network layer */
 			skb_put(skb, pktlen);
 			skb->protocol = eth_type_trans(skb, ndev);
-			netif_rx(skb);
+			netif_receive_skb(skb);
 
 			/* Record stats */
 			ndev->stats.rx_packets++;
@@ -478,29 +541,56 @@
 			/* Give the ring entry back to the hardware */
 			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
 rx_finished:
+			received++;
 			/* And note this as processed so we can start
 			 * from here next time
 			 */
 			last_rx_processed = buff_n;
-		} else {
-			/* Ran out of things to process, stop now */
-			break;
-		}
-		buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
-	} while (buff_n != ksp->next_rx_desc_read);
+			buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
+			/* And note which RX descriptor we last processed */
+			if (likely(last_rx_processed != -1))
+				ksp->next_rx_desc_read =
+					(last_rx_processed + 1) &
+					MAX_RX_DESC_MASK;
 
-	/* And note which RX descriptor we last did anything with */
-	if (likely(last_rx_processed != -1))
-		ksp->next_rx_desc_read =
-			(last_rx_processed + 1) & MAX_RX_DESC_MASK;
+			/* And refill the buffers */
+			ks8695_refill_rxbuffers(ksp);
 
-	/* And refill the buffers */
-	ks8695_refill_rxbuffers(ksp);
+			/* Kick the RX DMA engine, in case it became
+			 *  suspended */
+			ks8695_writereg(ksp, KS8695_DRSC, 0);
+	}
+	return received;
+}
 
-	/* Kick the RX DMA engine, in case it became suspended */
-	ks8695_writereg(ksp, KS8695_DRSC, 0);
 
-	return IRQ_HANDLED;
+/**
+ *	ks8695_poll - Receive packets via the NAPI poll method
+ *	@napi: The NAPI context for this interface
+ *	@budget: The maximum number of packets the network subsystem allows
+ *
+ *	Invoked by the network core when it requests new
+ *	packets from the driver
+ */
+static int ks8695_poll(struct napi_struct *napi, int budget)
+{
+	struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
+	unsigned long work_done;
+
+	unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
+	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+
+	work_done = ks8695_rx(ksp, budget);
+
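+	/*
+	 * If less than the budget was consumed the RX ring is drained:
+	 * re-enable the RX interrupt and complete NAPI polling.
+	 */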
+	if (work_done < budget) {
+		unsigned long flags;
+		spin_lock_irqsave(&ksp->rx_lock, flags);
+		/*enable rx interrupt*/
+		writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
+		__napi_complete(napi);
+		spin_unlock_irqrestore(&ksp->rx_lock, flags);
+	}
+	return work_done;
 }
 
 /**
@@ -1253,6 +1343,7 @@
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 
 	netif_stop_queue(ndev);
+	napi_disable(&ksp->napi);
 	netif_carrier_off(ndev);
 
 	ks8695_shutdown(ksp);
@@ -1287,6 +1378,7 @@
 		return ret;
 	}
 
+	napi_enable(&ksp->napi);
 	netif_start_queue(ndev);
 
 	return 0;
@@ -1472,6 +1564,8 @@
 	SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	ndev->watchdog_timeo	 = msecs_to_jiffies(watchdog);
 
+	netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
+
 	/* Retrieve the default MAC addr from the chip. */
 	/* The bootloader should have left it in there for us. */
 
@@ -1505,6 +1599,7 @@
 
 	/* And initialise the queue's lock */
 	spin_lock_init(&ksp->txq_lock);
+	spin_lock_init(&ksp->rx_lock);
 
 	/* Specify the RX DMA ring buffer */
 	ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
@@ -1626,6 +1721,7 @@
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 
 	platform_set_drvdata(pdev, NULL);
+	netif_napi_del(&ksp->napi);
 
 	unregister_netdev(ndev);
 	ks8695_release_device(ksp);
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 1372e9a9..3b8801a 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1981,8 +1981,6 @@
 		else {
 			use_tpd = atl1c_get_tpd(adapter, type);
 			memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
-			use_tpd = atl1c_get_tpd(adapter, type);
-			memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
 		}
 		buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
 		buffer_info->length = buf_len - mapped_len;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 04f63c7..ce6f1ac 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -34,6 +34,7 @@
  *
  *
  */
+#include <linux/capability.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 25b6602..cc75dd0 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -243,15 +243,26 @@
 
 int be_cmd_POST(struct be_adapter *adapter)
 {
-	u16 stage, error;
+	u16 stage;
+	int status, timeout = 0;
 
-	error = be_POST_stage_get(adapter, &stage);
-	if (error || stage != POST_STAGE_ARMFW_RDY) {
-		dev_err(&adapter->pdev->dev, "POST failed.\n");
-		return -1;
-	}
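+	/* Poll the POST stage every 2 seconds, for up to ~20 seconds,
+	 * until the ARM FW reports ready. */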
+	do {
+		status = be_POST_stage_get(adapter, &stage);
+		if (status) {
+			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
+				stage);
+			return -1;
+		} else if (stage != POST_STAGE_ARMFW_RDY) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(2 * HZ);
+			timeout += 2;
+		} else {
+			return 0;
+		}
+	} while (timeout < 20);
 
-	return 0;
+	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
+	return -1;
 }
 
 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
@@ -729,8 +740,8 @@
 /* Create an rx filtering policy configuration on an i/f
  * Uses mbox
  */
-int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
-		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_if_create *req;
@@ -746,8 +757,8 @@
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
 
-	req->capability_flags = cpu_to_le32(flags);
-	req->enable_flags = cpu_to_le32(flags);
+	req->capability_flags = cpu_to_le32(cap_flags);
+	req->enable_flags = cpu_to_le32(en_flags);
 	req->pmac_invalid = pmac_invalid;
 	if (!pmac_invalid)
 		memcpy(req->mac_addr, mac, ETH_ALEN);
@@ -823,7 +834,7 @@
 
 /* Uses synchronous mcc */
 int be_cmd_link_status_query(struct be_adapter *adapter,
-			bool *link_up)
+			bool *link_up, u8 *mac_speed, u16 *link_speed)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_link_status *req;
@@ -844,8 +855,11 @@
 	status = be_mcc_notify_wait(adapter);
 	if (!status) {
 		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
-		if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
+		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
 			*link_up = true;
+			*link_speed = le16_to_cpu(resp->link_speed);
+			*mac_speed = resp->mac_speed;
+		}
 	}
 
 	spin_unlock_bh(&adapter->mcc_lock);
@@ -1177,6 +1191,36 @@
 	return status;
 }
 
+/* Uses sync mcc */
+int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
+				u8 *connector)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_port_type *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+		OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
+
+	req->port = cpu_to_le32(port);
+	req->page_num = cpu_to_le32(TR_PAGE_A0);
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
+		*connector = resp->data.connector;
+	}
+
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 			u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index a1e78cc..69dc017 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -68,7 +68,7 @@
 #define CQE_STATUS_COMPL_MASK		0xFFFF
 #define CQE_STATUS_COMPL_SHIFT		0	/* bits 0 - 15 */
 #define CQE_STATUS_EXTD_MASK		0xFFFF
-#define CQE_STATUS_EXTD_SHIFT		0	/* bits 0 - 15 */
+#define CQE_STATUS_EXTD_SHIFT		16	/* bits 16 - 31 */
 
 struct be_mcc_compl {
 	u32 status;		/* dword 0 */
@@ -140,6 +140,7 @@
 #define OPCODE_COMMON_FUNCTION_RESET			61
 #define OPCODE_COMMON_ENABLE_DISABLE_BEACON		69
 #define OPCODE_COMMON_GET_BEACON_STATE			70
+#define OPCODE_COMMON_READ_TRANSRECV_DATA		73
 
 #define OPCODE_ETH_ACPI_CONFIG				2
 #define OPCODE_ETH_PROMISCUOUS				3
@@ -635,9 +636,47 @@
 	u8 mac_fault;
 	u8 mgmt_mac_duplex;
 	u8 mgmt_mac_speed;
-	u16 rsvd0;
+	u16 link_speed;
+	u32 rsvd0;
 } __packed;
 
+/******************** Port Identification ***************************/
+/*    Identifies the type of port attached to NIC     */
+struct be_cmd_req_port_type {
+	struct be_cmd_req_hdr hdr;
+	u32 page_num;
+	u32 port;
+};
+
+enum {
+	TR_PAGE_A0 = 0xa0,
+	TR_PAGE_A2 = 0xa2
+};
+
+struct be_cmd_resp_port_type {
+	struct be_cmd_resp_hdr hdr;
+	u32 page_num;
+	u32 port;
+	struct data {
+		u8 identifier;
+		u8 identifier_ext;
+		u8 connector;
+		u8 transceiver[8];
+		u8 rsvd0[3];
+		u8 length_km;
+		u8 length_hm;
+		u8 length_om1;
+		u8 length_om2;
+		u8 length_cu;
+		u8 length_cu_m;
+		u8 vendor_name[16];
+		u8 rsvd;
+		u8 vendor_oui[3];
+		u8 vendor_pn[16];
+		u8 vendor_rev[4];
+	} data;
+};
+
 /******************** Get FW Version *******************/
 struct be_cmd_req_get_fw_version {
 	struct be_cmd_req_hdr hdr;
@@ -753,8 +792,9 @@
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 			u32 if_id, u32 *pmac_id);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac,
-			bool pmac_invalid, u32 *if_handle, u32 *pmac_id);
+extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
+			u32 en_flags, u8 *mac, bool pmac_invalid,
+			u32 *if_handle, u32 *pmac_id);
 extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
 			struct be_queue_info *eq, int eq_delay);
@@ -775,7 +815,7 @@
 extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 			int type);
 extern int be_cmd_link_status_query(struct be_adapter *adapter,
-			bool *link_up);
+			bool *link_up, u8 *mac_speed, u16 *link_speed);
 extern int be_cmd_reset(struct be_adapter *adapter);
 extern int be_cmd_get_stats(struct be_adapter *adapter,
 			struct be_dma_mem *nonemb_cmd);
@@ -801,6 +841,8 @@
 			u8 port_num, u8 beacon, u8 status, u8 state);
 extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
 			u8 port_num, u32 *state);
+extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
+					u8 *connector);
 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
 			struct be_dma_mem *cmd, u32 flash_oper,
 			u32 flash_opcode, u32 buf_size);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 280471e..edebce9 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -293,9 +293,43 @@
 
 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
-	ecmd->speed = SPEED_10000;
+	struct be_adapter *adapter = netdev_priv(netdev);
+	u8 mac_speed = 0, connector = 0;
+	u16 link_speed = 0;
+	bool link_up = false;
+
+	be_cmd_link_status_query(adapter, &link_up, &mac_speed, &link_speed);
+
+	/* link_speed is in units of 10 Mbps */
+	if (link_speed) {
+		ecmd->speed = link_speed*10;
+	} else {
+		switch (mac_speed) {
+		case PHY_LINK_SPEED_1GBPS:
+			ecmd->speed = SPEED_1000;
+			break;
+		case PHY_LINK_SPEED_10GBPS:
+			ecmd->speed = SPEED_10000;
+			break;
+		}
+	}
 	ecmd->duplex = DUPLEX_FULL;
 	ecmd->autoneg = AUTONEG_DISABLE;
+	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
+
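+	/* Read the transceiver connector type to report fibre vs. twisted
+	 * pair. */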
+	be_cmd_read_port_type(adapter, adapter->port_num, &connector);
+	switch (connector) {
+	case 7:
+		ecmd->port = PORT_FIBRE;
+		break;
+	default:
+		ecmd->port = PORT_TP;
+		break;
+	}
+
+	ecmd->phy_address = adapter->port_num;
+	ecmd->transceiver = XCVR_INTERNAL;
+
 	return 0;
 }
 
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index e0f9d64..43180dc 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -31,8 +31,10 @@
 
 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -1586,6 +1588,8 @@
 	struct be_eq_obj *tx_eq = &adapter->tx_eq;
 	bool link_up;
 	int status;
+	u8 mac_speed;
+	u16 link_speed;
 
 	/* First time posting */
 	be_post_rx_frags(adapter);
@@ -1604,7 +1608,8 @@
 	/* Rx compl queue may be in unarmed state; rearm it */
 	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
 
-	status = be_cmd_link_status_query(adapter, &link_up);
+	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
+			&link_speed);
 	if (status)
 		return status;
 	be_link_status_update(adapter, link_up);
@@ -1616,19 +1621,22 @@
 static int be_setup(struct be_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	u32 if_flags;
+	u32 cap_flags, en_flags;
 	int status;
 
-	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
-		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
-		BE_IF_FLAGS_PASS_L3L4_ERRORS;
-	status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
-			false/* pmac_invalid */, &adapter->if_handle,
-			&adapter->pmac_id);
+	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+			BE_IF_FLAGS_MCAST_PROMISCUOUS |
+			BE_IF_FLAGS_PROMISCUOUS |
+			BE_IF_FLAGS_PASS_L3L4_ERRORS;
+	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+			BE_IF_FLAGS_PASS_L3L4_ERRORS;
+
+	status = be_cmd_if_create(adapter, cap_flags, en_flags,
+			netdev->dev_addr, false/* pmac_invalid */,
+			&adapter->if_handle, &adapter->pmac_id);
 	if (status != 0)
 		goto do_none;
 
-
 	status = be_tx_queues_create(adapter);
 	if (status != 0)
 		goto if_destroy;
@@ -2051,6 +2059,10 @@
 	if (status)
 		return status;
 
+	status = be_cmd_reset_function(adapter);
+	if (status)
+		return status;
+
 	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
 	if (status)
 		return status;
@@ -2104,10 +2116,6 @@
 	if (status)
 		goto free_netdev;
 
-	status = be_cmd_reset_function(adapter);
-	if (status)
-		goto ctrl_clean;
-
 	status = be_stats_init(adapter);
 	if (status)
 		goto ctrl_clean;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 08cddb6..539d23b 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1466,6 +1466,8 @@
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 		bmcr |= BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
 	}
 
 	if (bp->autoneg & AUTONEG_SPEED) {
@@ -1500,6 +1502,8 @@
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
 		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
 	}
 
 	if (bp->autoneg & AUTONEG_SPEED)
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 6c7f795..a4d8340 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -361,9 +361,12 @@
 #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE	 (1<<28)
 
 #define BNX2_L2CTX_HOST_BDIDX				0x00000004
-#define BNX2_L2CTX_STATUSB_NUM_SHIFT			 16
-#define BNX2_L2CTX_STATUSB_NUM(sb_id)			 \
-	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT			 16
+#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT			 24
+#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id)		\
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id)		\
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0)
 #define BNX2_L2CTX_HOST_BSEQ				0x00000008
 #define BNX2_L2CTX_NX_BSEQ				0x0000000c
 #define BNX2_L2CTX_NX_BDHADDR_HI			0x00000010
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index dc2f8ed..5258533 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -264,6 +264,7 @@
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101	    0x00000800
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727	    0x00000900
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC   0x00000a00
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823	    0x00000b00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE	    0x0000fd00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN	    0x0000ff00
 
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index e32d337..41b9b7b 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1107,18 +1107,21 @@
 			      MDIO_REG_BANK_SERDES_DIGITAL,
 			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
 			      &control2);
-
-
-	control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
-
-
+	if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+		control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+	else
+		control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+	DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n",
+		params->speed_cap_mask, control2);
 	CL45_WR_OVER_CL22(bp, params->port,
 			      params->phy_addr,
 			      MDIO_REG_BANK_SERDES_DIGITAL,
 			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
 			      control2);
 
-	if (phy_flags & PHY_XGXS_FLAG) {
+	if ((phy_flags & PHY_XGXS_FLAG) &&
+	     (params->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
 		DP(NETIF_MSG_LINK, "XGXS\n");
 
 		CL45_WR_OVER_CL22(bp, params->port,
@@ -1225,7 +1228,7 @@
 				      params->phy_addr,
 				      MDIO_REG_BANK_CL73_USERB0,
 				    MDIO_CL73_USERB0_CL73_UCTRL,
-				    MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL);
+				      0xe);
 
 		/* Enable BAM Station Manager*/
 		CL45_WR_OVER_CL22(bp, params->port,
@@ -1236,29 +1239,25 @@
 			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
 			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
 
-		/* Merge CL73 and CL37 aneg resolution */
-		CL45_RD_OVER_CL22(bp, params->port,
-				      params->phy_addr,
-				      MDIO_REG_BANK_CL73_USERB0,
-				      MDIO_CL73_USERB0_CL73_BAM_CTRL3,
-				      &reg_val);
-
-		if (params->speed_cap_mask &
-		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
-			/* Set the CL73 AN speed */
+		/* Advertise CL73 link speeds */
 			CL45_RD_OVER_CL22(bp, params->port,
 					      params->phy_addr,
 					      MDIO_REG_BANK_CL73_IEEEB1,
 					      MDIO_CL73_IEEEB1_AN_ADV2,
 					      &reg_val);
+		if (params->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
+		if (params->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
 
 			CL45_WR_OVER_CL22(bp, params->port,
 					      params->phy_addr,
 					      MDIO_REG_BANK_CL73_IEEEB1,
 					      MDIO_CL73_IEEEB1_AN_ADV2,
-			  reg_val | MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4);
+				      reg_val);
 
-		}
 		/* CL73 Autoneg Enabled */
 		reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
 
@@ -1351,6 +1350,7 @@
 
 static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
 {
+	struct bnx2x *bp = params->bp;
 	*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
 	/* resolve pause mode and advertisement
 	 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
@@ -1380,18 +1380,30 @@
 		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
 		break;
 	}
+	DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
 }
 
 static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
 					   u16 ieee_fc)
 {
 	struct bnx2x *bp = params->bp;
+	u16 val;
 	/* for AN, we are always publishing full duplex */
 
 	CL45_WR_OVER_CL22(bp, params->port,
 			      params->phy_addr,
 			      MDIO_REG_BANK_COMBO_IEEE0,
 			      MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+	CL45_RD_OVER_CL22(bp, params->port,
+			      params->phy_addr,
+			      MDIO_REG_BANK_CL73_IEEEB1,
+			      MDIO_CL73_IEEEB1_AN_ADV1, &val);
+	val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
+	val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
+	CL45_WR_OVER_CL22(bp, params->port,
+			      params->phy_addr,
+			      MDIO_REG_BANK_CL73_IEEEB1,
+			      MDIO_CL73_IEEEB1_AN_ADV1, val);
 }
 
 static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
@@ -1609,6 +1621,39 @@
 	return ret;
 }
 
+static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 pd_10g, status2_1000x;
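+	/*
+	 * Check whether the current link was established by parallel
+	 * detection (1G SerDes autoneg-disabled status or 10G parallel
+	 * detect link status) rather than by completed autonegotiation.
+	 */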
+	CL45_RD_OVER_CL22(bp, params->port,
+			      params->phy_addr,
+			      MDIO_REG_BANK_SERDES_DIGITAL,
+			      MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+			      &status2_1000x);
+	CL45_RD_OVER_CL22(bp, params->port,
+			      params->phy_addr,
+			      MDIO_REG_BANK_SERDES_DIGITAL,
+			      MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+			      &status2_1000x);
+	if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
+		DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
+			 params->port);
+		return 1;
+	}
+
+	CL45_RD_OVER_CL22(bp, params->port,
+			      params->phy_addr,
+			      MDIO_REG_BANK_10G_PARALLEL_DETECT,
+			      MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+			      &pd_10g);
+
+	if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
+		DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
+			 params->port);
+		return 1;
+	}
+	return 0;
+}
 
 static void bnx2x_flow_ctrl_resolve(struct link_params *params,
 				  struct link_vars *vars,
@@ -1627,21 +1672,53 @@
 	    (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
 	    (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
 	     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
-		CL45_RD_OVER_CL22(bp, params->port,
-				      params->phy_addr,
-				      MDIO_REG_BANK_COMBO_IEEE0,
-				      MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
-				      &ld_pause);
-		CL45_RD_OVER_CL22(bp, params->port,
-				      params->phy_addr,
-			MDIO_REG_BANK_COMBO_IEEE0,
-			MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
-			&lp_pause);
-		pause_result = (ld_pause &
+		if (bnx2x_direct_parallel_detect_used(params)) {
+			vars->flow_ctrl = params->req_fc_auto_adv;
+			return;
+		}
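+		/*
+		 * When both the CL73 autoneg-complete and the link-partner
+		 * AN-able status bits are set, resolve pause from the CL73
+		 * advertisement registers; otherwise use the CL37 registers.
+		 */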
+		if ((gp_status &
+		    (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+		     MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
+		    (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+		     MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
+
+			CL45_RD_OVER_CL22(bp, params->port,
+					      params->phy_addr,
+					      MDIO_REG_BANK_CL73_IEEEB1,
+					      MDIO_CL73_IEEEB1_AN_ADV1,
+					      &ld_pause);
+			CL45_RD_OVER_CL22(bp, params->port,
+					     params->phy_addr,
+					     MDIO_REG_BANK_CL73_IEEEB1,
+					     MDIO_CL73_IEEEB1_AN_LP_ADV1,
+					     &lp_pause);
+			pause_result = (ld_pause &
+					MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
+					>> 8;
+			pause_result |= (lp_pause &
+					MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
+					>> 10;
+			DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
+				 pause_result);
+		} else {
+
+			CL45_RD_OVER_CL22(bp, params->port,
+					      params->phy_addr,
+					      MDIO_REG_BANK_COMBO_IEEE0,
+					      MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+					      &ld_pause);
+			CL45_RD_OVER_CL22(bp, params->port,
+			       params->phy_addr,
+			       MDIO_REG_BANK_COMBO_IEEE0,
+			       MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+			       &lp_pause);
+			pause_result = (ld_pause &
 				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
-		pause_result |= (lp_pause &
+			pause_result |= (lp_pause &
 				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
-		DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
+			DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
+				 pause_result);
+		}
 		bnx2x_pause_resolve(vars, pause_result);
 	} else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
 		   (bnx2x_ext_phy_resolve_fc(params, vars))) {
@@ -1853,6 +1930,8 @@
 		    (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
 		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
 		    (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
+		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
+		    (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
 		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
 			vars->autoneg = AUTO_NEG_ENABLED;
 
@@ -1987,8 +2066,7 @@
 		    GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
 		    mode);
 
-	bnx2x_set_led(bp, params->port, LED_MODE_OPER,
-		    line_speed, params->hw_led_mode, params->chip_id);
+	bnx2x_set_led(params, LED_MODE_OPER, line_speed);
 	return 0;
 }
 
@@ -2122,6 +2200,8 @@
 				       MDIO_PMA_REG_CTRL,
 				       1<<15);
 			break;
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+			break;
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
 			DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
 			break;
@@ -2512,16 +2592,11 @@
 	/* Need to wait 100ms after reset */
 	msleep(100);
 
-	/* Set serial boot control for external load */
-	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
-
 	/* Micro controller re-boot */
 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_GEN_CTRL,
-		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+		       0x018B);
 
 	/* Set soft reset */
 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
@@ -2529,14 +2604,10 @@
 		       MDIO_PMA_REG_GEN_CTRL,
 		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
-	/* Set PLL register value to be same like in P13 ver */
 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
 		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_PLL_CTRL,
-		       0x73A0);
+		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
-	/* Clear soft reset.
-	Will automatically reset micro-controller re-boot */
 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
 		       MDIO_PMA_DEVAD,
 		       MDIO_PMA_REG_GEN_CTRL,
@@ -3462,8 +3533,8 @@
 		       MDIO_PMA_REG_8481_LINK_SIGNAL,
 		       &val1);
 	/* Set bit 2 to 0, and bits [1:0] to 10 */
-	val1 &= ~((1<<0) | (1<<2)); /* Clear bits 0,2*/
-	val1 |= (1<<1); /* Set bit 1 */
+	val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
+	val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
 
 	bnx2x_cl45_write(bp, params->port,
 		       ext_phy_type,
@@ -3497,36 +3568,19 @@
 		       MDIO_PMA_REG_8481_LED2_MASK,
 		       0);
 
-	/* LED3 (10G/1G/100/10G Activity) */
-	bnx2x_cl45_read(bp, params->port,
-		      ext_phy_type,
-		      ext_phy_addr,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_8481_LINK_SIGNAL,
-		      &val1);
-	/* Enable blink based on source 4(Activity) */
-	val1 &= ~((1<<7) | (1<<8)); /* Clear bits 7,8 */
-	val1 |= (1<<6); /* Set only bit 6 */
+	/* Unmask LED3 for 10G link */
 	bnx2x_cl45_write(bp, params->port,
 		       ext_phy_type,
 		       ext_phy_addr,
 		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_8481_LINK_SIGNAL,
-		       val1);
-
-	bnx2x_cl45_read(bp, params->port,
-		      ext_phy_type,
-		      ext_phy_addr,
-		      MDIO_PMA_DEVAD,
 		      MDIO_PMA_REG_8481_LED3_MASK,
-		      &val1);
-	val1 |= (1<<4); /* Unmask LED3 for 10G link */
+		       0x6);
 	bnx2x_cl45_write(bp, params->port,
 		       ext_phy_type,
 		       ext_phy_addr,
 		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_8481_LED3_MASK,
-		       val1);
+		       MDIO_PMA_REG_8481_LED3_BLINK,
+		       0);
 }
 
 
@@ -3544,7 +3598,10 @@
 			bnx2x_set_preemphasis(params);
 
 		/* forced speed requested? */
-		if (vars->line_speed != SPEED_AUTO_NEG) {
+		if (vars->line_speed != SPEED_AUTO_NEG ||
+		    ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
+		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+			  params->loopback_mode == LOOPBACK_EXT)) {
 			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
 
 			/* disable autoneg */
@@ -3693,19 +3750,6 @@
 				}
 			}
 			/* Force speed */
-			/* First enable LASI */
-			bnx2x_cl45_write(bp, params->port,
-				       ext_phy_type,
-				       ext_phy_addr,
-				       MDIO_PMA_DEVAD,
-				       MDIO_PMA_REG_RX_ALARM_CTRL,
-				       0x0400);
-			bnx2x_cl45_write(bp, params->port,
-				       ext_phy_type,
-				       ext_phy_addr,
-				       MDIO_PMA_DEVAD,
-				       MDIO_PMA_REG_LASI_CTRL, 0x0004);
-
 			if (params->req_line_speed == SPEED_10000) {
 				DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
 
@@ -3715,6 +3759,9 @@
 					       MDIO_PMA_DEVAD,
 					       MDIO_PMA_REG_DIGITAL_CTRL,
 					       0x400);
+				bnx2x_cl45_write(bp, params->port, ext_phy_type,
+					       ext_phy_addr, MDIO_PMA_DEVAD,
+					       MDIO_PMA_REG_LASI_CTRL, 1);
 			} else {
 				/* Force 1Gbps using autoneg with 1G
 				advertisment */
@@ -3756,6 +3803,17 @@
 					       MDIO_AN_DEVAD,
 					       MDIO_AN_REG_CTRL,
 					       0x1200);
+				bnx2x_cl45_write(bp, params->port,
+					       ext_phy_type,
+					       ext_phy_addr,
+					       MDIO_PMA_DEVAD,
+					       MDIO_PMA_REG_RX_ALARM_CTRL,
+					       0x0400);
+				bnx2x_cl45_write(bp, params->port,
+					       ext_phy_type,
+					       ext_phy_addr,
+					       MDIO_PMA_DEVAD,
+					       MDIO_PMA_REG_LASI_CTRL, 0x0004);
 
 			}
 			bnx2x_save_bcm_spirom_ver(bp, params->port,
@@ -4291,6 +4349,7 @@
 			break;
 		}
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
 			/* This phy uses the NIG latch mechanism since link
 				indication arrives through its LED4 and not via
 				its LASI signal, so we get steady signal
@@ -4298,6 +4357,12 @@
 			bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
 				    1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
+			bnx2x_cl45_write(bp, params->port,
+				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+				       ext_phy_addr,
+				       MDIO_PMA_DEVAD,
+				       MDIO_PMA_REG_CTRL, 0x0000);
+
 			bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
 			if (params->req_line_speed == SPEED_AUTO_NEG) {
 
@@ -4394,17 +4459,12 @@
 				    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
 					DP(NETIF_MSG_LINK, "Advertising 10G\n");
 					/* Restart autoneg for 10G*/
-			bnx2x_cl45_read(bp, params->port,
-				      ext_phy_type,
-				      ext_phy_addr,
-				      MDIO_AN_DEVAD,
-				      MDIO_AN_REG_CTRL, &val);
-			val |= 0x200;
+
 			bnx2x_cl45_write(bp, params->port,
 				       ext_phy_type,
 				       ext_phy_addr,
 				       MDIO_AN_DEVAD,
-				       MDIO_AN_REG_CTRL, val);
+				       MDIO_AN_REG_CTRL, 0x3200);
 				}
 			} else {
 				/* Force speed */
@@ -5148,6 +5208,7 @@
 			}
 			break;
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
 			/* Check 10G-BaseT link status */
 			/* Check PMD signal ok */
 			bnx2x_cl45_read(bp, params->port, ext_phy_type,
@@ -5363,8 +5424,10 @@
 		     (NIG_STATUS_XGXS0_LINK10G |
 		      NIG_STATUS_XGXS0_LINK_STATUS |
 		      NIG_STATUS_SERDES0_LINK_STATUS));
-	if (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
-	    == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) {
+	if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
+		== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
+	(XGXS_EXT_PHY_TYPE(params->ext_phy_config)
+		== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
 		bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
 	}
 	if (vars->phy_link_up) {
@@ -5477,6 +5540,7 @@
 		status = bnx2x_format_ver(spirom_ver, version, len);
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
 		spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
 			(spirom_ver & 0x7F);
 		status = bnx2x_format_ver(spirom_ver, version, len);
@@ -5728,13 +5792,15 @@
 }
 
 
-u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
-	       u16 hw_led_mode, u32 chip_id)
+u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
 {
+	u8 port = params->port;
+	u16 hw_led_mode = params->hw_led_mode;
 	u8 rc = 0;
 	u32 tmp;
 	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
+	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
 	DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
 		 speed, hw_led_mode);
@@ -5749,7 +5815,14 @@
 		break;
 
 	case LED_MODE_OPER:
-		REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
+		if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
+			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+		} else {
+			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+				   hw_led_mode);
+		}
+
 		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
 			   port*4, 0);
 		/* Set blinking rate to ~15.9Hz */
@@ -5761,7 +5834,7 @@
 		EMAC_WR(bp, EMAC_REG_EMAC_LED,
 			    (tmp & (~EMAC_LED_OVERRIDE)));
 
-		if (!CHIP_IS_E1H(bp) &&
+		if (CHIP_IS_E1(bp) &&
 		    ((speed == SPEED_2500) ||
 		     (speed == SPEED_1000) ||
 		     (speed == SPEED_100) ||
@@ -5864,6 +5937,7 @@
 
 	if (non_ext_phy ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
+	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
 	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
 	    (params->loopback_mode == LOOPBACK_EXT_PHY)) {
 		if (params->req_line_speed == SPEED_AUTO_NEG)
@@ -6030,10 +6104,7 @@
 		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
 			    params->port*4, 0);
 
-		bnx2x_set_led(bp, params->port, LED_MODE_OPER,
-			    vars->line_speed, params->hw_led_mode,
-			    params->chip_id);
-
+		bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
 	} else
 	/* No loopback */
 	{
@@ -6091,15 +6162,13 @@
 {
 	struct bnx2x *bp = params->bp;
 	u32 ext_phy_config = params->ext_phy_config;
-	u16 hw_led_mode = params->hw_led_mode;
-	u32 chip_id = params->chip_id;
 	u8 port = params->port;
 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
 	u32 val = REG_RD(bp, params->shmem_base +
 			     offsetof(struct shmem_region, dev_info.
 				      port_feature_config[params->port].
 				      config));
-
+	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
 	/* disable attentions */
 	vars->link_status = 0;
 	bnx2x_update_mng(params, vars->link_status);
@@ -6127,7 +6196,7 @@
 	 * Hold it as vars low
 	 */
 	 /* clear link led */
-	bnx2x_set_led(bp, port, LED_MODE_OFF, 0, hw_led_mode, chip_id);
+	bnx2x_set_led(params, LED_MODE_OFF, 0);
 	if (reset_ext_phy) {
 		switch (ext_phy_type) {
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
@@ -6163,6 +6232,22 @@
 			bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
 			break;
 		}
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+		{
+			u8 ext_phy_addr =
+				XGXS_EXT_PHY_ADDR(params->ext_phy_config);
+			bnx2x_cl45_write(bp, port,
+				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+				       ext_phy_addr,
+				       MDIO_AN_DEVAD,
+				       MDIO_AN_REG_CTRL, 0x0000);
+			bnx2x_cl45_write(bp, port,
+				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+				       ext_phy_addr,
+				       MDIO_PMA_DEVAD,
+				       MDIO_PMA_REG_CTRL, 1);
+			break;
+		}
 		default:
 			/* HW reset */
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
@@ -6198,9 +6283,7 @@
 	u8 port = params->port;
 
 	DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
-	bnx2x_set_led(bp, port, LED_MODE_OFF,
-		    0, params->hw_led_mode,
-		    params->chip_id);
+	bnx2x_set_led(params, LED_MODE_OFF, 0);
 
 	/* indicate no mac active */
 	vars->mac_type = MAC_TYPE_NONE;
@@ -6237,15 +6320,13 @@
 	vars->link_status |= LINK_STATUS_LINK_UP;
 	if (link_10g) {
 		bnx2x_bmac_enable(params, vars, 0);
-		bnx2x_set_led(bp, port, LED_MODE_OPER,
-			    SPEED_10000, params->hw_led_mode,
-			    params->chip_id);
-
+		bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
 	} else {
-		bnx2x_emac_enable(params, vars, 0);
 		rc = bnx2x_emac_program(params, vars->line_speed,
 				      vars->duplex);
 
+		bnx2x_emac_enable(params, vars, 0);
+
 		/* AN complete? */
 		if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
 			if (!(vars->phy_flags &
@@ -6343,6 +6424,7 @@
 
 	if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
 	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
+	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
 	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
 	    (ext_phy_link_up && !vars->phy_link_up))
 		bnx2x_init_internal_phy(params, vars, 0);
@@ -6578,6 +6660,13 @@
 	return 0;
 }
 
+
+static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+{
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, 1);
+	return 0;
+}
 u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 {
 	u8 rc = 0;
@@ -6607,7 +6696,9 @@
 		/* GPIO1 affects both ports, so there's need to pull
 		it for single port alone */
 		rc = bnx2x_8726_common_init_phy(bp, shmem_base);
-
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+		rc = bnx2x_84823_common_init_phy(bp, shmem_base);
 		break;
 	default:
 		DP(NETIF_MSG_LINK,
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index f3e2522..40c2981 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -178,8 +178,7 @@
    Basically, the CLC takes care of the led for the link, but in case one needs
    to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
    blink the led, and LED_MODE_OFF to set the led off.*/
-u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
-	       u16 hw_led_mode, u32 chip_id);
+u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed);
 #define LED_MODE_OFF	0
 #define LED_MODE_OPER 	2
 
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 59b58d8..61974b74 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -56,8 +56,8 @@
 #include "bnx2x_init_ops.h"
 #include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION	"1.52.1-1"
-#define DRV_MODULE_RELDATE	"2009/10/13"
+#define DRV_MODULE_VERSION	"1.52.1-3"
+#define DRV_MODULE_RELDATE	"2009/11/05"
 #define BNX2X_BC_VER		0x040200
 
 #include <linux/firmware.h>
@@ -10855,7 +10855,6 @@
 static int bnx2x_phys_id(struct net_device *dev, u32 data)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	int port = BP_PORT(bp);
 	int i;
 
 	if (!netif_running(dev))
@@ -10869,13 +10868,10 @@
 
 	for (i = 0; i < (data * 2); i++) {
 		if ((i % 2) == 0)
-			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
-				      bp->link_params.hw_led_mode,
-				      bp->link_params.chip_id);
+			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
+				      SPEED_1000);
 		else
-			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
-				      bp->link_params.hw_led_mode,
-				      bp->link_params.chip_id);
+			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
 
 		msleep_interruptible(500);
 		if (signal_pending(current))
@@ -10883,10 +10879,8 @@
 	}
 
 	if (bp->link_vars.link_up)
-		bnx2x_set_led(bp, port, LED_MODE_OPER,
-			      bp->link_vars.line_speed,
-			      bp->link_params.hw_led_mode,
-			      bp->link_params.chip_id);
+		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
+			      bp->link_vars.line_speed);
 
 	return 0;
 }
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index aa76cba..b668173 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -4772,18 +4772,28 @@
 #define PCI_ID_VAL2					0x438
 
 
-#define MDIO_REG_BANK_CL73_IEEEB0			0x0
-#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL		0x0
+#define MDIO_REG_BANK_CL73_IEEEB0	0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL	0x0
 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN	0x0200
 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN		0x1000
 #define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST	0x8000
 
-#define MDIO_REG_BANK_CL73_IEEEB1			0x10
-#define MDIO_CL73_IEEEB1_AN_ADV2				0x01
+#define MDIO_REG_BANK_CL73_IEEEB1	0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1		0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE			0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC		0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH		0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK		0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2		0x01
 #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M		0x0000
 #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX		0x0020
 #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4		0x0040
 #define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR		0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1		0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE		0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC		0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH		0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK		0x0C00
 
 #define MDIO_REG_BANK_RX0				0x80b0
 #define MDIO_RX0_RX_STATUS				0x10
@@ -4910,6 +4920,8 @@
 
 
 #define MDIO_REG_BANK_10G_PARALLEL_DETECT		0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS		0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK		0x8000
 #define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL		0x11
 #define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN	0x1
 #define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK		0x13
@@ -4934,6 +4946,8 @@
 #define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G			0x0010
 #define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M			0x0008
 #define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M			0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2			0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 		0x0002
 #define MDIO_SERDES_DIGITAL_MISC1				0x18
 #define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK			0xE000
 #define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M			0x0000
@@ -5115,6 +5129,7 @@
 #define MDIO_PMA_REG_8481_LED1_MASK	0xa82c
 #define MDIO_PMA_REG_8481_LED2_MASK	0xa82f
 #define MDIO_PMA_REG_8481_LED3_MASK	0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK	0xa834
 #define MDIO_PMA_REG_8481_SIGNAL_MASK	0xa835
 #define MDIO_PMA_REG_8481_LINK_SIGNAL	0xa83b
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c3fa31c..1d05819 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1956,7 +1956,7 @@
 	struct port *port, *prev_port, *temp_port;
 	struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
 	int select_new_active_agg = 0;
-	
+
 	// find the aggregator related to this slave
 	aggregator = &(SLAVE_AD_INFO(slave).aggregator);
 
@@ -2024,7 +2024,7 @@
 
 				// clear the aggregator
 				ad_clear_agg(aggregator);
-				
+
 				if (select_new_active_agg) {
 					ad_agg_selection_logic(__get_first_agg(port));
 				}
@@ -2075,7 +2075,7 @@
 			}
 		}
 	}
-	port->slave=NULL;	
+	port->slave=NULL;
 }
 
 /**
@@ -2301,7 +2301,7 @@
 }
 
 /*
- * set link state for bonding master: if we have an active 
+ * set link state for bonding master: if we have an active
  * aggregator, we're up, if not, we're down.  Presumes that we cannot
  * have an active aggregator if there are no slaves with link up.
  *
@@ -2395,7 +2395,7 @@
 		goto out;
 	}
 
-	slave_agg_no = bond->xmit_hash_policy(skb, dev, slaves_in_agg);
+	slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
 
 	bond_for_each_slave(bond, slave, i) {
 		struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
@@ -2445,9 +2445,6 @@
 	struct slave *slave = NULL;
 	int ret = NET_RX_DROP;
 
-	if (dev_net(dev) != &init_net)
-		goto out;
-
 	if (!(dev->flags & IFF_MASTER))
 		goto out;
 
@@ -2468,4 +2465,3 @@
 
 	return ret;
 }
-
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9b5936f..0d30d1e 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -355,9 +355,6 @@
 	struct arp_pkt *arp = (struct arp_pkt *)skb->data;
 	int res = NET_RX_DROP;
 
-	if (dev_net(bond_dev) != &init_net)
-		goto out;
-
 	while (bond_dev->priv_flags & IFF_802_1Q_VLAN)
 		bond_dev = vlan_dev_real_dev(bond_dev);
 
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 83921ab..b72e1dc 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -25,6 +25,7 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
+#include <net/netns/generic.h>
 #include "bonding.h"
 
 /*
@@ -152,11 +153,9 @@
 	struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
 	struct bonding *bond;
 	struct vlan_entry *vlan;
+	struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
 
-	if (dev_net(event_dev) != &init_net)
-		return NOTIFY_DONE;
-
-	list_for_each_entry(bond, &bond_dev_list, bond_list) {
+	list_for_each_entry(bond, &bn->dev_list, bond_list) {
 		if (bond->dev == event_dev) {
 			switch (event) {
 			case NETDEV_UP:
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index feb03ad..ecea6c2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -75,6 +75,7 @@
 #include <linux/jiffies.h>
 #include <net/route.h>
 #include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -157,11 +158,7 @@
 static const char * const version =
 	DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
 
-LIST_HEAD(bond_dev_list);
-
-#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *bond_proc_dir;
-#endif
+int bond_net_id;
 
 static __be32 arp_target[BOND_MAX_ARP_TARGETS];
 static int arp_ip_count;
@@ -227,7 +224,7 @@
 
 static void bond_send_gratuitous_arp(struct bonding *bond);
 static int bond_init(struct net_device *bond_dev);
-static void bond_deinit(struct net_device *bond_dev);
+static void bond_uninit(struct net_device *bond_dev);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -707,7 +704,7 @@
 			       struct net_device *slave_dev, int reporting)
 {
 	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-	static int (*ioctl)(struct net_device *, struct ifreq *, int);
+	int (*ioctl)(struct net_device *, struct ifreq *, int);
 	struct ifreq ifr;
 	struct mii_ioctl_data *mii;
 
@@ -2003,25 +2000,6 @@
 }
 
 /*
-* Destroy a bonding device.
-* Must be under rtnl_lock when this function is called.
-*/
-static void bond_uninit(struct net_device *bond_dev)
-{
-	struct bonding *bond = netdev_priv(bond_dev);
-
-	bond_deinit(bond_dev);
-	bond_destroy_sysfs_entry(bond);
-
-	if (bond->wq)
-		destroy_workqueue(bond->wq);
-
-	netif_addr_lock_bh(bond_dev);
-	bond_mc_list_destroy(bond);
-	netif_addr_unlock_bh(bond_dev);
-}
-
-/*
 * First release a slave and than destroy the bond if no more slaves are left.
 * Must be under rtnl_lock when this function is called.
 */
@@ -2605,7 +2583,7 @@
 		fl.fl4_dst = targets[i];
 		fl.fl4_tos = RTO_ONLINK;
 
-		rv = ip_route_output_key(&init_net, &rt, &fl);
+		rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
 		if (rv) {
 			if (net_ratelimit()) {
 				pr_warning(DRV_NAME
@@ -2713,9 +2691,6 @@
 	unsigned char *arp_ptr;
 	__be32 sip, tip;
 
-	if (dev_net(dev) != &init_net)
-		goto out;
-
 	if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
 		goto out;
 
@@ -3378,10 +3353,11 @@
 static void bond_create_proc_entry(struct bonding *bond)
 {
 	struct net_device *bond_dev = bond->dev;
+	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
 
-	if (bond_proc_dir) {
+	if (bn->proc_dir) {
 		bond->proc_entry = proc_create_data(bond_dev->name,
-						    S_IRUGO, bond_proc_dir,
+						    S_IRUGO, bn->proc_dir,
 						    &bond_info_fops, bond);
 		if (bond->proc_entry == NULL)
 			pr_warning(DRV_NAME
@@ -3394,8 +3370,11 @@
 
 static void bond_remove_proc_entry(struct bonding *bond)
 {
-	if (bond_proc_dir && bond->proc_entry) {
-		remove_proc_entry(bond->proc_file_name, bond_proc_dir);
+	struct net_device *bond_dev = bond->dev;
+	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
+
+	if (bn->proc_dir && bond->proc_entry) {
+		remove_proc_entry(bond->proc_file_name, bn->proc_dir);
 		memset(bond->proc_file_name, 0, IFNAMSIZ);
 		bond->proc_entry = NULL;
 	}
@@ -3404,11 +3383,11 @@
 /* Create the bonding directory under /proc/net, if doesn't exist yet.
  * Caller must hold rtnl_lock.
  */
-static void bond_create_proc_dir(void)
+static void bond_create_proc_dir(struct bond_net *bn)
 {
-	if (!bond_proc_dir) {
-		bond_proc_dir = proc_mkdir(DRV_NAME, init_net.proc_net);
-		if (!bond_proc_dir)
+	if (!bn->proc_dir) {
+		bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
+		if (!bn->proc_dir)
 			pr_warning(DRV_NAME
 				": Warning: cannot create /proc/net/%s\n",
 				DRV_NAME);
@@ -3418,11 +3397,11 @@
 /* Destroy the bonding directory under /proc/net, if empty.
  * Caller must hold rtnl_lock.
  */
-static void bond_destroy_proc_dir(void)
+static void bond_destroy_proc_dir(struct bond_net *bn)
 {
-	if (bond_proc_dir) {
-		remove_proc_entry(DRV_NAME, init_net.proc_net);
-		bond_proc_dir = NULL;
+	if (bn->proc_dir) {
+		remove_proc_entry(DRV_NAME, bn->net->proc_net);
+		bn->proc_dir = NULL;
 	}
 }
 
@@ -3436,11 +3415,11 @@
 {
 }
 
-static void bond_create_proc_dir(void)
+static void bond_create_proc_dir(struct bond_net *bn)
 {
 }
 
-static void bond_destroy_proc_dir(void)
+static void bond_destroy_proc_dir(struct bond_net *bn)
 {
 }
 
@@ -3457,9 +3436,6 @@
 	bond_remove_proc_entry(bond);
 	bond_create_proc_entry(bond);
 
-	bond_destroy_sysfs_entry(bond);
-	bond_create_sysfs_entry(bond);
-
 	return NOTIFY_DONE;
 }
 
@@ -3471,9 +3447,6 @@
 	switch (event) {
 	case NETDEV_CHANGENAME:
 		return bond_event_changename(event_bond);
-	case NETDEV_UNREGISTER:
-		bond_release_all(event_bond->dev);
-		break;
 	default:
 		break;
 	}
@@ -3565,9 +3538,6 @@
 {
 	struct net_device *event_dev = (struct net_device *)ptr;
 
-	if (dev_net(event_dev) != &init_net)
-		return NOTIFY_DONE;
-
 	pr_debug("event_dev: %s, event: %lx\n",
 		(event_dev ? event_dev->name : "None"),
 		event);
@@ -3600,13 +3570,11 @@
 {
 	struct in_ifaddr *ifa = ptr;
 	struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev;
+	struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
 	struct bonding *bond;
 	struct vlan_entry *vlan;
 
-	if (dev_net(ifa->ifa_dev->dev) != &init_net)
-		return NOTIFY_DONE;
-
-	list_for_each_entry(bond, &bond_dev_list, bond_list) {
+	list_for_each_entry(bond, &bn->dev_list, bond_list) {
 		if (bond->dev == event_dev) {
 			switch (event) {
 			case NETDEV_UP:
@@ -3696,18 +3664,17 @@
  * Hash for the output device based upon layer 2 and layer 3 data. If
  * the packet is not IP mimic bond_xmit_hash_policy_l2()
  */
-static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
-				     struct net_device *bond_dev, int count)
+static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 	struct iphdr *iph = ip_hdr(skb);
 
 	if (skb->protocol == htons(ETH_P_IP)) {
 		return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
-			(data->h_dest[5] ^ bond_dev->dev_addr[5])) % count;
+			(data->h_dest[5] ^ data->h_source[5])) % count;
 	}
 
-	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+	return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*
@@ -3715,8 +3682,7 @@
  * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
  * altogether not IP, mimic bond_xmit_hash_policy_l2()
  */
-static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
-				    struct net_device *bond_dev, int count)
+static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 	struct iphdr *iph = ip_hdr(skb);
@@ -3734,18 +3700,17 @@
 
 	}
 
-	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+	return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*
  * Hash for the output device based upon layer 2 data
  */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb,
-				   struct net_device *bond_dev, int count)
+static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 
-	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+	return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*-------------------------- Device entry points ----------------------------*/
@@ -3978,7 +3943,7 @@
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	slave_dev = dev_get_by_name(&init_net, ifr->ifr_slave);
+	slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave);
 
 	pr_debug("slave_dev=%p: \n", slave_dev);
 
@@ -4334,7 +4299,7 @@
 	if (!BOND_IS_OK(bond))
 		goto out;
 
-	slave_no = bond->xmit_hash_policy(skb, bond_dev, bond->slave_cnt);
+	slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
 
 	bond_for_each_slave(bond, slave, i) {
 		slave_no--;
@@ -4615,37 +4580,29 @@
 		cancel_delayed_work(&bond->ad_work);
 }
 
-/* De-initialize device specific data.
- * Caller must hold rtnl_lock.
- */
-static void bond_deinit(struct net_device *bond_dev)
+/*
+* Destroy a bonding device.
+* Must be under rtnl_lock when this function is called.
+*/
+static void bond_uninit(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 
+	/* Release the bonded slaves */
+	bond_release_all(bond_dev);
+
 	list_del(&bond->bond_list);
 
 	bond_work_cancel_all(bond);
 
 	bond_remove_proc_entry(bond);
-}
 
-/* Unregister and free all bond devices.
- * Caller must hold rtnl_lock.
- */
-static void bond_free_all(void)
-{
-	struct bonding *bond, *nxt;
+	if (bond->wq)
+		destroy_workqueue(bond->wq);
 
-	list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) {
-		struct net_device *bond_dev = bond->dev;
-
-		bond_work_cancel_all(bond);
-		/* Release the bonded slaves */
-		bond_release_all(bond_dev);
-		unregister_netdevice(bond_dev);
-	}
-
-	bond_destroy_proc_dir();
+	netif_addr_lock_bh(bond_dev);
+	bond_mc_list_destroy(bond);
+	netif_addr_unlock_bh(bond_dev);
 }
 
 /*------------------------- Module initialization ---------------------------*/
@@ -5067,6 +5024,7 @@
 static int bond_init(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
 
 	pr_debug("Begin bond_init for %s\n", bond_dev->name);
 
@@ -5079,30 +5037,41 @@
 	netif_carrier_off(bond_dev);
 
 	bond_create_proc_entry(bond);
-	list_add_tail(&bond->bond_list, &bond_dev_list);
+	list_add_tail(&bond->bond_list, &bn->dev_list);
 
+	bond_prepare_sysfs_group(bond);
 	return 0;
 }
 
+static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+			return -EINVAL;
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+			return -EADDRNOTAVAIL;
+	}
+	return 0;
+}
+
+static struct rtnl_link_ops bond_link_ops __read_mostly = {
+	.kind		= "bond",
+	.priv_size	= sizeof(struct bonding),
+	.setup		= bond_setup,
+	.validate	= bond_validate,
+};
+
 /* Create a new bond based on the specified name and bonding parameters.
  * If name is NULL, obtain a suitable "bond%d" name for us.
  * Caller must NOT hold rtnl_lock; we need to release it here before we
  * set up our sysfs entries.
  */
-int bond_create(const char *name)
+int bond_create(struct net *net, const char *name)
 {
 	struct net_device *bond_dev;
 	int res;
 
 	rtnl_lock();
-	/* Check to see if the bond already exists. */
-	/* FIXME: pass netns from caller */
-	if (name && __dev_get_by_name(&init_net, name)) {
-		pr_err(DRV_NAME ": cannot add bond %s; already exists\n",
-		       name);
-		res = -EEXIST;
-		goto out_rtnl;
-	}
 
 	bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
 				bond_setup);
@@ -5110,9 +5079,12 @@
 		pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n",
 		       name);
 		res = -ENOMEM;
-		goto out_rtnl;
+		goto out;
 	}
 
+	dev_net_set(bond_dev, net);
+	bond_dev->rtnl_link_ops = &bond_link_ops;
+
 	if (!name) {
 		res = dev_alloc_name(bond_dev, "bond%d");
 		if (res < 0)
@@ -5120,27 +5092,55 @@
 	}
 
 	res = register_netdevice(bond_dev);
-	if (res < 0)
-		goto out_bond;
 
-	res = bond_create_sysfs_entry(netdev_priv(bond_dev));
-	if (res < 0)
-		goto out_unreg;
-
-	rtnl_unlock();
-	return 0;
-
-out_unreg:
-	unregister_netdevice(bond_dev);
-out_bond:
-	bond_deinit(bond_dev);
-out_netdev:
-	free_netdev(bond_dev);
-out_rtnl:
+out:
 	rtnl_unlock();
 	return res;
+out_netdev:
+	free_netdev(bond_dev);
+	goto out;
 }
 
+static int bond_net_init(struct net *net)
+{
+	struct bond_net *bn;
+	int err;
+
+	err = -ENOMEM;
+	bn = kzalloc(sizeof(struct bond_net), GFP_KERNEL);
+	if (bn == NULL)
+		goto out;
+
+	bn->net = net;
+	INIT_LIST_HEAD(&bn->dev_list);
+
+	err = net_assign_generic(net, bond_net_id, bn);
+	if (err)
+		goto out_free;
+
+	bond_create_proc_dir(bn);
+out:
+	return err;
+out_free:
+	kfree(bn);
+	goto out;
+}
+
+static void bond_net_exit(struct net *net)
+{
+	struct bond_net *bn;
+
+	bn = net_generic(net, bond_net_id);
+
+	bond_destroy_proc_dir(bn);
+	kfree(bn);
+}
+
+static struct pernet_operations bond_net_ops = {
+	.init = bond_net_init,
+	.exit = bond_net_exit,
+};
+
 static int __init bonding_init(void)
 {
 	int i;
@@ -5152,10 +5152,16 @@
 	if (res)
 		goto out;
 
-	bond_create_proc_dir();
+	res = register_pernet_gen_subsys(&bond_net_id, &bond_net_ops);
+	if (res)
+		goto out;
+
+	res = rtnl_link_register(&bond_link_ops);
+	if (res)
+		goto err_link;
 
 	for (i = 0; i < max_bonds; i++) {
-		res = bond_create(NULL);
+		res = bond_create(&init_net, NULL);
 		if (res)
 			goto err;
 	}
@@ -5167,14 +5173,13 @@
 	register_netdevice_notifier(&bond_netdev_notifier);
 	register_inetaddr_notifier(&bond_inetaddr_notifier);
 	bond_register_ipv6_notifier();
-
-	goto out;
-err:
-	rtnl_lock();
-	bond_free_all();
-	rtnl_unlock();
 out:
 	return res;
+err:
+	rtnl_link_unregister(&bond_link_ops);
+err_link:
+	unregister_pernet_gen_subsys(bond_net_id, &bond_net_ops);
+	goto out;
 
 }
 
@@ -5186,9 +5191,8 @@
 
 	bond_destroy_sysfs();
 
-	rtnl_lock();
-	bond_free_all();
-	rtnl_unlock();
+	rtnl_link_unregister(&bond_link_ops);
+	unregister_pernet_gen_subsys(bond_net_id, &bond_net_ops);
 }
 
 module_init(bonding_init);
@@ -5197,3 +5201,4 @@
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
+MODULE_ALIAS_RTNL_LINK("bond");
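With bond_link_ops registered and the "bond" rtnl link alias exported, bonding devices can now also be created through rtnetlink (for example `ip link add bond0 type bond`, which can autoload the module on demand); the sysfs bonding_masters interface, made namespace-aware in the next file, remains available alongside it.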
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index dca7d82..a59094f 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -35,6 +35,8 @@
 #include <linux/rtnetlink.h>
 #include <linux/etherdevice.h>
 #include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <linux/nsproxy.h>
 
 #include "bonding.h"
 
@@ -47,12 +49,14 @@
  */
 static ssize_t bonding_show_bonds(struct class *cls, char *buf)
 {
+	struct net *net = current->nsproxy->net_ns;
+	struct bond_net *bn = net_generic(net, bond_net_id);
 	int res = 0;
 	struct bonding *bond;
 
 	rtnl_lock();
 
-	list_for_each_entry(bond, &bond_dev_list, bond_list) {
+	list_for_each_entry(bond, &bn->dev_list, bond_list) {
 		if (res > (PAGE_SIZE - IFNAMSIZ)) {
 			/* not enough space for another interface name */
 			if ((PAGE_SIZE - res) > 10)
@@ -69,11 +73,12 @@
 	return res;
 }
 
-static struct net_device *bond_get_by_name(const char *ifname)
+static struct net_device *bond_get_by_name(struct net *net, const char *ifname)
 {
+	struct bond_net *bn = net_generic(net, bond_net_id);
 	struct bonding *bond;
 
-	list_for_each_entry(bond, &bond_dev_list, bond_list) {
+	list_for_each_entry(bond, &bn->dev_list, bond_list) {
 		if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0)
 			return bond->dev;
 	}
@@ -91,6 +96,7 @@
 static ssize_t bonding_store_bonds(struct class *cls,
 				   const char *buffer, size_t count)
 {
+	struct net *net = current->nsproxy->net_ns;
 	char command[IFNAMSIZ + 1] = {0, };
 	char *ifname;
 	int rv, res = count;
@@ -104,7 +110,7 @@
 	if (command[0] == '+') {
 		pr_info(DRV_NAME
 			": %s is being created...\n", ifname);
-		rv = bond_create(ifname);
+		rv = bond_create(net, ifname);
 		if (rv) {
 			pr_info(DRV_NAME ": Bond creation failed.\n");
 			res = rv;
@@ -113,7 +119,7 @@
 		struct net_device *bond_dev;
 
 		rtnl_lock();
-		bond_dev = bond_get_by_name(ifname);
+		bond_dev = bond_get_by_name(net, ifname);
 		if (bond_dev) {
 			pr_info(DRV_NAME ": %s is being deleted...\n",
 				ifname);
@@ -238,8 +244,7 @@
 		/* Got a slave name in ifname.  Is it already in the list? */
 		found = 0;
 
-		/* FIXME: get netns from sysfs object */
-		dev = __dev_get_by_name(&init_net, ifname);
+		dev = __dev_get_by_name(dev_net(bond->dev), ifname);
 		if (!dev) {
 			pr_info(DRV_NAME
 			       ": %s: Interface %s does not exist!\n",
@@ -1616,24 +1621,8 @@
  * Initialize sysfs for each bond.  This sets up and registers
  * the 'bondctl' directory for each individual bond under /sys/class/net.
  */
-int bond_create_sysfs_entry(struct bonding *bond)
+void bond_prepare_sysfs_group(struct bonding *bond)
 {
-	struct net_device *dev = bond->dev;
-	int err;
-
-	err = sysfs_create_group(&(dev->dev.kobj), &bonding_group);
-	if (err)
-		pr_emerg("eek! didn't create group!\n");
-
-	return err;
-}
-/*
- * Remove sysfs entries for each bond.
- */
-void bond_destroy_sysfs_entry(struct bonding *bond)
-{
-	struct net_device *dev = bond->dev;
-
-	sysfs_remove_group(&(dev->dev.kobj), &bonding_group);
+	bond->dev->sysfs_groups[0] = &bonding_group;
 }
 
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 9c03c2e..a51ae7d 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -30,8 +30,6 @@
 
 #define BOND_MAX_ARP_TARGETS	16
 
-extern struct list_head bond_dev_list;
-
 #define IS_UP(dev)					   \
 	      ((((dev)->flags & IFF_UP) == IFF_UP)	&& \
 	       netif_running(dev)			&& \
@@ -206,7 +204,7 @@
 #endif /* CONFIG_PROC_FS */
 	struct   list_head bond_list;
 	struct   dev_mc_list *mc_list;
-	int      (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int);
+	int      (*xmit_hash_policy)(struct sk_buff *, int);
 	__be32   master_ip;
 	u16      flags;
 	u16      rr_tx_counter;
@@ -327,12 +325,11 @@
 
 struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
-int bond_create(const char *name);
+int bond_create(struct net *net, const char *name);
 int  bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_create_sysfs(void);
 void bond_destroy_sysfs(void);
-void bond_destroy_sysfs_entry(struct bonding *bond);
-int bond_create_sysfs_entry(struct bonding *bond);
+void bond_prepare_sysfs_group(struct bonding *bond);
 int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
 void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
@@ -347,8 +344,16 @@
 void bond_register_arp(struct bonding *);
 void bond_unregister_arp(struct bonding *);
 
+struct bond_net {
+	struct net *		net;	/* Associated network namespace */
+	struct list_head	dev_list;
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *	proc_dir;
+#endif
+};
+
 /* exported from bond_main.c */
-extern struct list_head bond_dev_list;
+extern int bond_net_id;
 extern const struct bond_parm_tbl bond_lacp_tbl[];
 extern const struct bond_parm_tbl bond_mode_tbl[];
 extern const struct bond_parm_tbl xmit_hashtype_tbl[];
@@ -377,4 +382,3 @@
 #endif
 
 #endif /* _LINUX_BONDING_H */
-
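The bonding changes above replace the single global bond_dev_list with per-namespace state reached through net_generic(). A minimal sketch of the lookup pattern the converted call sites share (the walker function below is made up for illustration, not part of the patch):

static void example_for_each_bond(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);
	struct bonding *bond;

	/* walk only the bonds that belong to this namespace */
	list_for_each_entry(bond, &bn->dev_list, bond_list) {
		/* per-bond work, e.g. matching bond->dev against an event device */
	}
}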
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index b13fd91..cbe3fce 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -221,38 +221,6 @@
 	set_mb_mode_prio(priv, mb, mode, 0);
 }
 
-static struct sk_buff *alloc_can_skb(struct net_device *dev,
-		struct can_frame **cf)
-{
-	struct sk_buff *skb;
-
-	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
-	if (unlikely(!skb))
-		return NULL;
-
-	skb->protocol = htons(ETH_P_CAN);
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-
-	return skb;
-}
-
-static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
-		struct can_frame **cf)
-{
-	struct sk_buff *skb;
-
-	skb = alloc_can_skb(dev, cf);
-	if (unlikely(!skb))
-		return NULL;
-
-	memset(*cf, 0, sizeof(struct can_frame));
-	(*cf)->can_id = CAN_ERR_FLAG;
-	(*cf)->can_dlc = CAN_ERR_DLC;
-
-	return skb;
-}
-
 /*
  * Swtich transceiver on or off
  */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 39b99f5..c3db111 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -366,17 +366,12 @@
 	can_flush_echo_skb(dev);
 
 	/* send restart message upstream */
-	skb = dev_alloc_skb(sizeof(struct can_frame));
+	skb = alloc_can_err_skb(dev, &cf);
 	if (skb == NULL) {
 		err = -ENOMEM;
 		goto restart;
 	}
-	skb->dev = dev;
-	skb->protocol = htons(ETH_P_CAN);
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-	memset(cf, 0, sizeof(struct can_frame));
-	cf->can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
-	cf->can_dlc = CAN_ERR_DLC;
+	cf->can_id |= CAN_ERR_RESTARTED;
 
 	netif_rx(skb);
 
@@ -449,6 +444,39 @@
 	dev->features = NETIF_F_NO_CSUM;
 }
 
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+{
+	struct sk_buff *skb;
+
+	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
+	if (unlikely(!skb))
+		return NULL;
+
+	skb->protocol = htons(ETH_P_CAN);
+	skb->pkt_type = PACKET_BROADCAST;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+	memset(*cf, 0, sizeof(struct can_frame));
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_skb);
+
+struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_can_skb(dev, cf);
+	if (unlikely(!skb))
+		return NULL;
+
+	(*cf)->can_id = CAN_ERR_FLAG;
+	(*cf)->can_dlc = CAN_ERR_DLC;
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+
 /*
  * Allocate and setup space for the CAN network device
  */
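alloc_can_skb() and alloc_can_err_skb() are now exported from can/dev.c, and the driver hunks that follow (sja1000, ti_hecc, ems_usb) all collapse onto the same receive pattern. A rough sketch of that pattern, simplified and not taken from any single driver:

static void example_can_rx(struct net_device *dev, u32 id, u8 dlc, const u8 *data)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	/* helper sets protocol/pkt_type/ip_summed and zeroes *cf */
	skb = alloc_can_skb(dev, &cf);
	if (!skb)
		return;

	cf->can_id = id;
	cf->can_dlc = dlc;
	memcpy(cf->data, data, dlc);

	netif_rx(skb);
}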
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 96d8be4..782a47f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -296,11 +296,9 @@
 	uint8_t dlc;
 	int i;
 
-	skb = dev_alloc_skb(sizeof(struct can_frame));
+	skb = alloc_can_skb(dev, &cf);
 	if (skb == NULL)
 		return;
-	skb->dev = dev;
-	skb->protocol = htons(ETH_P_CAN);
 
 	fi = priv->read_reg(priv, REG_FI);
 	dlc = fi & 0x0F;
@@ -323,8 +321,6 @@
 	if (fi & FI_RTR)
 		id |= CAN_RTR_FLAG;
 
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-	memset(cf, 0, sizeof(struct can_frame));
 	cf->can_id = id;
 	cf->can_dlc = dlc;
 	for (i = 0; i < dlc; i++)
@@ -351,15 +347,9 @@
 	enum can_state state = priv->can.state;
 	uint8_t ecc, alc;
 
-	skb = dev_alloc_skb(sizeof(struct can_frame));
+	skb = alloc_can_err_skb(dev, &cf);
 	if (skb == NULL)
 		return -ENOMEM;
-	skb->dev = dev;
-	skb->protocol = htons(ETH_P_CAN);
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-	memset(cf, 0, sizeof(struct can_frame));
-	cf->can_id = CAN_ERR_FLAG;
-	cf->can_dlc = CAN_ERR_DLC;
 
 	if (isrc & IRQ_DOI) {
 		/* data overrun interrupt */
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 3373560..9dd076a 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -213,6 +213,7 @@
 	{.compatible = "nxp,sja1000"},
 	{},
 };
+MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
 
 static struct of_platform_driver sja1000_ofp_driver = {
 	.owner = THIS_MODULE,
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 23a7128..07e8016 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -535,18 +535,15 @@
 	u32 data, mbx_mask;
 	unsigned long flags;
 
-	skb = netdev_alloc_skb(priv->ndev, sizeof(struct can_frame));
+	skb = alloc_can_skb(priv->ndev, &cf);
 	if (!skb) {
 		if (printk_ratelimit())
 			dev_err(priv->ndev->dev.parent,
-				"ti_hecc_rx_pkt: netdev_alloc_skb() failed\n");
+				"ti_hecc_rx_pkt: alloc_can_skb() failed\n");
 		return -ENOMEM;
 	}
-	skb->protocol = __constant_htons(ETH_P_CAN);
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	mbx_mask = BIT(mbxno);
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
 	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
 	if (data & HECC_CANMID_IDE)
 		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -656,19 +653,13 @@
 	struct sk_buff *skb;
 
 	/* propogate the error condition to the can stack */
-	skb = netdev_alloc_skb(ndev, sizeof(struct can_frame));
+	skb = alloc_can_err_skb(ndev, &cf);
 	if (!skb) {
 		if (printk_ratelimit())
 			dev_err(priv->ndev->dev.parent,
-				"ti_hecc_error: netdev_alloc_skb() failed\n");
+				"ti_hecc_error: alloc_can_err_skb() failed\n");
 		return -ENOMEM;
 	}
-	skb->protocol = __constant_htons(ETH_P_CAN);
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-	memset(cf, 0, sizeof(struct can_frame));
-	cf->can_id = CAN_ERR_FLAG;
-	cf->can_dlc = CAN_ERR_DLC;
 
 	if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
 		if ((int_status & HECC_CANGIF_BOIF) == 0) {
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index a65f56a..3685f3e 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -311,14 +311,10 @@
 	int i;
 	struct net_device_stats *stats = &dev->netdev->stats;
 
-	skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
+	skb = alloc_can_skb(dev->netdev, &cf);
 	if (skb == NULL)
 		return;
 
-	skb->protocol = htons(ETH_P_CAN);
-
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-
 	cf->can_id = msg->msg.can_msg.id;
 	cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
 
@@ -346,18 +342,10 @@
 	struct sk_buff *skb;
 	struct net_device_stats *stats = &dev->netdev->stats;
 
-	skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
+	skb = alloc_can_err_skb(dev->netdev, &cf);
 	if (skb == NULL)
 		return;
 
-	skb->protocol = htons(ETH_P_CAN);
-
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-	memset(cf, 0, sizeof(struct can_frame));
-
-	cf->can_id = CAN_ERR_FLAG;
-	cf->can_dlc = CAN_ERR_DLC;
-
 	if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
 		u8 state = msg->msg.can_state;
 
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 333b1d1..e503384 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -408,7 +408,7 @@
 {
 	struct cnic_dev *dev;
 
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
 		       ulp_type);
 		return -EINVAL;
@@ -454,7 +454,7 @@
 	struct cnic_ulp_ops *ulp_ops;
 	int i = 0;
 
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
 		       ulp_type);
 		return -EINVAL;
@@ -510,7 +510,7 @@
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_ulp_ops *ulp_ops;
 
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
 		       ulp_type);
 		return -EINVAL;
@@ -551,7 +551,7 @@
 	struct cnic_local *cp = dev->cnic_priv;
 	int i = 0;
 
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
 		       ulp_type);
 		return -EINVAL;
@@ -3560,9 +3560,9 @@
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
 
 	if (sb_id == 0)
-		val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
+		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
 	else
-		val = BNX2_L2CTX_STATUSB_NUM(sb_id);
+		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
 
 	rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
@@ -3719,7 +3719,7 @@
 	cp->int_num = 0;
 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
 		u32 sb_id = cp->status_blk_num;
-		u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
+		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
 
 		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
 		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
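The cnic.c hunks tighten the same range check in four entry points: ulp_type arrives as a plain signed int, so the added lower-bound test is what keeps a negative value from slipping past the `>= MAX_CNIC_ULP_TYPE` comparison and being used as an index into the ops table keyed by ulp_type. In isolation the guard is simply:

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE)
		return -EINVAL;	/* reject out-of-range type codes before indexing */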
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 47b352d..cf2e1d3 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2135,6 +2135,7 @@
 	if (!complete)
 		return;
 
+	skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	cpl = qs->lro_va;
 
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index a876dce..79ce8e8 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2217,7 +2217,7 @@
 	struct emac_priv *priv = netdev_priv(ndev);
 
 	emac_int_disable(priv);
-	emac_irq(ndev->irq, priv);
+	emac_irq(ndev->irq, ndev);
 	emac_int_enable(priv);
 }
 #endif
@@ -2806,11 +2806,33 @@
 	return 0;
 }
 
+static
+int davinci_emac_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+
+	if (netif_running(dev))
+		emac_dev_stop(dev);
+
+	clk_disable(emac_clk);
+
+	return 0;
+}
+
+static int davinci_emac_resume(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+
+	clk_enable(emac_clk);
+
+	if (netif_running(dev))
+		emac_dev_open(dev);
+
+	return 0;
+}
+
 /**
  * davinci_emac_driver: EMAC platform driver structure
- *
- * We implement only probe and remove functions - suspend/resume and
- * others not supported by this module
  */
 static struct platform_driver davinci_emac_driver = {
 	.driver = {
@@ -2819,6 +2841,8 @@
 	},
 	.probe = davinci_emac_probe,
 	.remove = __devexit_p(davinci_emac_remove),
+	.suspend = davinci_emac_suspend,
+	.resume = davinci_emac_resume,
 };
 
 /**
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index 80817c2..fb1c924d 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -50,7 +50,7 @@
 #define DM9000_RCSR	       0x32
 
 #define CHIPR_DM9000A	       0x19
-#define CHIPR_DM9000B	       0x1B
+#define CHIPR_DM9000B	       0x1A
 
 #define DM9000_MRCMDX          0xF0
 #define DM9000_MRCMD           0xF2
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index ff83efd..7462fdf 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -621,6 +621,7 @@
 	u16 eeprom_wc;
 	__le16 eeprom[256];
 	spinlock_t mdio_lock;
+	const struct firmware *fw;
 };
 
 static inline void e100_write_flush(struct nic *nic)
@@ -1222,9 +1223,9 @@
 static const struct firmware *e100_request_firmware(struct nic *nic)
 {
 	const char *fw_name;
-	const struct firmware *fw;
+	const struct firmware *fw = nic->fw;
 	u8 timer, bundle, min_size;
-	int err;
+	int err = 0;
 
 	/* do not load u-code for ICH devices */
 	if (nic->flags & ich)
@@ -1240,12 +1241,20 @@
 	else /* No ucode on other devices */
 		return NULL;
 
-	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
+	/* If the firmware has not previously been loaded, request a pointer
+	 * to it. If it was previously loaded, we are reinitializing the
+	 * adapter, possibly in a resume from hibernate, in which case
+	 * request_firmware() cannot be used.
+	 */
+	if (!fw)
+		err = request_firmware(&fw, fw_name, &nic->pdev->dev);
+
 	if (err) {
 		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
 			fw_name, err);
 		return ERR_PTR(err);
 	}
+
 	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
 	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
 	if (fw->size != UCODE_SIZE * 4 + 3) {
@@ -1268,7 +1277,10 @@
 		release_firmware(fw);
 		return ERR_PTR(-EINVAL);
 	}
-	/* OK, firmware is validated and ready to use... */
+
+	/* OK, firmware is validated and ready to use. Save a pointer
+	 * to it in the nic */
+	nic->fw = fw;
 	return fw;
 }
 
@@ -1426,19 +1438,31 @@
 	} else
 		DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
 
-	/* Isolate all the PHY ids */
-	for (addr = 0; addr < 32; addr++)
-		mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
-	/* Select the discovered PHY */
-	bmcr &= ~BMCR_ISOLATE;
-	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
-
 	/* Get phy ID */
 	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
 	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
 	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
 	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
 
+	/* Select the phy and isolate the rest */
+	for (addr = 0; addr < 32; addr++) {
+		if (addr != nic->mii.phy_id) {
+			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
+		} else if (nic->phy != phy_82552_v) {
+			bmcr = mdio_read(netdev, addr, MII_BMCR);
+			mdio_write(netdev, addr, MII_BMCR,
+				bmcr & ~BMCR_ISOLATE);
+		}
+	}
+	/*
+	 * Workaround for 82552:
+	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
+	 * other phy_id's) using bmcr value from addr discovery loop above.
+	 */
+	if (nic->phy == phy_82552_v)
+		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
+			bmcr & ~BMCR_ISOLATE);
+
 	/* Handle National tx phys */
 #define NCS_PHY_MODEL_MASK	0xFFF0FFFF
 	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
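The e100 change above caches the validated microcode image in the new nic->fw field so that reinitialization paths (notably resume from hibernation, where request_firmware() cannot be called) reuse the already-loaded image. A condensed sketch of the idea, with validation omitted and the helper name invented for illustration:

static const struct firmware *example_get_ucode(struct nic *nic, const char *fw_name)
{
	int err = 0;

	if (!nic->fw)	/* only the first call actually loads from userspace */
		err = request_firmware(&nic->fw, fw_name, &nic->pdev->dev);
	if (err)
		return ERR_PTR(err);

	/* ...size/checksum validation as in the hunk above... */
	return nic->fw;	/* cached pointer reused on later re-inits */
}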
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index c0f185b..1190167 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -76,6 +76,7 @@
 /* Extended Device Control */
 #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
 #define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
 #define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
 #define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -347,6 +348,7 @@
 /* Extended Configuration Control and Size */
 #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
 #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE       0x00000008
 #define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 1211df9..00989c5 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -141,6 +141,20 @@
 #define HV_TNCRS_UPPER		PHY_REG(778, 29) /* Transmit with no CRS */
 #define HV_TNCRS_LOWER		PHY_REG(778, 30)
 
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS                      17
+#define BM_CS_STATUS_LINK_UP              0x0400
+#define BM_CS_STATUS_RESOLVED             0x0800
+#define BM_CS_STATUS_SPEED_MASK           0xC000
+#define BM_CS_STATUS_SPEED_1000           0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS                       26
+#define HV_M_STATUS_AUTONEG_COMPLETE      0x1000
+#define HV_M_STATUS_SPEED_MASK            0x0300
+#define HV_M_STATUS_SPEED_1000            0x0200
+#define HV_M_STATUS_LINK_UP               0x0040
+
 enum e1000_boards {
 	board_82571,
 	board_82572,
@@ -518,9 +532,13 @@
 extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
 extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+                                          u16 *data);
 extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
 extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
 extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+                                           u16 data);
 extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
 extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
 extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
@@ -537,7 +555,11 @@
 extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
 extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+                                        u16 data);
 extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+                                       u16 *data);
 extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 			       u32 usec_interval, bool *success);
 extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
@@ -545,7 +567,11 @@
 extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_check_downshift(struct e1000_hw *hw);
 extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+                                        u16 *data);
 extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+                                         u16 data);
 extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
 extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
 extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index a70999b..0364b91 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -335,10 +335,18 @@
 
 		hw->fc.current_mode = hw->fc.requested_mode;
 
-		retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
-			  hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw));
+		if (hw->phy.media_type == e1000_media_type_fiber) {
+			retval = hw->mac.ops.setup_link(hw);
+			/* implicit goto out */
+		} else {
+			retval = e1000e_force_mac_fc(hw);
+			if (retval)
+				goto out;
+			e1000e_set_fc_watermarks(hw);
+		}
 	}
 
+out:
 	clear_bit(__E1000_RESETTING, &adapter->state);
 	return retval;
 }
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index fd44d9f..aaea41e 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -764,11 +764,13 @@
 	s32  (*get_cable_length)(struct e1000_hw *);
 	s32  (*get_phy_info)(struct e1000_hw *);
 	s32  (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
 	void (*release_phy)(struct e1000_hw *);
 	s32  (*reset_phy)(struct e1000_hw *);
 	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
 	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
 	s32  (*write_phy_reg)(struct e1000_hw *, u32, u16);
+	s32  (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
 	s32  (*cfg_on_link_up)(struct e1000_hw *);
 };
 
@@ -901,6 +903,7 @@
 struct e1000_dev_spec_ich8lan {
 	bool kmrn_lock_loss_workaround_enabled;
 	struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
+	bool nvm_k1_enabled;
 };
 
 struct e1000_hw {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 99df2ab..51ddb04 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -122,6 +122,27 @@
 
 #define HV_LED_CONFIG		PHY_REG(768, 30) /* LED Configuration */
 
+#define SW_FLAG_TIMEOUT    1000 /* SW Semaphore flag timeout in milliseconds */
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR            PHY_REG(768, 26)
+#define HV_SMB_ADDR_PEC_EN     0x0200
+#define HV_SMB_ADDR_VALID      0x0080
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP                     0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK  0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS            PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU       0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS    0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1  /* NVM Enable K1 bit */
+
 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
 /* Offset 04h HSFSTS */
 union ich8_hws_flash_status {
@@ -200,6 +221,10 @@
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
+static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -242,7 +267,11 @@
 
 	phy->ops.check_polarity       = e1000_check_polarity_ife_ich8lan;
 	phy->ops.read_phy_reg         = e1000_read_phy_reg_hv;
+	phy->ops.read_phy_reg_locked  = e1000_read_phy_reg_hv_locked;
+	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
+	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
 	phy->ops.write_phy_reg        = e1000_write_phy_reg_hv;
+	phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
 	phy->id = e1000_phy_unknown;
@@ -303,6 +332,8 @@
 	case IGP03E1000_E_PHY_ID:
 		phy->type = e1000_phy_igp_3;
 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
+		phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
 		break;
 	case IFE_E_PHY_ID:
 	case IFE_PLUS_E_PHY_ID:
@@ -469,14 +500,6 @@
 		goto out;
 	}
 
-	if (hw->mac.type == e1000_pchlan) {
-		ret_val = e1000e_write_kmrn_reg(hw,
-		                                   E1000_KMRNCTRLSTA_K1_CONFIG,
-		                                   E1000_KMRNCTRLSTA_K1_ENABLE);
-		if (ret_val)
-			goto out;
-	}
-
 	/*
 	 * First we want to see if the MII Status Register reports
 	 * link.  If so, then we want to get the current speed/duplex
@@ -486,6 +509,12 @@
 	if (ret_val)
 		goto out;
 
+	if (hw->mac.type == e1000_pchlan) {
+		ret_val = e1000_k1_gig_workaround_hv(hw, link);
+		if (ret_val)
+			goto out;
+	}
+
 	if (!link)
 		goto out; /* No link detected */
 
@@ -568,12 +597,39 @@
 static DEFINE_MUTEX(nvm_mutex);
 
 /**
+ *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_lock(&nvm_mutex);
+
+	return 0;
+}
+
+/**
+ *  e1000_release_nvm_ich8lan - Release NVM mutex
+ *  @hw: pointer to the HW structure
+ *
+ *  Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_unlock(&nvm_mutex);
+
+	return;
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
  *  @hw: pointer to the HW structure
  *
- *  Acquires the software control flag for performing NVM and PHY
- *  operations.  This is a function pointer entry point only called by
- *  read/write routines for the PHY and NVM parts.
+ *  Acquires the software control flag for performing PHY and select
+ *  MAC CSR accesses.
  **/
 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 {
@@ -582,7 +638,7 @@
 
 	might_sleep();
 
-	mutex_lock(&nvm_mutex);
+	mutex_lock(&swflag_mutex);
 
 	while (timeout) {
 		extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -599,7 +655,7 @@
 		goto out;
 	}
 
-	timeout = PHY_CFG_TIMEOUT * 2;
+	timeout = SW_FLAG_TIMEOUT;
 
 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
@@ -623,7 +679,7 @@
 
 out:
 	if (ret_val)
-		mutex_unlock(&nvm_mutex);
+		mutex_unlock(&swflag_mutex);
 
 	return ret_val;
 }
@@ -632,9 +688,8 @@
  *  e1000_release_swflag_ich8lan - Release software control flag
  *  @hw: pointer to the HW structure
  *
- *  Releases the software control flag for performing NVM and PHY operations.
- *  This is a function pointer entry point only called by read/write
- *  routines for the PHY and NVM parts.
+ *  Releases the software control flag for performing PHY and select
+ *  MAC CSR accesses.
  **/
 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
 {
@@ -644,7 +699,9 @@
 	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
 
-	mutex_unlock(&nvm_mutex);
+	mutex_unlock(&swflag_mutex);
+
+	return;
 }
 
 /**
@@ -752,6 +809,326 @@
 }
 
 /**
+ *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
+ *  @hw:   pointer to the HW structure
+ *
+ *  SW should configure the LCD from the NVM extended configuration region
+ *  as a workaround for certain parts.
+ **/
+static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+	s32 ret_val;
+	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Initialize the PHY from the NVM on ICH platforms.  This
+	 * is needed due to an issue where the NVM configuration is
+	 * not properly autoloaded after power transitions.
+	 * Therefore, after each PHY reset, we will load the
+	 * configuration data out of the NVM manually.
+	 */
+	if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
+		(hw->mac.type == e1000_pchlan)) {
+		struct e1000_adapter *adapter = hw->adapter;
+
+		/* Check if SW needs to configure the PHY */
+		if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+		    (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
+		    (hw->mac.type == e1000_pchlan))
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+		else
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+
+		data = er32(FEXTNVM);
+		if (!(data & sw_cfg_mask))
+			goto out;
+
+		/* Wait for basic configuration to complete before proceeding */
+		e1000_lan_init_done_ich8lan(hw);
+
+		/*
+		 * Make sure HW does not configure LCD from PHY
+		 * extended configuration before SW configuration
+		 */
+		data = er32(EXTCNF_CTRL);
+		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+			goto out;
+
+		cnf_size = er32(EXTCNF_SIZE);
+		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+		if (!cnf_size)
+			goto out;
+
+		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+		if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+		    (hw->mac.type == e1000_pchlan)) {
+			/*
+			 * HW configures the SMBus address and LEDs when the
+			 * OEM and LCD Write Enable bits are set in the NVM.
+			 * When both NVM bits are cleared, SW will configure
+			 * them instead.
+			 */
+			data = er32(STRAP);
+			data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+			reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
+			reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+			ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
+			                                        reg_data);
+			if (ret_val)
+				goto out;
+
+			data = er32(LEDCTL);
+			ret_val = e1000_write_phy_reg_hv_locked(hw,
+			                                        HV_LED_CONFIG,
+			                                        (u16)data);
+			if (ret_val)
+				goto out;
+		}
+		/* Configure LCD from extended configuration region. */
+
+		/* cnf_base_addr is in DWORD */
+		word_addr = (u16)(cnf_base_addr << 1);
+
+		for (i = 0; i < cnf_size; i++) {
+			ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
+			                           &reg_data);
+			if (ret_val)
+				goto out;
+
+			ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+			                           1, &reg_addr);
+			if (ret_val)
+				goto out;
+
+			/* Save off the PHY page for future writes. */
+			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+				phy_page = reg_data;
+				continue;
+			}
+
+			reg_addr &= PHY_REG_MASK;
+			reg_addr |= phy_page;
+
+			ret_val = phy->ops.write_phy_reg_locked(hw,
+			                                    (u32)reg_addr,
+			                                    reg_data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	hw->phy.ops.release_phy(hw);
+	return ret_val;
+}
+
+/**
+ *  e1000_k1_gig_workaround_hv - K1 Si workaround
+ *  @hw:   pointer to the HW structure
+ *  @link: link up bool flag
+ *
+ *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
+ *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
+ *  If link is down, the function will restore the default K1 setting located
+ *  in the NVM.
+ **/
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
+{
+	s32 ret_val = 0;
+	u16 status_reg = 0;
+	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
+
+	if (hw->mac.type != e1000_pchlan)
+		goto out;
+
+	/* Wrap the whole flow with the sw flag */
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
+	if (link) {
+		if (hw->phy.type == e1000_phy_82578) {
+			ret_val = hw->phy.ops.read_phy_reg_locked(hw,
+			                                          BM_CS_STATUS,
+			                                          &status_reg);
+			if (ret_val)
+				goto release;
+
+			status_reg &= BM_CS_STATUS_LINK_UP |
+			              BM_CS_STATUS_RESOLVED |
+			              BM_CS_STATUS_SPEED_MASK;
+
+			if (status_reg == (BM_CS_STATUS_LINK_UP |
+			                   BM_CS_STATUS_RESOLVED |
+			                   BM_CS_STATUS_SPEED_1000))
+				k1_enable = false;
+		}
+
+		if (hw->phy.type == e1000_phy_82577) {
+			ret_val = hw->phy.ops.read_phy_reg_locked(hw,
+			                                          HV_M_STATUS,
+			                                          &status_reg);
+			if (ret_val)
+				goto release;
+
+			status_reg &= HV_M_STATUS_LINK_UP |
+			              HV_M_STATUS_AUTONEG_COMPLETE |
+			              HV_M_STATUS_SPEED_MASK;
+
+			if (status_reg == (HV_M_STATUS_LINK_UP |
+			                   HV_M_STATUS_AUTONEG_COMPLETE |
+			                   HV_M_STATUS_SPEED_1000))
+				k1_enable = false;
+		}
+
+		/* Link stall fix for link up */
+		ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
+		                                           0x0100);
+		if (ret_val)
+			goto release;
+
+	} else {
+		/* Link stall fix for link down */
+		ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
+		                                           0x4100);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
+
+release:
+	hw->phy.ops.release_phy(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_configure_k1_ich8lan - Configure K1 power state
+ *  @hw: pointer to the HW structure
+ *  @k1_enable: K1 state to configure
+ *
+ *  Configure the K1 power state based on the provided parameter.
+ *  Assumes semaphore already acquired.
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ **/
+static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+{
+	s32 ret_val = 0;
+	u32 ctrl_reg = 0;
+	u32 ctrl_ext = 0;
+	u32 reg = 0;
+	u16 kmrn_reg = 0;
+
+	ret_val = e1000e_read_kmrn_reg_locked(hw,
+	                                     E1000_KMRNCTRLSTA_K1_CONFIG,
+	                                     &kmrn_reg);
+	if (ret_val)
+		goto out;
+
+	if (k1_enable)
+		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
+	else
+		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
+
+	ret_val = e1000e_write_kmrn_reg_locked(hw,
+	                                      E1000_KMRNCTRLSTA_K1_CONFIG,
+	                                      kmrn_reg);
+	if (ret_val)
+		goto out;
+
+	udelay(20);
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_reg = er32(CTRL);
+
+	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+	reg |= E1000_CTRL_FRCSPD;
+	ew32(CTRL, reg);
+
+	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+	udelay(20);
+	ew32(CTRL, ctrl_reg);
+	ew32(CTRL_EXT, ctrl_ext);
+	udelay(20);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
+ *  @hw:       pointer to the HW structure
+ *  @d0_state: boolean indicating whether the device is entering D0 or D3
+ *
+ *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
+ *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
+ *  in NVM determine whether HW should configure LPLU and Gbe Disable.
+ **/
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+{
+	s32 ret_val = 0;
+	u32 mac_reg;
+	u16 oem_reg;
+
+	if (hw->mac.type != e1000_pchlan)
+		return ret_val;
+
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	mac_reg = er32(EXTCNF_CTRL);
+	if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+		goto out;
+
+	mac_reg = er32(FEXTNVM);
+	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
+		goto out;
+
+	mac_reg = er32(PHY_CTRL);
+
+	ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
+
+	if (d0_state) {
+		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
+			oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
+			oem_reg |= HV_OEM_BITS_LPLU;
+	} else {
+		if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
+			oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+		if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
+			oem_reg |= HV_OEM_BITS_LPLU;
+	}
+	/* Restart auto-neg to activate the bits */
+	oem_reg |= HV_OEM_BITS_RESTART_AN;
+	ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	hw->phy.ops.release_phy(hw);
+
+	return ret_val;
+}
+
+
+/**
  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
  *  done after every PHY reset.
  **/
@@ -791,10 +1168,20 @@
 	ret_val = hw->phy.ops.acquire_phy(hw);
 	if (ret_val)
 		return ret_val;
+
 	hw->phy.addr = 1;
-	e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+	if (ret_val)
+		goto out;
 	hw->phy.ops.release_phy(hw);
 
+	/*
+	 * Configure the K1 Si workaround during PHY reset assuming there is
+	 * link so that it disables K1 if the link is at 1Gbps.
+	 */
+	ret_val = e1000_k1_gig_workaround_hv(hw, true);
+
+out:
 	return ret_val;
 }
 
@@ -840,11 +1227,8 @@
  **/
 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 {
-	struct e1000_phy_info *phy = &hw->phy;
-	u32 i;
-	u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
-	s32 ret_val;
-	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+	s32 ret_val = 0;
+	u16 reg;
 
 	ret_val = e1000e_phy_hw_reset_generic(hw);
 	if (ret_val)
@@ -859,81 +1243,20 @@
 			return ret_val;
 	}
 
-	/*
-	 * Initialize the PHY from the NVM on ICH platforms.  This
-	 * is needed due to an issue where the NVM configuration is
-	 * not properly autoloaded after power transitions.
-	 * Therefore, after each PHY reset, we will load the
-	 * configuration data out of the NVM manually.
-	 */
-	if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) {
-		struct e1000_adapter *adapter = hw->adapter;
+	/* Dummy read to clear the phy wakeup bit after lcd reset */
+	if (hw->mac.type == e1000_pchlan)
+		e1e_rphy(hw, BM_WUC, &reg);
 
-		/* Check if SW needs configure the PHY */
-		if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
-		    (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M))
-			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
-		else
-			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+	/* Configure the LCD with the extended configuration region in NVM */
+	ret_val = e1000_sw_lcd_config_ich8lan(hw);
+	if (ret_val)
+		goto out;
 
-		data = er32(FEXTNVM);
-		if (!(data & sw_cfg_mask))
-			return 0;
+	/* Configure the LCD with the OEM bits in NVM */
+	if (hw->mac.type == e1000_pchlan)
+		ret_val = e1000_oem_bits_config_ich8lan(hw, true);
 
-		/* Wait for basic configuration completes before proceeding */
-		e1000_lan_init_done_ich8lan(hw);
-
-		/*
-		 * Make sure HW does not configure LCD from PHY
-		 * extended configuration before SW configuration
-		 */
-		data = er32(EXTCNF_CTRL);
-		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
-			return 0;
-
-		cnf_size = er32(EXTCNF_SIZE);
-		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
-		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
-		if (!cnf_size)
-			return 0;
-
-		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
-		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
-
-		/* Configure LCD from extended configuration region. */
-
-		/* cnf_base_addr is in DWORD */
-		word_addr = (u16)(cnf_base_addr << 1);
-
-		for (i = 0; i < cnf_size; i++) {
-			ret_val = e1000_read_nvm(hw,
-						(word_addr + i * 2),
-						1,
-						&reg_data);
-			if (ret_val)
-				return ret_val;
-
-			ret_val = e1000_read_nvm(hw,
-						(word_addr + i * 2 + 1),
-						1,
-						&reg_addr);
-			if (ret_val)
-				return ret_val;
-
-			/* Save off the PHY page for future writes. */
-			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
-				phy_page = reg_data;
-				continue;
-			}
-
-			reg_addr |= phy_page;
-
-			ret_val = e1e_wphy(hw, (u32)reg_addr, reg_data);
-			if (ret_val)
-				return ret_val;
-		}
-	}
-
+out:
-	return 0;
+	return ret_val;
 }
 
@@ -1054,6 +1377,38 @@
 }
 
 /**
+ *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU state according to the active flag.  For PCH, if the OEM write
+ *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ *  the phy speed. This function will manually set the LPLU bit and restart
+ *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ *  since it configures the same bit.
+ **/
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+	s32 ret_val = 0;
+	u16 oem_reg;
+
+	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	if (active)
+		oem_reg |= HV_OEM_BITS_LPLU;
+	else
+		oem_reg &= ~HV_OEM_BITS_LPLU;
+
+	oem_reg |= HV_OEM_BITS_RESTART_AN;
+	ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	return ret_val;
+}
+
+/**
  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
  *  @hw: pointer to the HW structure
  *  @active: TRUE to enable LPLU, FALSE to disable
@@ -1314,12 +1669,11 @@
 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
 	    (words == 0)) {
 		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
-		return -E1000_ERR_NVM;
+		ret_val = -E1000_ERR_NVM;
+		goto out;
 	}
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		goto out;
+	nvm->ops.acquire_nvm(hw);
 
 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
 	if (ret_val) {
@@ -1345,7 +1699,7 @@
 		}
 	}
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 
 out:
 	if (ret_val)
@@ -1603,11 +1957,15 @@
 		return -E1000_ERR_NVM;
 	}
 
+	nvm->ops.acquire_nvm(hw);
+
 	for (i = 0; i < words; i++) {
 		dev_spec->shadow_ram[offset+i].modified = 1;
 		dev_spec->shadow_ram[offset+i].value = data[i];
 	}
 
+	nvm->ops.release_nvm(hw);
+
 	return 0;
 }
 
@@ -1637,9 +1995,7 @@
 	if (nvm->type != e1000_nvm_flash_sw)
 		goto out;
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		goto out;
+	nvm->ops.acquire_nvm(hw);
 
 	/*
 	 * We're writing to the opposite bank so if we're on bank 1,
@@ -1657,7 +2013,7 @@
 		old_bank_offset = 0;
 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
 		if (ret_val) {
-			e1000_release_swflag_ich8lan(hw);
+			nvm->ops.release_nvm(hw);
 			goto out;
 		}
 	} else {
@@ -1665,7 +2021,7 @@
 		new_bank_offset = 0;
 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
 		if (ret_val) {
-			e1000_release_swflag_ich8lan(hw);
+			nvm->ops.release_nvm(hw);
 			goto out;
 		}
 	}
@@ -1723,7 +2079,7 @@
 	if (ret_val) {
 		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
 		hw_dbg(hw, "Flash commit failed.\n");
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1736,7 +2092,7 @@
 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
 	if (ret_val) {
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 	data &= 0xBFFF;
@@ -1744,7 +2100,7 @@
 						       act_offset * 2 + 1,
 						       (u8)(data >> 8));
 	if (ret_val) {
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1757,7 +2113,7 @@
 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
 	if (ret_val) {
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1767,7 +2123,7 @@
 		dev_spec->shadow_ram[i].value = 0xFFFF;
 	}
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 
 	/*
 	 * Reload the EEPROM, or else modifications will not appear
@@ -1831,14 +2187,12 @@
  **/
 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
 {
+	struct e1000_nvm_info *nvm = &hw->nvm;
 	union ich8_flash_protected_range pr0;
 	union ich8_hws_flash_status hsfsts;
 	u32 gfpreg;
-	s32 ret_val;
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		return;
+	nvm->ops.acquire_nvm(hw);
 
 	gfpreg = er32flash(ICH_FLASH_GFPREG);
 
@@ -1859,7 +2213,7 @@
 	hsfsts.hsf_status.flockdn = true;
 	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 }
 
 /**
@@ -2229,6 +2583,8 @@
  **/
 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 {
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u16 reg;
 	u32 ctrl, icr, kab;
 	s32 ret_val;
 
@@ -2263,6 +2619,18 @@
 		ew32(PBS, E1000_PBS_16K);
 	}
 
+	if (hw->mac.type == e1000_pchlan) {
+		/* Save the NVM K1 bit setting */
+		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
+		if (ret_val)
+			return ret_val;
+
+		if (reg & E1000_NVM_K1_ENABLE)
+			dev_spec->nvm_k1_enabled = true;
+		else
+			dev_spec->nvm_k1_enabled = false;
+	}
+
 	ctrl = er32(CTRL);
 
 	if (!e1000_check_reset_block(hw)) {
@@ -2304,7 +2672,19 @@
 			hw_dbg(hw, "Auto Read Done did not complete\n");
 		}
 	}
+	/* Dummy read to clear the phy wakeup bit after lcd reset */
+	if (hw->mac.type == e1000_pchlan)
+		e1e_rphy(hw, BM_WUC, &reg);
 
+	ret_val = e1000_sw_lcd_config_ich8lan(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.type == e1000_pchlan) {
+		ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+		if (ret_val)
+			goto out;
+	}
 	/*
 	 * For PCH, this write will make sure that any noise
 	 * will be detected as a CRC error and be dropped rather than show up
@@ -2323,6 +2703,7 @@
 	if (hw->mac.type == e1000_pchlan)
 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
 
+out:
 	return ret_val;
 }
 
@@ -2627,14 +3008,6 @@
 	if (ret_val)
 		return ret_val;
 
-	if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) {
-		ret_val = e1000e_write_kmrn_reg(hw,
-		                                  E1000_KMRNCTRLSTA_K1_CONFIG,
-		                                  E1000_KMRNCTRLSTA_K1_DISABLE);
-		if (ret_val)
-			return ret_val;
-	}
-
 	if ((hw->mac.type == e1000_ich8lan) &&
 	    (hw->phy.type == e1000_phy_igp_3) &&
 	    (*speed == SPEED_1000)) {
@@ -2843,9 +3216,8 @@
 		            E1000_PHY_CTRL_GBE_DISABLE;
 		ew32(PHY_CTRL, phy_ctrl);
 
-		/* Workaround SWFLAG unexpectedly set during S0->Sx */
 		if (hw->mac.type == e1000_pchlan)
-			udelay(500);
+			e1000_phy_hw_reset_ich8lan(hw);
 	default:
 		break;
 	}
@@ -3113,9 +3485,9 @@
 };
 
 static struct e1000_nvm_operations ich8_nvm_ops = {
-	.acquire_nvm		= e1000_acquire_swflag_ich8lan,
+	.acquire_nvm		= e1000_acquire_nvm_ich8lan,
 	.read_nvm	 	= e1000_read_nvm_ich8lan,
-	.release_nvm		= e1000_release_swflag_ich8lan,
+	.release_nvm		= e1000_release_nvm_ich8lan,
 	.update_nvm		= e1000_update_nvm_checksum_ich8lan,
 	.valid_led_default	= e1000_valid_led_default_ich8lan,
 	.validate_nvm		= e1000_validate_nvm_checksum_ich8lan,
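
The ich8lan.c hunks above split NVM arbitration from the software control flag and route the K1 and OEM-bits workarounds through the new *_locked PHY accessors. Below is a minimal sketch (not part of the patch) of the calling pattern those helpers support, reusing register names and the 0x0100 value from the hunks above: take the flag once, run the multi-register sequence, release it on every exit path.

/*
 * Illustrative only: acquire the software flag once, then use the
 * *_locked accessors for the whole sequence, releasing on all paths.
 */
static s32 example_k1_style_sequence(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 status_reg;

	ret_val = hw->phy.ops.acquire_phy(hw);	/* takes swflag_mutex */
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_phy_reg_locked(hw, BM_CS_STATUS,
	                                          &status_reg);
	if (ret_val)
		goto release;

	status_reg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
	              BM_CS_STATUS_SPEED_MASK;

	/* a decision based on status_reg would go here, as in the workaround */
	ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
	                                           0x0100);

release:
	hw->phy.ops.release_phy(hw);	/* always drop the flag */
	return ret_val;
}
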
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 994401f..03175b3 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -95,13 +95,6 @@
 /* BM PHY Copper Specific Control 1 */
 #define BM_CS_CTRL1                       16
 
-/* BM PHY Copper Specific Status */
-#define BM_CS_STATUS                      17
-#define BM_CS_STATUS_LINK_UP              0x0400
-#define BM_CS_STATUS_RESOLVED             0x0800
-#define BM_CS_STATUS_SPEED_MASK           0xC000
-#define BM_CS_STATUS_SPEED_1000           0x8000
-
 #define HV_MUX_DATA_CTRL               PHY_REG(776, 16)
 #define HV_MUX_DATA_CTRL_GEN_TO_MAC    0x0400
 #define HV_MUX_DATA_CTRL_FORCE_SPEED   0x0004
@@ -164,16 +157,25 @@
 		 * MDIC mode. No harm in trying again in this case since
 		 * the PHY ID is unknown at this point anyway
 		 */
+		ret_val = phy->ops.acquire_phy(hw);
+		if (ret_val)
+			goto out;
 		ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
 		if (ret_val)
 			goto out;
+		phy->ops.release_phy(hw);
 
 		retry_count++;
 	}
 out:
 	/* Revert to MDIO fast mode, if applicable */
-	if (retry_count)
+	if (retry_count) {
+		ret_val = phy->ops.acquire_phy(hw);
+		if (ret_val)
+			return ret_val;
 		ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+		phy->ops.release_phy(hw);
+	}
 
 	return ret_val;
 }
@@ -354,38 +356,117 @@
 }
 
 /**
- *  e1000e_read_phy_reg_igp - Read igp PHY register
+ *  __e1000e_read_phy_reg_igp - Read igp PHY register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read
  *  @data: pointer to the read data
+ *  @locked: whether the semaphore has already been acquired
  *
  *  Acquires semaphore, if necessary, then reads the PHY register at offset
- *  and storing the retrieved information in data.  Release any acquired
+ *  and stores the retrieved information in data.  Release any acquired
  *  semaphores before exiting.
  **/
-s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+                                    bool locked)
 {
-	s32 ret_val;
+	s32 ret_val = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
+	if (!locked) {
+		if (!(hw->phy.ops.acquire_phy))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire_phy(hw);
+		if (ret_val)
+			goto out;
+	}
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
 		ret_val = e1000e_write_phy_reg_mdic(hw,
 						    IGP01E1000_PHY_PAGE_SELECT,
 						    (u16)offset);
-		if (ret_val) {
-			hw->phy.ops.release_phy(hw);
-			return ret_val;
-		}
+		if (ret_val)
+			goto release;
 	}
 
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
-					   data);
+	                                  data);
 
-	hw->phy.ops.release_phy(hw);
+release:
+	if (!locked)
+		hw->phy.ops.release_phy(hw);
+out:
+	return ret_val;
+}
 
+/**
+ *  e1000e_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset and stores the
+ *  retrieved information in data.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000e_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_read_phy_reg_igp_locked - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000e_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ *  e1000e_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: whether the semaphore has already been acquired
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+                                     bool locked)
+{
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire_phy))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire_phy(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+						    IGP01E1000_PHY_PAGE_SELECT,
+						    (u16)offset);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+release:
+	if (!locked)
+		hw->phy.ops.release_phy(hw);
+
+out:
 	return ret_val;
 }
 
@@ -395,53 +476,53 @@
  *  @offset: register offset to write to
  *  @data: data to write at register offset
  *
- *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  Acquires semaphore then writes the data to PHY register
  *  at the offset.  Release any acquired semaphores before exiting.
  **/
 s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
 {
-	s32 ret_val;
-
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
-
-	if (offset > MAX_PHY_MULTI_PAGE_REG) {
-		ret_val = e1000e_write_phy_reg_mdic(hw,
-						    IGP01E1000_PHY_PAGE_SELECT,
-						    (u16)offset);
-		if (ret_val) {
-			hw->phy.ops.release_phy(hw);
-			return ret_val;
-		}
-	}
-
-	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
-					    data);
-
-	hw->phy.ops.release_phy(hw);
-
-	return ret_val;
+	return __e1000e_write_phy_reg_igp(hw, offset, data, false);
 }
 
 /**
- *  e1000e_read_kmrn_reg - Read kumeran register
+ *  e1000e_write_phy_reg_igp_locked - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000e_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ *  __e1000_read_kmrn_reg - Read kumeran register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read
  *  @data: pointer to the read data
+ *  @locked: whether the semaphore has already been acquired
  *
  *  Acquires semaphore, if necessary.  Then reads the PHY register at offset
  *  using the kumeran interface.  The information retrieved is stored in data.
  *  Release any acquired semaphores before exiting.
  **/
-s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+                                 bool locked)
 {
 	u32 kmrnctrlsta;
-	s32 ret_val;
+	s32 ret_val = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
+	if (!locked) {
+		if (!(hw->phy.ops.acquire_phy))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire_phy(hw);
+		if (ret_val)
+			goto out;
+	}
 
 	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
 		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
@@ -452,41 +533,111 @@
 	kmrnctrlsta = er32(KMRNCTRLSTA);
 	*data = (u16)kmrnctrlsta;
 
-	hw->phy.ops.release_phy(hw);
+	if (!locked)
+		hw->phy.ops.release_phy(hw);
 
+out:
 	return ret_val;
 }
 
 /**
- *  e1000e_write_kmrn_reg - Write kumeran register
+ *  e1000e_read_kmrn_reg -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset using the
+ *  kumeran interface.  The information retrieved is stored in data.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_read_kmrn_reg_locked -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the kumeran interface.  The
+ *  information retrieved is stored in data.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ *  __e1000_write_kmrn_reg - Write kumeran register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to write to
  *  @data: data to write at register offset
+ *  @locked: whether the semaphore has already been acquired
  *
  *  Acquires semaphore, if necessary.  Then write the data to PHY register
  *  at the offset using the kumeran interface.  Release any acquired semaphores
  *  before exiting.
  **/
-s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+                                  bool locked)
 {
 	u32 kmrnctrlsta;
-	s32 ret_val;
+	s32 ret_val = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
+	if (!locked) {
+		if (!(hw->phy.ops.acquire_phy))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire_phy(hw);
+		if (ret_val)
+			goto out;
+	}
 
 	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
 		       E1000_KMRNCTRLSTA_OFFSET) | data;
 	ew32(KMRNCTRLSTA, kmrnctrlsta);
 
 	udelay(2);
-	hw->phy.ops.release_phy(hw);
 
+	if (!locked)
+		hw->phy.ops.release_phy(hw);
+
+out:
 	return ret_val;
 }
 
 /**
+ *  e1000e_write_kmrn_reg -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to the PHY register at the offset
+ *  using the kumeran interface.  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_write_kmrn_reg_locked -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Write the data to PHY register at the offset using the kumeran interface.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
  *  e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
  *  @hw: pointer to the HW structure
  *
@@ -2105,6 +2256,10 @@
 	u32 page = offset >> IGP_PAGE_SHIFT;
 	u32 page_shift = 0;
 
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
@@ -2112,10 +2267,6 @@
 		goto out;
 	}
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		goto out;
-
 	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2135,18 +2286,15 @@
 		/* Page is shifted left, PHY expects (page x 32) */
 		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
 		                                    (page << page_shift));
-		if (ret_val) {
-			hw->phy.ops.release_phy(hw);
+		if (ret_val)
 			goto out;
-		}
 	}
 
 	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 	                                    data);
 
-	hw->phy.ops.release_phy(hw);
-
 out:
+	hw->phy.ops.release_phy(hw);
 	return ret_val;
 }
 
@@ -2167,6 +2315,10 @@
 	u32 page = offset >> IGP_PAGE_SHIFT;
 	u32 page_shift = 0;
 
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -2174,10 +2326,6 @@
 		goto out;
 	}
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		goto out;
-
 	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2197,17 +2345,14 @@
 		/* Page is shifted left, PHY expects (page x 32) */
 		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
 		                                    (page << page_shift));
-		if (ret_val) {
-			hw->phy.ops.release_phy(hw);
+		if (ret_val)
 			goto out;
-		}
 	}
 
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 	                                   data);
-	hw->phy.ops.release_phy(hw);
-
 out:
+	hw->phy.ops.release_phy(hw);
 	return ret_val;
 }
 
@@ -2226,17 +2371,17 @@
 	s32 ret_val;
 	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
 
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
 							 true);
-		return ret_val;
+		goto out;
 	}
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
-
 	hw->phy.addr = 1;
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2245,16 +2390,14 @@
 		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
 						    page);
 
-		if (ret_val) {
-			hw->phy.ops.release_phy(hw);
-			return ret_val;
-		}
+		if (ret_val)
+			goto out;
 	}
 
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 					   data);
+out:
 	hw->phy.ops.release_phy(hw);
-
 	return ret_val;
 }
 
@@ -2272,17 +2415,17 @@
 	s32 ret_val;
 	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
 
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
 							 false);
-		return ret_val;
+		goto out;
 	}
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
-
 	hw->phy.addr = 1;
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2290,17 +2433,15 @@
 		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
 						    page);
 
-		if (ret_val) {
-			hw->phy.ops.release_phy(hw);
-			return ret_val;
-		}
+		if (ret_val)
+			goto out;
 	}
 
 	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 					    data);
 
+out:
 	hw->phy.ops.release_phy(hw);
-
 	return ret_val;
 }
 
@@ -2320,6 +2461,8 @@
  *  3) Write the address using the address opcode (0x11)
  *  4) Read or write the data using the data opcode (0x12)
  *  5) Restore 769_17.2 to its original value
+ *
+ *  Assumes semaphore already acquired.
  **/
 static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
 					  u16 *data, bool read)
@@ -2327,20 +2470,12 @@
 	s32 ret_val;
 	u16 reg = BM_PHY_REG_NUM(offset);
 	u16 phy_reg = 0;
-	u8  phy_acquired = 1;
-
 
 	/* Gig must be disabled for MDIO accesses to page 800 */
 	if ((hw->mac.type == e1000_pchlan) &&
 	   (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
 		hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val) {
-		phy_acquired = 0;
-		goto out;
-	}
-
 	/* All operations in this function are phy address 1 */
 	hw->phy.addr = 1;
 
@@ -2397,8 +2532,6 @@
 	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
 
 out:
-	if (phy_acquired == 1)
-		hw->phy.ops.release_phy(hw);
 	return ret_val;
 }
 
@@ -2439,52 +2572,63 @@
 	return 0;
 }
 
+/**
+ *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ *  @hw:   pointer to the HW structure
+ *  @slow: true for slow mode, false for normal mode
+ *
+ *  Assumes semaphore already acquired.
+ **/
 s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
 {
 	s32 ret_val = 0;
 	u16 data = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
-
 	/* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
 	hw->phy.addr = 1;
 	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
 				         (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
-	if (ret_val) {
-		hw->phy.ops.release_phy(hw);
-		return ret_val;
-	}
+	if (ret_val)
+		goto out;
+
 	ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
 	                                   (0x2180 | (slow << 10)));
+	if (ret_val)
+		goto out;
 
 	/* dummy read when reverting to fast mode - throw away result */
 	if (!slow)
-		e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
+		ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
 
-	hw->phy.ops.release_phy(hw);
-
+out:
 	return ret_val;
 }
 
 /**
- *  e1000_read_phy_reg_hv -  Read HV PHY register
+ *  __e1000_read_phy_reg_hv -  Read HV PHY register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to be read
  *  @data: pointer to the read data
+ *  @locked: whether the semaphore has already been acquired
  *
  *  Acquires semaphore, if necessary, then reads the PHY register at offset
- *  and storing the retrieved information in data.  Release any acquired
+ *  and stores the retrieved information in data.  Release any acquired
  *  semaphore before exiting.
  **/
-s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+                                   bool locked)
 {
 	s32 ret_val;
 	u16 page = BM_PHY_REG_PAGE(offset);
 	u16 reg = BM_PHY_REG_NUM(offset);
 	bool in_slow_mode = false;
 
+	if (!locked) {
+		ret_val = hw->phy.ops.acquire_phy(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
 	/* Workaround failure in MDIO access while cable is disconnected */
 	if ((hw->phy.type == e1000_phy_82577) &&
 	    !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2508,10 +2652,6 @@
 		goto out;
 	}
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		goto out;
-
 	hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
 
 	if (page == HV_INTC_FC_PAGE_START)
@@ -2529,42 +2669,76 @@
 			ret_val = e1000e_write_phy_reg_mdic(hw,
 			                             IGP01E1000_PHY_PAGE_SELECT,
 			                             (page << IGP_PAGE_SHIFT));
-			if (ret_val) {
-				hw->phy.ops.release_phy(hw);
-				goto out;
-			}
 			hw->phy.addr = phy_addr;
 		}
 	}
 
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
 	                                  data);
-	hw->phy.ops.release_phy(hw);
-
 out:
 	/* Revert to MDIO fast mode, if applicable */
 	if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
 		ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
 
+	if (!locked)
+		hw->phy.ops.release_phy(hw);
+
 	return ret_val;
 }
 
 /**
- *  e1000_write_phy_reg_hv - Write HV PHY register
+ *  e1000_read_phy_reg_hv -  Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset and stores
+ *  the retrieved information in data.  Release the acquired semaphore
+ *  before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, false);
+}
+
+/**
+ *  e1000_read_phy_reg_hv_locked -  Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, true);
+}
+
+/**
+ *  __e1000_write_phy_reg_hv - Write HV PHY register
  *  @hw: pointer to the HW structure
  *  @offset: register offset to write to
  *  @data: data to write at register offset
+ *  @locked: whether the semaphore has already been acquired
  *
  *  Acquires semaphore, if necessary, then writes the data to PHY register
  *  at the offset.  Release any acquired semaphores before exiting.
  **/
-s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+                                    bool locked)
 {
 	s32 ret_val;
 	u16 page = BM_PHY_REG_PAGE(offset);
 	u16 reg = BM_PHY_REG_NUM(offset);
 	bool in_slow_mode = false;
 
+	if (!locked) {
+		ret_val = hw->phy.ops.acquire_phy(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
 	/* Workaround failure in MDIO access while cable is disconnected */
 	if ((hw->phy.type == e1000_phy_82577) &&
 	    !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2588,10 +2762,6 @@
 		goto out;
 	}
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		goto out;
-
 	hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
 
 	if (page == HV_INTC_FC_PAGE_START)
@@ -2607,15 +2777,10 @@
 	    ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
 	    (data & (1 << 11))) {
 		u16 data2 = 0x7EFF;
-		hw->phy.ops.release_phy(hw);
 		ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
 		                                         &data2, false);
 		if (ret_val)
 			goto out;
-
-		ret_val = hw->phy.ops.acquire_phy(hw);
-		if (ret_val)
-			goto out;
 	}
 
 	if (reg > MAX_PHY_MULTI_PAGE_REG) {
@@ -2630,27 +2795,53 @@
 			ret_val = e1000e_write_phy_reg_mdic(hw,
 			                             IGP01E1000_PHY_PAGE_SELECT,
 			                             (page << IGP_PAGE_SHIFT));
-			if (ret_val) {
-				hw->phy.ops.release_phy(hw);
-				goto out;
-			}
 			hw->phy.addr = phy_addr;
 		}
 	}
 
 	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
 	                                  data);
-	hw->phy.ops.release_phy(hw);
 
 out:
 	/* Revert to MDIO fast mode, if applicable */
 	if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
 		ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
 
+	if (!locked)
+		hw->phy.ops.release_phy(hw);
+
 	return ret_val;
 }
 
 /**
+ *  e1000_write_phy_reg_hv - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to PHY register at the offset.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, false);
+}
+
+/**
+ *  e1000_write_phy_reg_hv_locked - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.  Assumes semaphore
+ *  already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, true);
+}
+
+/**
 *  e1000_get_phy_addr_for_hv_page - Get PHY address based on page
  *  @page: page to be accessed
  **/
@@ -2671,10 +2862,9 @@
  *  @data: pointer to the data to be read or written
  *  @read: determines if operation is read or written
  *
- *  Acquires semaphore, if necessary, then reads the PHY register at offset
- *  and storing the retreived information in data.  Release any acquired
- *  semaphores before exiting.  Note that the procedure to read these regs
- *  uses the address port and data port to read/write.
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.  Note that the procedure
+ *  to read these regs uses the address port and data port to read/write.
  **/
 static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
                                           u16 *data, bool read)
@@ -2682,20 +2872,12 @@
 	s32 ret_val;
 	u32 addr_reg = 0;
 	u32 data_reg = 0;
-	u8  phy_acquired = 1;
 
 	/* This takes care of the difference with desktop vs mobile phy */
 	addr_reg = (hw->phy.type == e1000_phy_82578) ?
 	           I82578_ADDR_REG : I82577_ADDR_REG;
 	data_reg = addr_reg + 1;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val) {
-		hw_dbg(hw, "Could not acquire PHY\n");
-		phy_acquired = 0;
-		goto out;
-	}
-
 	/* All operations in this function are phy address 2 */
 	hw->phy.addr = 2;
 
@@ -2718,8 +2900,6 @@
 	}
 
 out:
-	if (phy_acquired == 1)
-		hw->phy.ops.release_phy(hw);
 	return ret_val;
 }
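
The phy.c hunks above convert each register accessor into a __helper that takes a 'locked' flag, fronted by two thin wrappers. A reduced sketch of that pattern, with illustrative names (not the patch code):

static s32 __example_read_reg(struct e1000_hw *hw, u32 offset, u16 *data,
                              bool locked)
{
	s32 ret_val = 0;

	if (!locked) {
		/* unlocked path owns the semaphore for this one access */
		ret_val = hw->phy.ops.acquire_phy(hw);
		if (ret_val)
			return ret_val;
	}

	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
	                                   data);

	if (!locked)
		hw->phy.ops.release_phy(hw);

	return ret_val;
}

/* Unlocked entry point: acquires and releases internally. */
static s32 example_read_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __example_read_reg(hw, offset, data, false);
}

/* Locked entry point: the caller already holds the semaphore. */
static s32 example_read_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __example_read_reg(hw, offset, data, true);
}
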
 
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a25f8ed..f1c5652 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -222,24 +222,25 @@
 	u32 addr;
 };
 
-static u32 ethoc_read(struct ethoc *dev, loff_t offset)
+static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
 {
 	return ioread32(dev->iobase + offset);
 }
 
-static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
+static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
 {
 	iowrite32(data, dev->iobase + offset);
 }
 
-static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
+static inline void ethoc_read_bd(struct ethoc *dev, int index,
+		struct ethoc_bd *bd)
 {
 	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
 	bd->stat = ethoc_read(dev, offset + 0);
 	bd->addr = ethoc_read(dev, offset + 4);
 }
 
-static void ethoc_write_bd(struct ethoc *dev, int index,
+static inline void ethoc_write_bd(struct ethoc *dev, int index,
 		const struct ethoc_bd *bd)
 {
 	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
@@ -247,33 +248,33 @@
 	ethoc_write(dev, offset + 4, bd->addr);
 }
 
-static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
 {
 	u32 imask = ethoc_read(dev, INT_MASK);
 	imask |= mask;
 	ethoc_write(dev, INT_MASK, imask);
 }
 
-static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
 {
 	u32 imask = ethoc_read(dev, INT_MASK);
 	imask &= ~mask;
 	ethoc_write(dev, INT_MASK, imask);
 }
 
-static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
 {
 	ethoc_write(dev, INT_SOURCE, mask);
 }
 
-static void ethoc_enable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
 {
 	u32 mode = ethoc_read(dev, MODER);
 	mode |= MODER_RXEN | MODER_TXEN;
 	ethoc_write(dev, MODER, mode);
 }
 
-static void ethoc_disable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
 {
 	u32 mode = ethoc_read(dev, MODER);
 	mode &= ~(MODER_RXEN | MODER_TXEN);
@@ -507,7 +508,7 @@
 		return IRQ_NONE;
 	}
 
-	ethoc_ack_irq(priv, INT_MASK_ALL);
+	ethoc_ack_irq(priv, pending);
 
 	if (pending & INT_MASK_BUSY) {
 		dev_err(&dev->dev, "packet dropped\n");
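
The ethoc.c change above acknowledges only the observed 'pending' bits rather than INT_MASK_ALL, so sources that assert after INT_SOURCE is read stay pending and raise a fresh interrupt instead of being cleared unseen. An illustrative ISR skeleton of that idea; the handler name and the masking against INT_MASK are assumptions, not taken from the driver:

static irqreturn_t example_ethoc_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ethoc *priv = netdev_priv(dev);
	u32 pending;

	pending = ethoc_read(priv, INT_SOURCE) & ethoc_read(priv, INT_MASK);
	if (!pending)
		return IRQ_NONE;

	/* ack only what was observed; later events remain pending */
	ethoc_ack_irq(priv, pending);

	/* ... handle the bits recorded in 'pending' ... */

	return IRQ_HANDLED;
}
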
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2923438..16a1d58 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1654,7 +1654,7 @@
   *
   * index is only used in legacy code
   */
-int __init fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev, int index)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct bufdesc *cbd_base;
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index c40113f..66dace6 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -759,12 +759,6 @@
 
 	mpc52xx_fec_hw_init(dev);
 
-	if (priv->phydev) {
-		phy_stop(priv->phydev);
-		phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
-		phy_start(priv->phydev);
-	}
-
 	bcom_fec_rx_reset(priv->rx_dmatsk);
 	bcom_fec_tx_reset(priv->tx_dmatsk);
 
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 31e6d62..ee0f3c6 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -155,6 +155,7 @@
 	{ .compatible = "mpc5200b-fec-phy", },
 	{}
 };
+MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
 
 struct of_platform_driver mpc52xx_fec_mdio_driver = {
 	.name = "mpc5200b-fec-phy",
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 2bc2d2b..ec2f503 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1110,6 +1110,7 @@
 #endif
 	{}
 };
+MODULE_DEVICE_TABLE(of, fs_enet_match);
 
 static struct of_platform_driver fs_enet_driver = {
 	.name	= "fs_enet",
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 93b481b..24ff9f4 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -221,6 +221,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
 
 static struct of_platform_driver fs_enet_bb_mdio_driver = {
 	.name = "fsl-bb-mdio",
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index a2d69c1..96eba42 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -219,6 +219,7 @@
 #endif
 	{},
 };
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
 
 static struct of_platform_driver fs_enet_fec_mdio_driver = {
 	.name = "fsl-fec-mdio",
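
Several of the OF drivers in this patch gain a MODULE_DEVICE_TABLE(of, ...) line so that the existing match table is exported as module aliases and the module can be autoloaded when a matching device-tree node is probed. The general shape, with a placeholder compatible string:

static struct of_device_id example_mdio_match[] = {
	{ .compatible = "vendor,example-mdio", },	/* placeholder */
	{}
};
MODULE_DEVICE_TABLE(of, example_mdio_match);
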
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d167090..4065b7c 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -3,8 +3,9 @@
  * Provides Bus interface for MIIM regs
  *
  * Author: Andy Fleming <afleming@freescale.com>
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc.
+ * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
  *
  * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
  *
@@ -189,19 +190,29 @@
 
 
 #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
-static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
+static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
 {
 	struct gfar __iomem *enet_regs;
+	u32 __iomem *ioremap_tbipa;
+	u64 addr, size;
 
 	/*
 	 * This is mildly evil, but so is our hardware for doing this.
 	 * Also, we have to cast back to struct gfar because of
 	 * definition weirdness done in gianfar.h.
 	 */
-	enet_regs = (struct gfar __iomem *)
-		((char __iomem *)regs - offsetof(struct gfar, gfar_mii_regs));
-
-	return &enet_regs->tbipa;
+	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
+		of_device_is_compatible(np, "fsl,gianfar-tbi") ||
+		of_device_is_compatible(np, "gianfar")) {
+		enet_regs = (struct gfar __iomem *)regs;
+		return &enet_regs->tbipa;
+	} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
+			of_device_is_compatible(np, "fsl,etsec2-tbi")) {
+		addr = of_translate_address(np, of_get_address(np, 1, &size, NULL));
+		ioremap_tbipa = ioremap(addr, size);
+		return ioremap_tbipa;
+	} else
+		return NULL;
 }
 #endif
 
@@ -250,11 +261,11 @@
 {
 	struct device_node *np = ofdev->node;
 	struct device_node *tbi;
-	struct fsl_pq_mdio __iomem *regs;
+	struct fsl_pq_mdio __iomem *regs = NULL;
 	u32 __iomem *tbipa;
 	struct mii_bus *new_bus;
 	int tbiaddr = -1;
-	u64 addr, size;
+	u64 addr = 0, size = 0, ioremap_miimcfg = 0;
 	int err = 0;
 
 	new_bus = mdiobus_alloc();
@@ -268,8 +279,22 @@
 	fsl_pq_mdio_bus_name(new_bus->id, np);
 
 	/* Set the PHY base address */
-	addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
-	regs = ioremap(addr, size);
+	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
+		of_device_is_compatible(np, "fsl,gianfar-tbi") ||
+		of_device_is_compatible(np, "fsl,ucc-mdio") ||
+		of_device_is_compatible(np, "ucc_geth_phy")) {
+		addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
+		ioremap_miimcfg = container_of(addr, struct fsl_pq_mdio, miimcfg);
+		regs = ioremap(ioremap_miimcfg, size +
+				offsetof(struct fsl_pq_mdio, miimcfg));
+	} else if (of_device_is_compatible(np,"fsl,etsec2-mdio") ||
+			of_device_is_compatible(np, "fsl,etsec2-tbi")) {
+		addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
+		regs = ioremap(addr, size);
+	} else {
+		err = -EINVAL;
+		goto err_free_bus;
+	}
 
 	if (NULL == regs) {
 		err = -ENOMEM;
@@ -290,9 +315,15 @@
 
 	if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
 			of_device_is_compatible(np, "fsl,gianfar-tbi") ||
+			of_device_is_compatible(np, "fsl,etsec2-mdio") ||
+			of_device_is_compatible(np, "fsl,etsec2-tbi") ||
 			of_device_is_compatible(np, "gianfar")) {
 #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
-		tbipa = get_gfar_tbipa(regs);
+		tbipa = get_gfar_tbipa(regs, np);
+		if (!tbipa) {
+			err = -EINVAL;
+			goto err_free_irqs;
+		}
 #else
 		err = -ENODEV;
 		goto err_free_irqs;
@@ -405,8 +436,15 @@
 	{
 		.compatible = "fsl,gianfar-mdio",
 	},
+	{
+		.compatible = "fsl,etsec2-tbi",
+	},
+	{
+		.compatible = "fsl,etsec2-mdio",
+	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
 
 static struct of_platform_driver fsl_pq_mdio_driver = {
 	.name = "fsl-pq_mdio",
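
In fsl_pq_mdio.c above, the probe path now maps the registers per compatible string; the container_of() on the gianfar/UCC branch effectively backs the translated address up by offsetof(struct fsl_pq_mdio, miimcfg), since those legacy nodes describe only the MII group. A sketch of the same dispatch written with offsetof directly (illustrative, not the patch code):

static void __iomem *example_map_mdio_regs(struct device_node *np)
{
	u64 addr, size;

	addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));

	/* etsec2 nodes describe the whole MDIO block */
	if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
	    of_device_is_compatible(np, "fsl,etsec2-tbi"))
		return ioremap(addr, size);

	/* gianfar/UCC nodes: widen so miimcfg lands at its struct offset */
	return ioremap(addr - offsetof(struct fsl_pq_mdio, miimcfg),
	               size + offsetof(struct fsl_pq_mdio, miimcfg));
}
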
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h
index 36dad52..1f7d865 100644
--- a/drivers/net/fsl_pq_mdio.h
+++ b/drivers/net/fsl_pq_mdio.h
@@ -3,8 +3,9 @@
  * Driver for the MDIO bus controller on Freescale PowerQUICC processors
  *
  * Author: Andy Fleming
+ * Modifier: Sandeep Gopalpet
  *
- * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc.
+ * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -23,6 +24,12 @@
 #define MII_READ_COMMAND       0x00000001
 
 struct fsl_pq_mdio {
+	u8 res1[16];
+	u32 ieventm;	/* MDIO Interrupt event register (for etsec2) */
+	u32 imaskm;	/* MDIO Interrupt mask register (for etsec2) */
+	u8 res2[4];
+	u32 emapm;	/* MDIO Event mapping register (for etsec2) */
+	u8 res3[1280];
 	u32 miimcfg;		/* MII management configuration reg */
 	u32 miimcom;		/* MII management command reg */
 	u32 miimadd;		/* MII management address reg */
@@ -31,9 +38,9 @@
 	u32 miimind;		/* MII management indication reg */
 	u8 reserved[28];	/* Space holder */
 	u32 utbipar;		/* TBI phy address reg (only on UCC) */
+	u8 res4[2728];
 } __attribute__ ((packed));
 
-
 int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
 int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
 int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
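
With the paddings added to struct fsl_pq_mdio above, summing the res1/res2/res3/res4 arrays and the u32 fields by hand puts the classic MII group at offset 0x520 and the packed structure at 0x1000 bytes; treat both values as hand-derived assumptions. A compile-time check sketch:

static inline void example_fsl_pq_mdio_layout_check(void)
{
	/* 16 + 4 + 4 + 4 + 4 + 1280 = 0x520 up to miimcfg */
	BUILD_BUG_ON(offsetof(struct fsl_pq_mdio, miimcfg) != 0x520);
	/* ... plus 6 * 4 + 28 + 4 + 2728 = 0x1000 in total */
	BUILD_BUG_ON(sizeof(struct fsl_pq_mdio) != 0x1000);
}
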
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c6f6d3b..086d40dd 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
- * Copyright (c) 2007 MontaVista Software, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -109,7 +110,7 @@
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
 struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
-static int gfar_clean_tx_ring(struct net_device *dev);
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 			      int amount_pull);
 static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -142,21 +143,21 @@
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
 
 MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
 MODULE_LICENSE("GPL");
 
-static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 			    dma_addr_t buf)
 {
-	struct gfar_private *priv = netdev_priv(dev);
 	u32 lstatus;
 
 	bdp->bufPtr = buf;
 
 	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
-	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
+	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
 		lstatus |= BD_LFLAG(RXBD_WRAP);
 
 	eieio();
@@ -167,65 +168,93 @@
 static int gfar_init_bds(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *txbdp;
 	struct rxbd8 *rxbdp;
-	int i;
+	int i, j;
 
-	/* Initialize some variables in our dev structure */
-	priv->num_txbdfree = priv->tx_ring_size;
-	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
-	priv->cur_rx = priv->rx_bd_base;
-	priv->skb_curtx = priv->skb_dirtytx = 0;
-	priv->skb_currx = 0;
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		/* Initialize some variables in our dev structure */
+		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+		tx_queue->dirty_tx = tx_queue->tx_bd_base;
+		tx_queue->cur_tx = tx_queue->tx_bd_base;
+		tx_queue->skb_curtx = 0;
+		tx_queue->skb_dirtytx = 0;
 
-	/* Initialize Transmit Descriptor Ring */
-	txbdp = priv->tx_bd_base;
-	for (i = 0; i < priv->tx_ring_size; i++) {
-		txbdp->lstatus = 0;
-		txbdp->bufPtr = 0;
-		txbdp++;
-	}
-
-	/* Set the last descriptor in the ring to indicate wrap */
-	txbdp--;
-	txbdp->status |= TXBD_WRAP;
-
-	rxbdp = priv->rx_bd_base;
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct sk_buff *skb = priv->rx_skbuff[i];
-
-		if (skb) {
-			gfar_init_rxbdp(ndev, rxbdp, rxbdp->bufPtr);
-		} else {
-			skb = gfar_new_skb(ndev);
-			if (!skb) {
-				pr_err("%s: Can't allocate RX buffers\n",
-				       ndev->name);
-				return -ENOMEM;
-			}
-			priv->rx_skbuff[i] = skb;
-
-			gfar_new_rxbdp(ndev, rxbdp, skb);
+		/* Initialize Transmit Descriptor Ring */
+		txbdp = tx_queue->tx_bd_base;
+		for (j = 0; j < tx_queue->tx_ring_size; j++) {
+			txbdp->lstatus = 0;
+			txbdp->bufPtr = 0;
+			txbdp++;
 		}
 
-		rxbdp++;
+		/* Set the last descriptor in the ring to indicate wrap */
+		txbdp--;
+		txbdp->status |= TXBD_WRAP;
+	}
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		rx_queue->cur_rx = rx_queue->rx_bd_base;
+		rx_queue->skb_currx = 0;
+		rxbdp = rx_queue->rx_bd_base;
+
+		for (j = 0; j < rx_queue->rx_ring_size; j++) {
+			struct sk_buff *skb = rx_queue->rx_skbuff[j];
+
+			if (skb) {
+				gfar_init_rxbdp(rx_queue, rxbdp,
+						rxbdp->bufPtr);
+			} else {
+				skb = gfar_new_skb(ndev);
+				if (!skb) {
+					pr_err("%s: Can't allocate RX buffers\n",
+							ndev->name);
+					goto err_rxalloc_fail;
+				}
+				rx_queue->rx_skbuff[j] = skb;
+
+				gfar_new_rxbdp(rx_queue, rxbdp, skb);
+			}
+
+			rxbdp++;
+		}
+
 	}
 
 	return 0;
+
+err_rxalloc_fail:
+	free_skb_resources(priv);
+	return -ENOMEM;
 }
 
 static int gfar_alloc_skb_resources(struct net_device *ndev)
 {
 	void *vaddr;
-	int i;
+	dma_addr_t addr;
+	int i, j, k;
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct device *dev = &priv->ofdev->dev;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	priv->total_tx_ring_size = 0;
+	for (i = 0; i < priv->num_tx_queues; i++)
+		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
+
+	priv->total_rx_ring_size = 0;
+	for (i = 0; i < priv->num_rx_queues; i++)
+		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
 
 	/* Allocate memory for the buffer descriptors */
 	vaddr = dma_alloc_coherent(dev,
-			sizeof(*priv->tx_bd_base) * priv->tx_ring_size +
-			sizeof(*priv->rx_bd_base) * priv->rx_ring_size,
-			&priv->tx_bd_dma_base, GFP_KERNEL);
+			sizeof(struct txbd8) * priv->total_tx_ring_size +
+			sizeof(struct rxbd8) * priv->total_rx_ring_size,
+			&addr, GFP_KERNEL);
 	if (!vaddr) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate buffer descriptors!\n",
@@ -233,37 +262,58 @@
 		return -ENOMEM;
 	}
 
-	priv->tx_bd_base = vaddr;
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
+		/* enet DMA only understands physical addresses */
+		tx_queue->tx_bd_dma_base = addr;
+		tx_queue->dev = ndev;
+		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+	}
 
 	/* Start the rx descriptor ring where the tx ring leaves off */
-	vaddr = vaddr + sizeof(*priv->tx_bd_base) * priv->tx_ring_size;
-	priv->rx_bd_base = vaddr;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
+		rx_queue->rx_bd_dma_base = addr;
+		rx_queue->dev = ndev;
+		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+	}
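
All descriptor rings now share a single coherent allocation; the sketch below is an illustrative summary (shown for two TX and two RX queues) of how the block is carved up by the loops above.

/*
 * [ TX ring 0 ][ TX ring 1 ][ RX ring 0 ][ RX ring 1 ]
 *   txbd8[]      txbd8[]      rxbd8[]      rxbd8[]
 *
 * vaddr/addr advance past each ring; every queue keeps its own
 * tx_bd_base/rx_bd_base (virtual) and tx_bd_dma_base/rx_bd_dma_base
 * (bus address). free_skb_resources() later releases the whole block
 * with one dma_free_coherent() call sized by total_tx_ring_size and
 * total_rx_ring_size.
 */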
 
 	/* Setup the skbuff rings */
-	priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) *
-				  priv->tx_ring_size, GFP_KERNEL);
-	if (!priv->tx_skbuff) {
-		if (netif_msg_ifup(priv))
-			pr_err("%s: Could not allocate tx_skbuff\n",
-			       ndev->name);
-		goto cleanup;
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
+				  tx_queue->tx_ring_size, GFP_KERNEL);
+		if (!tx_queue->tx_skbuff) {
+			if (netif_msg_ifup(priv))
+				pr_err("%s: Could not allocate tx_skbuff\n",
+						ndev->name);
+			goto cleanup;
+		}
+
+		for (k = 0; k < tx_queue->tx_ring_size; k++)
+			tx_queue->tx_skbuff[k] = NULL;
 	}
 
-	for (i = 0; i < priv->tx_ring_size; i++)
-		priv->tx_skbuff[i] = NULL;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
+				  rx_queue->rx_ring_size, GFP_KERNEL);
 
-	priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) *
-				  priv->rx_ring_size, GFP_KERNEL);
-	if (!priv->rx_skbuff) {
-		if (netif_msg_ifup(priv))
-			pr_err("%s: Could not allocate rx_skbuff\n",
-			       ndev->name);
-		goto cleanup;
+		if (!rx_queue->rx_skbuff) {
+			if (netif_msg_ifup(priv))
+				pr_err("%s: Could not allocate rx_skbuff\n",
+				       ndev->name);
+			goto cleanup;
+		}
+
+		for (j = 0; j < rx_queue->rx_ring_size; j++)
+			rx_queue->rx_skbuff[j] = NULL;
 	}
 
-	for (i = 0; i < priv->rx_ring_size; i++)
-		priv->rx_skbuff[i] = NULL;
-
 	if (gfar_init_bds(ndev))
 		goto cleanup;
 
@@ -274,28 +324,41 @@
 	return -ENOMEM;
 }
 
+static void gfar_init_tx_rx_base(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 *baddr;
+	int i;
+
+	baddr = &regs->tbase0;
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
+		baddr	+= 2;
+	}
+
+	baddr = &regs->rbase0;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
+		baddr   += 2;
+	}
+}
+
 static void gfar_init_mac(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 rctrl = 0;
 	u32 tctrl = 0;
 	u32 attrs = 0;
 
-	/* enet DMA only understands physical addresses */
-	gfar_write(&regs->tbase0, priv->tx_bd_dma_base);
-	gfar_write(&regs->rbase0, priv->tx_bd_dma_base +
-				  sizeof(*priv->tx_bd_base) *
-				  priv->tx_ring_size);
+	/* write the tx/rx base registers */
+	gfar_init_tx_rx_base(priv);
 
 	/* Configure the coalescing support */
-	gfar_write(&regs->txic, 0);
-	if (priv->txcoalescing)
-		gfar_write(&regs->txic, priv->txic);
+	gfar_configure_coalescing(priv, 0xFF, 0xFF);
 
-	gfar_write(&regs->rxic, 0);
-	if (priv->rxcoalescing)
-		gfar_write(&regs->rxic, priv->rxic);
+	if (priv->rx_filer_enable)
+		rctrl |= RCTRL_FILREN;
 
 	if (priv->rx_csum_enable)
 		rctrl |= RCTRL_CHECKSUMMING;
@@ -324,6 +387,8 @@
 	if (ndev->features & NETIF_F_IP_CSUM)
 		tctrl |= TCTRL_INIT_CSUM;
 
+	tctrl |= TCTRL_TXSCHED_PRIO;
+
 	gfar_write(&regs->tctrl, tctrl);
 
 	/* Set the extraction length and index */
@@ -357,6 +422,7 @@
 	.ndo_set_multicast_list = gfar_set_multi,
 	.ndo_tx_timeout = gfar_timeout,
 	.ndo_do_ioctl = gfar_ioctl,
+	.ndo_select_queue = gfar_select_queue,
 	.ndo_vlan_rx_register = gfar_vlan_rx_register,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
@@ -365,56 +431,252 @@
 #endif
 };
 
+unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+
+void lock_rx_qs(struct gfar_private *priv)
+{
+	int i = 0x0;
+
+	for (i = 0; i < priv->num_rx_queues; i++)
+		spin_lock(&priv->rx_queue[i]->rxlock);
+}
+
+void lock_tx_qs(struct gfar_private *priv)
+{
+	int i = 0x0;
+
+	for (i = 0; i < priv->num_tx_queues; i++)
+		spin_lock(&priv->tx_queue[i]->txlock);
+}
+
+void unlock_rx_qs(struct gfar_private *priv)
+{
+	int i = 0x0;
+
+	for (i = 0; i < priv->num_rx_queues; i++)
+		spin_unlock(&priv->rx_queue[i]->rxlock);
+}
+
+void unlock_tx_qs(struct gfar_private *priv)
+{
+	int i = 0x0;
+
+	for (i = 0; i < priv->num_tx_queues; i++)
+		spin_unlock(&priv->tx_queue[i]->txlock);
+}
+
 /* Returns 1 if incoming frames use an FCB */
 static inline int gfar_uses_fcb(struct gfar_private *priv)
 {
 	return priv->vlgrp || priv->rx_csum_enable;
 }
 
-static int gfar_of_init(struct net_device *dev)
+u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	return skb_get_queue_mapping(skb);
+}
+
+static void free_tx_pointers(struct gfar_private *priv)
+{
+	int i = 0;
+
+	for (i = 0; i < priv->num_tx_queues; i++)
+		kfree(priv->tx_queue[i]);
+}
+
+static void free_rx_pointers(struct gfar_private *priv)
+{
+	int i = 0;
+
+	for (i = 0; i < priv->num_rx_queues; i++)
+		kfree(priv->rx_queue[i]);
+}
+
+static void unmap_group_regs(struct gfar_private *priv)
+{
+	int i = 0;
+
+	for (i = 0; i < MAXGROUPS; i++)
+		if (priv->gfargrp[i].regs)
+			iounmap(priv->gfargrp[i].regs);
+}
+
+static void disable_napi(struct gfar_private *priv)
+{
+	int i = 0;
+
+	for (i = 0; i < priv->num_grps; i++)
+		napi_disable(&priv->gfargrp[i].napi);
+}
+
+static void enable_napi(struct gfar_private *priv)
+{
+	int i = 0;
+
+	for (i = 0; i < priv->num_grps; i++)
+		napi_enable(&priv->gfargrp[i].napi);
+}
+
+static int gfar_parse_group(struct device_node *np,
+		struct gfar_private *priv, const char *model)
+{
+	u32 *queue_mask;
+	u64 addr, size;
+
+	addr = of_translate_address(np,
+			of_get_address(np, 0, &size, NULL));
+	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
+
+	if (!priv->gfargrp[priv->num_grps].regs)
+		return -ENOMEM;
+
+	priv->gfargrp[priv->num_grps].interruptTransmit =
+			irq_of_parse_and_map(np, 0);
+
+	/* If we aren't the FEC we have multiple interrupts */
+	if (model && strcasecmp(model, "FEC")) {
+		priv->gfargrp[priv->num_grps].interruptReceive =
+			irq_of_parse_and_map(np, 1);
+		priv->gfargrp[priv->num_grps].interruptError =
+			irq_of_parse_and_map(np, 2);
+		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
+			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
+			priv->gfargrp[priv->num_grps].interruptError < 0) {
+			return -EINVAL;
+		}
+	}
+
+	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
+	priv->gfargrp[priv->num_grps].priv = priv;
+	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
+	if (priv->mode == MQ_MG_MODE) {
+		queue_mask = (u32 *)of_get_property(np,
+					"fsl,rx-bit-map", NULL);
+		priv->gfargrp[priv->num_grps].rx_bit_map =
+			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+		queue_mask = (u32 *)of_get_property(np,
+					"fsl,tx-bit-map", NULL);
+		priv->gfargrp[priv->num_grps].tx_bit_map =
+			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+	} else {
+		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
+		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
+	}
+	priv->num_grps++;
+
+	return 0;
+}
+
+static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 {
 	const char *model;
 	const char *ctype;
 	const void *mac_addr;
-	u64 addr, size;
-	int err = 0;
-	struct gfar_private *priv = netdev_priv(dev);
-	struct device_node *np = priv->node;
+	int err = 0, i;
+	struct net_device *dev = NULL;
+	struct gfar_private *priv = NULL;
+	struct device_node *np = ofdev->node;
+	struct device_node *child = NULL;
 	const u32 *stash;
 	const u32 *stash_len;
 	const u32 *stash_idx;
+	unsigned int num_tx_qs, num_rx_qs;
+	u32 *tx_queues, *rx_queues;
 
 	if (!np || !of_device_is_available(np))
 		return -ENODEV;
 
-	/* get a pointer to the register memory */
-	addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
-	priv->regs = ioremap(addr, size);
+	/* parse the num of tx and rx queues */
+	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
+	num_tx_qs = tx_queues ? *tx_queues : 1;
 
-	if (priv->regs == NULL)
+	if (num_tx_qs > MAX_TX_QS) {
+		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+				num_tx_qs, MAX_TX_QS);
+		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+		return -EINVAL;
+	}
+
+	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+	num_rx_qs = rx_queues ? *rx_queues : 1;
+
+	if (num_rx_qs > MAX_RX_QS) {
+		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+				num_rx_qs, MAX_RX_QS);
+		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+		return -EINVAL;
+	}
+
+	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
+	dev = *pdev;
+	if (NULL == dev)
 		return -ENOMEM;
 
-	priv->interruptTransmit = irq_of_parse_and_map(np, 0);
+	priv = netdev_priv(dev);
+	priv->node = ofdev->node;
+	priv->ndev = dev;
+
+	dev->num_tx_queues = num_tx_qs;
+	dev->real_num_tx_queues = num_tx_qs;
+	priv->num_tx_queues = num_tx_qs;
+	priv->num_rx_queues = num_rx_qs;
+	priv->num_grps = 0x0;
 
 	model = of_get_property(np, "model", NULL);
 
-	/* If we aren't the FEC we have multiple interrupts */
-	if (model && strcasecmp(model, "FEC")) {
-		priv->interruptReceive = irq_of_parse_and_map(np, 1);
+	for (i = 0; i < MAXGROUPS; i++)
+		priv->gfargrp[i].regs = NULL;
 
-		priv->interruptError = irq_of_parse_and_map(np, 2);
-
-		if (priv->interruptTransmit < 0 ||
-				priv->interruptReceive < 0 ||
-				priv->interruptError < 0) {
-			err = -EINVAL;
-			goto err_out;
+	/* Parse and initialize group specific information */
+	if (of_device_is_compatible(np, "fsl,etsec2")) {
+		priv->mode = MQ_MG_MODE;
+		for_each_child_of_node(np, child) {
+			err = gfar_parse_group(child, priv, model);
+			if (err)
+				goto err_grp_init;
 		}
+	} else {
+		priv->mode = SQ_SG_MODE;
+		err = gfar_parse_group(np, priv, model);
+		if (err)
+			goto err_grp_init;
 	}
 
+	for (i = 0; i < priv->num_tx_queues; i++)
+		priv->tx_queue[i] = NULL;
+	for (i = 0; i < priv->num_rx_queues; i++)
+		priv->rx_queue[i] = NULL;
+
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		priv->tx_queue[i] = kmalloc(sizeof(struct gfar_priv_tx_q),
+				GFP_KERNEL);
+		if (!priv->tx_queue[i]) {
+			err = -ENOMEM;
+			goto tx_alloc_failed;
+		}
+		priv->tx_queue[i]->tx_skbuff = NULL;
+		priv->tx_queue[i]->qindex = i;
+		priv->tx_queue[i]->dev = dev;
+		spin_lock_init(&(priv->tx_queue[i]->txlock));
+	}
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i] = kmalloc(sizeof(struct gfar_priv_rx_q),
+				GFP_KERNEL);
+		if (!priv->rx_queue[i]) {
+			err = -ENOMEM;
+			goto rx_alloc_failed;
+		}
+		priv->rx_queue[i]->rx_skbuff = NULL;
+		priv->rx_queue[i]->qindex = i;
+		priv->rx_queue[i]->dev = dev;
+		spin_lock_init(&(priv->rx_queue[i]->rxlock));
+	}
+
 	stash = of_get_property(np, "bd-stash", NULL);
 
-	if(stash) {
+	if (stash) {
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
 		priv->bd_stash_en = 1;
 	}
@@ -472,8 +734,13 @@
 
 	return 0;
 
-err_out:
-	iounmap(priv->regs);
+rx_alloc_failed:
+	free_rx_pointers(priv);
+tx_alloc_failed:
+	free_tx_pointers(priv);
+err_grp_init:
+	unmap_group_regs(priv);
+	free_netdev(dev);
 	return err;
 }
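
For reference, the summary below only restates the device-tree properties that gfar_of_init() and gfar_parse_group() above consume; it is illustrative, not a binding definition.

/*
 * compatible = "fsl,etsec2"        -> MQ_MG_MODE; each child node is one
 *                                     interrupt group (gfar_parse_group)
 * fsl,num_tx_queues,
 * fsl,num_rx_queues                -> ring counts, default 1, capped at
 *                                     MAX_TX_QS / MAX_RX_QS
 * fsl,rx-bit-map, fsl,tx-bit-map   -> per-group queue ownership masks
 *                                     (MQ_MG_MODE only), default
 *                                     DEFAULT_MAPPING >> num_grps
 * bd-stash, model, ...             -> existing single-queue properties,
 *                                     handled as before
 */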
 
@@ -491,6 +758,84 @@
 	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
 }
 
+static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
+{
+	unsigned int new_bit_map = 0x0;
+	int mask = 0x1 << (max_qs - 1), i;
+	for (i = 0; i < max_qs; i++) {
+		if (bit_map & mask)
+			new_bit_map = new_bit_map + (1 << i);
+		mask = mask >> 0x1;
+	}
+	return new_bit_map;
+}
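
reverse_bitmap() converts the MSB-first queue masks taken from the device tree into the LSB-first form that for_each_bit() walks. Below is a minimal userspace sketch of the same transform, for illustration only; the helper name msb_to_lsb_mask and the 0xC0 sample mask are made up and not part of the driver.

#include <stdio.h>

/* Mirror of the MSB-first -> LSB-first conversion, outside the kernel */
static unsigned int msb_to_lsb_mask(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_map = 0, mask = 1u << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_map |= 1u << i;
		mask >>= 1;
	}
	return new_map;
}

int main(void)
{
	/* 0xC0 marks Q0 and Q1 counted from the MSB; the LSB-first
	 * result 0x03 makes for_each_bit() visit Q0 and Q1 first. */
	printf("0x%02x\n", msb_to_lsb_mask(0xC0, 8));	/* prints 0x03 */
	return 0;
}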
+
+u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, u32 class)
+{
+	u32 rqfpr = FPR_FILER_MASK;
+	u32 rqfcr = 0x0;
+
+	rqfar--;
+	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+	ftp_rqfpr[rqfar] = rqfpr;
+	ftp_rqfcr[rqfar] = rqfcr;
+	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+	rqfar--;
+	rqfcr = RQFCR_CMP_NOMATCH;
+	ftp_rqfpr[rqfar] = rqfpr;
+	ftp_rqfcr[rqfar] = rqfcr;
+	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+	rqfar--;
+	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
+	rqfpr = class;
+	ftp_rqfcr[rqfar] = rqfcr;
+	ftp_rqfpr[rqfar] = rqfpr;
+	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+	rqfar--;
+	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
+	rqfpr = class;
+	ftp_rqfcr[rqfar] = rqfcr;
+	ftp_rqfpr[rqfar] = rqfpr;
+	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+	return rqfar;
+}
+
+static void gfar_init_filer_table(struct gfar_private *priv)
+{
+	int i = 0x0;
+	u32 rqfar = MAX_FILER_IDX;
+	u32 rqfcr = 0x0;
+	u32 rqfpr = FPR_FILER_MASK;
+
+	/* Default rule */
+	rqfcr = RQFCR_CMP_MATCH;
+	ftp_rqfcr[rqfar] = rqfcr;
+	ftp_rqfpr[rqfar] = rqfpr;
+	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
+	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
+
+	/* cur_filer_idx indicates the first non-masked rule */
+	priv->cur_filer_idx = rqfar;
+
+	/* Rest are masked rules */
+	rqfcr = RQFCR_CMP_NOMATCH;
+	for (i = 0; i < rqfar; i++) {
+		ftp_rqfcr[i] = rqfcr;
+		ftp_rqfpr[i] = rqfpr;
+		gfar_write_filer(priv, i, rqfcr, rqfpr);
+	}
+}
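
As a rough picture, gfar_init_filer_table() leaves the filer programmed as sketched below; indices are relative to MAX_FILER_IDX (whose value comes from gianfar.h) and the sketch is illustrative only.

/*
 * MAX_FILER_IDX                        default RQFCR_CMP_MATCH rule
 * MAX_FILER_IDX-1 .. MAX_FILER_IDX-24  six 4-entry clusters, one per
 *                                      class: IPv6, IPv6|UDP, IPv6|TCP,
 *                                      IPv4, IPv4|UDP, IPv4|TCP
 * cur_filer_idx = MAX_FILER_IDX-24     first (lowest-indexed) non-masked
 *                                      rule, i.e. the last cluster entry
 * MAX_FILER_IDX-25 .. 0                RQFCR_CMP_NOMATCH filler entries
 */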
+
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start */
 static int gfar_probe(struct of_device *ofdev,
@@ -499,14 +844,17 @@
 	u32 tempval;
 	struct net_device *dev = NULL;
 	struct gfar_private *priv = NULL;
-	int err = 0;
+	struct gfar __iomem *regs = NULL;
+	int err = 0, i, grp_idx = 0;
 	int len_devname;
+	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
+	u32 isrg = 0;
+	u32 *baddr;
 
-	/* Create an ethernet device instance */
-	dev = alloc_etherdev(sizeof (*priv));
+	err = gfar_of_init(ofdev, &dev);
 
-	if (NULL == dev)
-		return -ENOMEM;
+	if (err)
+		return err;
 
 	priv = netdev_priv(dev);
 	priv->ndev = dev;
@@ -514,50 +862,46 @@
 	priv->node = ofdev->node;
 	SET_NETDEV_DEV(dev, &ofdev->dev);
 
-	err = gfar_of_init(dev);
-
-	if (err)
-		goto regs_fail;
-
-	spin_lock_init(&priv->txlock);
-	spin_lock_init(&priv->rxlock);
 	spin_lock_init(&priv->bflock);
 	INIT_WORK(&priv->reset_task, gfar_reset_task);
 
 	dev_set_drvdata(&ofdev->dev, priv);
+	regs = priv->gfargrp[0].regs;
 
 	/* Stop the DMA engine now, in case it was running before */
 	/* (The firmware could have used it, and left it running). */
 	gfar_halt(dev);
 
 	/* Reset MAC layer */
-	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
+	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
 
 	/* We need to delay at least 3 TX clocks */
 	udelay(2);
 
 	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-	gfar_write(&priv->regs->maccfg1, tempval);
+	gfar_write(&regs->maccfg1, tempval);
 
 	/* Initialize MACCFG2. */
-	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
+	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
 
 	/* Initialize ECNTRL */
-	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
+	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
 
 	/* Set the dev->base_addr to the gfar reg region */
-	dev->base_addr = (unsigned long) (priv->regs);
+	dev->base_addr = (unsigned long) regs;
 
 	SET_NETDEV_DEV(dev, &ofdev->dev);
 
 	/* Fill in the dev structure */
 	dev->watchdog_timeo = TX_TIMEOUT;
-	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
 	dev->mtu = 1500;
-
 	dev->netdev_ops = &gfar_netdev_ops;
 	dev->ethtool_ops = &gfar_ethtool_ops;
 
+	/* Register for napi ...We are registering NAPI for each grp */
+	for (i = 0; i < priv->num_grps; i++)
+		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
+
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
 		priv->rx_csum_enable = 1;
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -573,35 +917,35 @@
 		priv->extended_hash = 1;
 		priv->hash_width = 9;
 
-		priv->hash_regs[0] = &priv->regs->igaddr0;
-		priv->hash_regs[1] = &priv->regs->igaddr1;
-		priv->hash_regs[2] = &priv->regs->igaddr2;
-		priv->hash_regs[3] = &priv->regs->igaddr3;
-		priv->hash_regs[4] = &priv->regs->igaddr4;
-		priv->hash_regs[5] = &priv->regs->igaddr5;
-		priv->hash_regs[6] = &priv->regs->igaddr6;
-		priv->hash_regs[7] = &priv->regs->igaddr7;
-		priv->hash_regs[8] = &priv->regs->gaddr0;
-		priv->hash_regs[9] = &priv->regs->gaddr1;
-		priv->hash_regs[10] = &priv->regs->gaddr2;
-		priv->hash_regs[11] = &priv->regs->gaddr3;
-		priv->hash_regs[12] = &priv->regs->gaddr4;
-		priv->hash_regs[13] = &priv->regs->gaddr5;
-		priv->hash_regs[14] = &priv->regs->gaddr6;
-		priv->hash_regs[15] = &priv->regs->gaddr7;
+		priv->hash_regs[0] = &regs->igaddr0;
+		priv->hash_regs[1] = &regs->igaddr1;
+		priv->hash_regs[2] = &regs->igaddr2;
+		priv->hash_regs[3] = &regs->igaddr3;
+		priv->hash_regs[4] = &regs->igaddr4;
+		priv->hash_regs[5] = &regs->igaddr5;
+		priv->hash_regs[6] = &regs->igaddr6;
+		priv->hash_regs[7] = &regs->igaddr7;
+		priv->hash_regs[8] = &regs->gaddr0;
+		priv->hash_regs[9] = &regs->gaddr1;
+		priv->hash_regs[10] = &regs->gaddr2;
+		priv->hash_regs[11] = &regs->gaddr3;
+		priv->hash_regs[12] = &regs->gaddr4;
+		priv->hash_regs[13] = &regs->gaddr5;
+		priv->hash_regs[14] = &regs->gaddr6;
+		priv->hash_regs[15] = &regs->gaddr7;
 
 	} else {
 		priv->extended_hash = 0;
 		priv->hash_width = 8;
 
-		priv->hash_regs[0] = &priv->regs->gaddr0;
-		priv->hash_regs[1] = &priv->regs->gaddr1;
-		priv->hash_regs[2] = &priv->regs->gaddr2;
-		priv->hash_regs[3] = &priv->regs->gaddr3;
-		priv->hash_regs[4] = &priv->regs->gaddr4;
-		priv->hash_regs[5] = &priv->regs->gaddr5;
-		priv->hash_regs[6] = &priv->regs->gaddr6;
-		priv->hash_regs[7] = &priv->regs->gaddr7;
+		priv->hash_regs[0] = &regs->gaddr0;
+		priv->hash_regs[1] = &regs->gaddr1;
+		priv->hash_regs[2] = &regs->gaddr2;
+		priv->hash_regs[3] = &regs->gaddr3;
+		priv->hash_regs[4] = &regs->gaddr4;
+		priv->hash_regs[5] = &regs->gaddr5;
+		priv->hash_regs[6] = &regs->gaddr6;
+		priv->hash_regs[7] = &regs->gaddr7;
 	}
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
@@ -612,15 +956,70 @@
 	if (dev->features & NETIF_F_IP_CSUM)
 		dev->hard_header_len += GMAC_FCB_LEN;
 
-	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
-	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
-	priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
+	/* Program the isrg regs only if number of grps > 1 */
+	if (priv->num_grps > 1) {
+		baddr = &regs->isrg0;
+		for (i = 0; i < priv->num_grps; i++) {
+			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
+			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
+			gfar_write(baddr, isrg);
+			baddr++;
+			isrg = 0x0;
+		}
+	}
 
-	priv->txcoalescing = DEFAULT_TX_COALESCE;
-	priv->txic = DEFAULT_TXIC;
-	priv->rxcoalescing = DEFAULT_RX_COALESCE;
-	priv->rxic = DEFAULT_RXIC;
+	/* Need to reverse the bit maps as bit_map's MSB is q0,
+	 * but for_each_bit parses from right to left, which
+	 * basically reverses the queue numbers */
+	for (i = 0; i < priv->num_grps; i++) {
+		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
+				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
+				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+	}
+
+	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+	 * also assign queues to groups */
+	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+		for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+				priv->num_rx_queues) {
+			priv->gfargrp[grp_idx].num_rx_queues++;
+			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
+			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
+			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+		}
+		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+		for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
+				priv->num_tx_queues) {
+			priv->gfargrp[grp_idx].num_tx_queues++;
+			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
+			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
+			tqueue = tqueue | (TQUEUE_EN0 >> i);
+		}
+		priv->gfargrp[grp_idx].rstat = rstat;
+		priv->gfargrp[grp_idx].tstat = tstat;
+		rstat = tstat = 0;
+	}
+
+	gfar_write(&regs->rqueue, rqueue);
+	gfar_write(&regs->tqueue, tqueue);
+
+	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
+
+	/* Initializing some of the rx/tx queue level parameters */
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
+		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
+		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
+		priv->tx_queue[i]->txic = DEFAULT_TXIC;
+	}
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
+		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
+		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
+	}
 
 	/* Enable most messages by default */
 	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -641,20 +1040,43 @@
 
 	/* fill out IRQ number and name fields */
 	len_devname = strlen(dev->name);
-	strncpy(&priv->int_name_tx[0], dev->name, len_devname);
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		strncpy(&priv->int_name_tx[len_devname],
-			"_tx", sizeof("_tx") + 1);
+	for (i = 0; i < priv->num_grps; i++) {
+		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
+				len_devname);
+		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
+				"_g", sizeof("_g"));
+			priv->gfargrp[i].int_name_tx[
+				strlen(priv->gfargrp[i].int_name_tx)] = '0' + i;
+			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
+				priv->gfargrp[i].int_name_tx)],
+				"_tx", sizeof("_tx") + 1);
 
-		strncpy(&priv->int_name_rx[0], dev->name, len_devname);
-		strncpy(&priv->int_name_rx[len_devname],
-			"_rx", sizeof("_rx") + 1);
+			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
+					len_devname);
+			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
+					"_g", sizeof("_g"));
+			priv->gfargrp[i].int_name_rx[
+				strlen(priv->gfargrp[i].int_name_rx)] = '0' + i;
+			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
+				priv->gfargrp[i].int_name_rx)],
+				"_rx", sizeof("_rx") + 1);
 
-		strncpy(&priv->int_name_er[0], dev->name, len_devname);
-		strncpy(&priv->int_name_er[len_devname],
-			"_er", sizeof("_er") + 1);
-	} else
-		priv->int_name_tx[len_devname] = '\0';
+			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
+					len_devname);
+			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
+				"_g", sizeof("_g"));
+			priv->gfargrp[i].int_name_er[strlen(
+					priv->gfargrp[i].int_name_er)] = '0' + i;
+			strncpy(&priv->gfargrp[i].int_name_er[strlen(
+				priv->gfargrp[i].int_name_er)],
+				"_er", sizeof("_er") + 1);
+		} else
+			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
+	}
+
+	/* Initialize the filer table */
+	gfar_init_filer_table(priv);
 
 	/* Create all the sysfs files */
 	gfar_init_sysfs(dev);
@@ -665,14 +1087,19 @@
 	/* Even more device info helps when determining which kernel */
 	/* provided which set of benchmarks. */
 	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
-	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
-	       dev->name, priv->rx_ring_size, priv->tx_ring_size);
+	for (i = 0; i < priv->num_rx_queues; i++)
+		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
+			dev->name, i, priv->rx_queue[i]->rx_ring_size);
+	for (i = 0; i < priv->num_tx_queues; i++)
+		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
+			dev->name, i, priv->tx_queue[i]->tx_ring_size);
 
 	return 0;
 
 register_fail:
-	iounmap(priv->regs);
-regs_fail:
+	unmap_group_regs(priv);
+	free_tx_pointers(priv);
+	free_rx_pointers(priv);
 	if (priv->phy_node)
 		of_node_put(priv->phy_node);
 	if (priv->tbi_node)
@@ -693,7 +1120,7 @@
 	dev_set_drvdata(&ofdev->dev, NULL);
 
 	unregister_netdev(priv->ndev);
-	iounmap(priv->regs);
+	unmap_group_regs(priv);
 	free_netdev(priv->ndev);
 
 	return 0;
@@ -705,6 +1132,7 @@
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned long flags;
 	u32 tempval;
 
@@ -714,34 +1142,37 @@
 	netif_device_detach(ndev);
 
 	if (netif_running(ndev)) {
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+
+		local_irq_save(flags);
+		lock_tx_qs(priv);
+		lock_rx_qs(priv);
 
 		gfar_halt_nodisable(ndev);
 
 		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
-		tempval = gfar_read(&priv->regs->maccfg1);
+		tempval = gfar_read(&regs->maccfg1);
 
 		tempval &= ~MACCFG1_TX_EN;
 
 		if (!magic_packet)
 			tempval &= ~MACCFG1_RX_EN;
 
-		gfar_write(&priv->regs->maccfg1, tempval);
+		gfar_write(&regs->maccfg1, tempval);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		unlock_rx_qs(priv);
+		unlock_tx_qs(priv);
+		local_irq_restore(flags);
 
-		napi_disable(&priv->napi);
+		disable_napi(priv);
 
 		if (magic_packet) {
 			/* Enable interrupt on Magic Packet */
-			gfar_write(&priv->regs->imask, IMASK_MAG);
+			gfar_write(&regs->imask, IMASK_MAG);
 
 			/* Enable Magic Packet mode */
-			tempval = gfar_read(&priv->regs->maccfg2);
+			tempval = gfar_read(&regs->maccfg2);
 			tempval |= MACCFG2_MPEN;
-			gfar_write(&priv->regs->maccfg2, tempval);
+			gfar_write(&regs->maccfg2, tempval);
 		} else {
 			phy_stop(priv->phydev);
 		}
@@ -754,6 +1185,7 @@
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned long flags;
 	u32 tempval;
 	int magic_packet = priv->wol_en &&
@@ -770,22 +1202,23 @@
 	/* Disable Magic Packet mode, in case something
 	 * else woke us up.
 	 */
+	local_irq_save(flags);
+	lock_tx_qs(priv);
+	lock_rx_qs(priv);
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
-
-	tempval = gfar_read(&priv->regs->maccfg2);
+	tempval = gfar_read(&regs->maccfg2);
 	tempval &= ~MACCFG2_MPEN;
-	gfar_write(&priv->regs->maccfg2, tempval);
+	gfar_write(&regs->maccfg2, tempval);
 
 	gfar_start(ndev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_rx_qs(priv);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	netif_device_attach(ndev);
 
-	napi_enable(&priv->napi);
+	enable_napi(priv);
 
 	return 0;
 }
@@ -812,7 +1245,7 @@
 		phy_start(priv->phydev);
 
 	netif_device_attach(ndev);
-	napi_enable(&priv->napi);
+	enable_napi(priv);
 
 	return 0;
 }
@@ -851,7 +1284,10 @@
 static phy_interface_t gfar_get_interface(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	u32 ecntrl = gfar_read(&priv->regs->ecntrl);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 ecntrl;
+
+	ecntrl = gfar_read(&regs->ecntrl);
 
 	if (ecntrl & ECNTRL_SGMII_MODE)
 		return PHY_INTERFACE_MODE_SGMII;
@@ -973,46 +1409,52 @@
 static void init_registers(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = NULL;
+	int i = 0;
 
-	/* Clear IEVENT */
-	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		/* Clear IEVENT */
+		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
 
-	/* Initialize IMASK */
-	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
+		/* Initialize IMASK */
+		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+	}
 
+	regs = priv->gfargrp[0].regs;
 	/* Init hash registers to zero */
-	gfar_write(&priv->regs->igaddr0, 0);
-	gfar_write(&priv->regs->igaddr1, 0);
-	gfar_write(&priv->regs->igaddr2, 0);
-	gfar_write(&priv->regs->igaddr3, 0);
-	gfar_write(&priv->regs->igaddr4, 0);
-	gfar_write(&priv->regs->igaddr5, 0);
-	gfar_write(&priv->regs->igaddr6, 0);
-	gfar_write(&priv->regs->igaddr7, 0);
+	gfar_write(&regs->igaddr0, 0);
+	gfar_write(&regs->igaddr1, 0);
+	gfar_write(&regs->igaddr2, 0);
+	gfar_write(&regs->igaddr3, 0);
+	gfar_write(&regs->igaddr4, 0);
+	gfar_write(&regs->igaddr5, 0);
+	gfar_write(&regs->igaddr6, 0);
+	gfar_write(&regs->igaddr7, 0);
 
-	gfar_write(&priv->regs->gaddr0, 0);
-	gfar_write(&priv->regs->gaddr1, 0);
-	gfar_write(&priv->regs->gaddr2, 0);
-	gfar_write(&priv->regs->gaddr3, 0);
-	gfar_write(&priv->regs->gaddr4, 0);
-	gfar_write(&priv->regs->gaddr5, 0);
-	gfar_write(&priv->regs->gaddr6, 0);
-	gfar_write(&priv->regs->gaddr7, 0);
+	gfar_write(&regs->gaddr0, 0);
+	gfar_write(&regs->gaddr1, 0);
+	gfar_write(&regs->gaddr2, 0);
+	gfar_write(&regs->gaddr3, 0);
+	gfar_write(&regs->gaddr4, 0);
+	gfar_write(&regs->gaddr5, 0);
+	gfar_write(&regs->gaddr6, 0);
+	gfar_write(&regs->gaddr7, 0);
 
 	/* Zero out the rmon mib registers if it has them */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
-		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));
+		memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
 
 		/* Mask off the CAM interrupts */
-		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
-		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
+		gfar_write(&regs->rmon.cam1, 0xffffffff);
+		gfar_write(&regs->rmon.cam2, 0xffffffff);
 	}
 
 	/* Initialize the max receive buffer length */
-	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
+	gfar_write(&regs->mrblr, priv->rx_buffer_size);
 
 	/* Initialize the Minimum Frame Length Register */
-	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
+	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
 }
 
 
@@ -1020,23 +1462,28 @@
 static void gfar_halt_nodisable(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = NULL;
 	u32 tempval;
+	int i = 0;
 
-	/* Mask all interrupts */
-	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		/* Mask all interrupts */
+		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 
-	/* Clear all interrupts */
-	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+		/* Clear all interrupts */
+		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+	}
 
+	regs = priv->gfargrp[0].regs;
 	/* Stop the DMA, and wait for it to stop */
-	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval = gfar_read(&regs->dmactrl);
 	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
 	    != (DMACTRL_GRS | DMACTRL_GTS)) {
 		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
-		gfar_write(&priv->regs->dmactrl, tempval);
+		gfar_write(&regs->dmactrl, tempval);
 
-		while (!(gfar_read(&priv->regs->ievent) &
+		while (!(gfar_read(&regs->ievent) &
 			 (IEVENT_GRSC | IEVENT_GTSC)))
 			cpu_relax();
 	}
@@ -1046,7 +1493,7 @@
 void gfar_halt(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
 
 	gfar_halt_nodisable(dev);
@@ -1057,101 +1504,131 @@
 	gfar_write(&regs->maccfg1, tempval);
 }
 
+static void free_grp_irqs(struct gfar_priv_grp *grp)
+{
+	free_irq(grp->interruptError, grp);
+	free_irq(grp->interruptTransmit, grp);
+	free_irq(grp->interruptReceive, grp);
+}
+
 void stop_gfar(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	unsigned long flags;
+	int i;
 
 	phy_stop(priv->phydev);
 
+
 	/* Lock it down */
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
+	lock_rx_qs(priv);
 
 	gfar_halt(dev);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_rx_qs(priv);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	/* Free the IRQs */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		free_irq(priv->interruptError, dev);
-		free_irq(priv->interruptTransmit, dev);
-		free_irq(priv->interruptReceive, dev);
+		for (i = 0; i < priv->num_grps; i++)
+			free_grp_irqs(&priv->gfargrp[i]);
 	} else {
-		free_irq(priv->interruptTransmit, dev);
+		for (i = 0; i < priv->num_grps; i++)
+			free_irq(priv->gfargrp[i].interruptTransmit,
+					&priv->gfargrp[i]);
 	}
 
 	free_skb_resources(priv);
 }
 
+static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
+{
+	struct txbd8 *txbdp;
+	struct gfar_private *priv = netdev_priv(tx_queue->dev);
+	int i, j;
+
+	txbdp = tx_queue->tx_bd_base;
+
+	for (i = 0; i < tx_queue->tx_ring_size; i++) {
+		if (!tx_queue->tx_skbuff[i])
+			continue;
+
+		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
+				txbdp->length, DMA_TO_DEVICE);
+		txbdp->lstatus = 0;
+		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
+				j++) {
+			txbdp++;
+			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
+					txbdp->length, DMA_TO_DEVICE);
+		}
+		txbdp++;
+		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+		tx_queue->tx_skbuff[i] = NULL;
+	}
+	kfree(tx_queue->tx_skbuff);
+}
+
+static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
+{
+	struct rxbd8 *rxbdp;
+	struct gfar_private *priv = netdev_priv(rx_queue->dev);
+	int i;
+
+	rxbdp = rx_queue->rx_bd_base;
+
+	for (i = 0; i < rx_queue->rx_ring_size; i++) {
+		if (rx_queue->rx_skbuff[i]) {
+			dma_unmap_single(&priv->ofdev->dev,
+					rxbdp->bufPtr, priv->rx_buffer_size,
+					DMA_FROM_DEVICE);
+			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
+			rx_queue->rx_skbuff[i] = NULL;
+		}
+		rxbdp->lstatus = 0;
+		rxbdp->bufPtr = 0;
+		rxbdp++;
+	}
+	kfree(rx_queue->rx_skbuff);
+}
+
 /* If there are any tx skbs or rx skbs still around, free them.
  * Then free tx_skbuff and rx_skbuff */
 static void free_skb_resources(struct gfar_private *priv)
 {
-	struct device *dev = &priv->ofdev->dev;
-	struct rxbd8 *rxbdp;
-	struct txbd8 *txbdp;
-	int i, j;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int i;
 
 	/* Go through all the buffer descriptors and free their data buffers */
-	txbdp = priv->tx_bd_base;
-
-	if (!priv->tx_skbuff)
-		goto skip_tx_skbuff;
-
-	for (i = 0; i < priv->tx_ring_size; i++) {
-		if (!priv->tx_skbuff[i])
-			continue;
-
-		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
-				txbdp->length, DMA_TO_DEVICE);
-		txbdp->lstatus = 0;
-		for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
-			txbdp++;
-			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
-					txbdp->length, DMA_TO_DEVICE);
-		}
-		txbdp++;
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
-		priv->tx_skbuff[i] = NULL;
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		tx_queue = priv->tx_queue[i];
+		if (tx_queue->tx_skbuff)
+			free_skb_tx_queue(tx_queue);
 	}
 
-	kfree(priv->tx_skbuff);
-skip_tx_skbuff:
-
-	rxbdp = priv->rx_bd_base;
-
-	if (!priv->rx_skbuff)
-		goto skip_rx_skbuff;
-
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		if (priv->rx_skbuff[i]) {
-			dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
-					 priv->rx_buffer_size,
-					DMA_FROM_DEVICE);
-			dev_kfree_skb_any(priv->rx_skbuff[i]);
-			priv->rx_skbuff[i] = NULL;
-		}
-
-		rxbdp->lstatus = 0;
-		rxbdp->bufPtr = 0;
-		rxbdp++;
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		rx_queue = priv->rx_queue[i];
+		if (rx_queue->rx_skbuff)
+			free_skb_rx_queue(rx_queue);
 	}
 
-	kfree(priv->rx_skbuff);
-skip_rx_skbuff:
-
-	dma_free_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
-			       sizeof(*rxbdp) * priv->rx_ring_size,
-			  priv->tx_bd_base, priv->tx_bd_dma_base);
+	dma_free_coherent(&priv->ofdev->dev,
+			sizeof(struct txbd8) * priv->total_tx_ring_size +
+			sizeof(struct rxbd8) * priv->total_rx_ring_size,
+			priv->tx_queue[0]->tx_bd_base,
+			priv->tx_queue[0]->tx_bd_dma_base);
 }
 
 void gfar_start(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
+	int i = 0;
 
 	/* Enable Rx and Tx in MACCFG1 */
 	tempval = gfar_read(&regs->maccfg1);
@@ -1159,94 +1636,158 @@
 	gfar_write(&regs->maccfg1, tempval);
 
 	/* Initialize DMACTRL to have WWR and WOP */
-	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval = gfar_read(&regs->dmactrl);
 	tempval |= DMACTRL_INIT_SETTINGS;
-	gfar_write(&priv->regs->dmactrl, tempval);
+	gfar_write(&regs->dmactrl, tempval);
 
 	/* Make sure we aren't stopped */
-	tempval = gfar_read(&priv->regs->dmactrl);
+	tempval = gfar_read(&regs->dmactrl);
 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
-	gfar_write(&priv->regs->dmactrl, tempval);
+	gfar_write(&regs->dmactrl, tempval);
 
-	/* Clear THLT/RHLT, so that the DMA starts polling now */
-	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
-	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);
-
-	/* Unmask the interrupts we look for */
-	gfar_write(&regs->imask, IMASK_DEFAULT);
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		/* Clear THLT/RHLT, so that the DMA starts polling now */
+		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+		/* Unmask the interrupts we look for */
+		gfar_write(&regs->imask, IMASK_DEFAULT);
+	}
 
 	dev->trans_start = jiffies;
 }
 
-/* Bring the controller up and running */
-int startup_gfar(struct net_device *ndev)
+void gfar_configure_coalescing(struct gfar_private *priv,
+	unsigned int tx_mask, unsigned int rx_mask)
 {
-	struct gfar_private *priv = netdev_priv(ndev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 *baddr;
+	int i = 0;
+
+	/* Backward compatible case ---- even if we enable
+	 * multiple queues, there's only single reg to program
+	 */
+	gfar_write(&regs->txic, 0);
+	if (likely(priv->tx_queue[0]->txcoalescing))
+		gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+
+	gfar_write(&regs->rxic, 0);
+	if (unlikely(priv->rx_queue[0]->rxcoalescing))
+		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+
+	if (priv->mode == MQ_MG_MODE) {
+		baddr = &regs->txic0;
+		for_each_bit (i, &tx_mask, priv->num_tx_queues) {
+			if (likely(priv->tx_queue[i]->txcoalescing)) {
+				gfar_write(baddr + i, 0);
+				gfar_write(baddr + i, priv->tx_queue[i]->txic);
+			}
+		}
+
+		baddr = &regs->rxic0;
+		for_each_bit (i, &rx_mask, priv->num_rx_queues) {
+			if (likely(priv->rx_queue[i]->rxcoalescing)) {
+				gfar_write(baddr + i, 0);
+				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+			}
+		}
+	}
+}
+
+static int register_grp_irqs(struct gfar_priv_grp *grp)
+{
+	struct gfar_private *priv = grp->priv;
+	struct net_device *dev = priv->ndev;
 	int err;
 
-	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
-
-	err = gfar_alloc_skb_resources(ndev);
-	if (err)
-		return err;
-
-	gfar_init_mac(ndev);
-
 	/* If the device has multiple interrupts, register for
 	 * them.  Otherwise, only register for the one */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
 		/* Install our interrupt handlers for Error,
 		 * Transmit, and Receive */
-		err = request_irq(priv->interruptError, gfar_error, 0,
-				  priv->int_name_er, ndev);
-		if (err) {
+		if ((err = request_irq(grp->interruptError, gfar_error, 0,
+				grp->int_name_er, grp)) < 0) {
 			if (netif_msg_intr(priv))
-				pr_err("%s: Can't get IRQ %d\n", ndev->name,
-				       priv->interruptError);
-			goto err_irq_fail;
+				printk(KERN_ERR "%s: Can't get IRQ %d\n",
+					dev->name, grp->interruptError);
+			goto err_irq_fail;
 		}
 
-		err = request_irq(priv->interruptTransmit, gfar_transmit, 0,
-				  priv->int_name_tx, ndev);
-		if (err) {
+		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
+				0, grp->int_name_tx, grp)) < 0) {
 			if (netif_msg_intr(priv))
-				pr_err("%s: Can't get IRQ %d\n", ndev->name,
-				       priv->interruptTransmit);
+				printk(KERN_ERR "%s: Can't get IRQ %d\n",
+					dev->name, grp->interruptTransmit);
 			goto tx_irq_fail;
 		}
 
-		err = request_irq(priv->interruptReceive, gfar_receive, 0,
-				  priv->int_name_rx, ndev);
-		if (err) {
+		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
+				grp->int_name_rx, grp)) < 0) {
 			if (netif_msg_intr(priv))
-				pr_err("%s: Can't get IRQ %d (receive0)\n",
-				       ndev->name, priv->interruptReceive);
+				printk(KERN_ERR "%s: Can't get IRQ %d\n",
+					dev->name, grp->interruptReceive);
 			goto rx_irq_fail;
 		}
 	} else {
-		err = request_irq(priv->interruptTransmit, gfar_interrupt,
-				0, priv->int_name_tx, ndev);
-		if (err) {
+		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
+				grp->int_name_tx, grp)) < 0) {
 			if (netif_msg_intr(priv))
-				pr_err("%s: Can't get IRQ %d\n", ndev->name,
-				       priv->interruptTransmit);
+				printk(KERN_ERR "%s: Can't get IRQ %d\n",
+					dev->name, grp->interruptTransmit);
 			goto err_irq_fail;
 		}
 	}
 
+	return 0;
+
+rx_irq_fail:
+	free_irq(grp->interruptTransmit, grp);
+tx_irq_fail:
+	free_irq(grp->interruptError, grp);
+err_irq_fail:
+	return err;
+
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct gfar __iomem *regs = NULL;
+	int err, i, j;
+
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+	}
+
+	regs = priv->gfargrp[0].regs;
+	err = gfar_alloc_skb_resources(ndev);
+	if (err)
+		return err;
+
+	gfar_init_mac(ndev);
+
+	for (i = 0; i < priv->num_grps; i++) {
+		err = register_grp_irqs(&priv->gfargrp[i]);
+		if (err) {
+			for (j = 0; j < i; j++)
+				free_grp_irqs(&priv->gfargrp[j]);
+			goto irq_fail;
+		}
+	}
+
 	/* Start the controller */
 	gfar_start(ndev);
 
 	phy_start(priv->phydev);
 
+	gfar_configure_coalescing(priv, 0xFF, 0xFF);
+
 	return 0;
 
-rx_irq_fail:
-	free_irq(priv->interruptTransmit, ndev);
-tx_irq_fail:
-	free_irq(priv->interruptError, ndev);
-err_irq_fail:
+irq_fail:
 	free_skb_resources(priv);
 	return err;
 }
@@ -1258,7 +1799,7 @@
 	struct gfar_private *priv = netdev_priv(dev);
 	int err;
 
-	napi_enable(&priv->napi);
+	enable_napi(priv);
 
 	skb_queue_head_init(&priv->rx_recycle);
 
@@ -1269,18 +1810,18 @@
 
 	err = init_phy(dev);
 
-	if(err) {
-		napi_disable(&priv->napi);
+	if (err) {
+		disable_napi(priv);
 		return err;
 	}
 
 	err = startup_gfar(dev);
 	if (err) {
-		napi_disable(&priv->napi);
+		disable_napi(priv);
 		return err;
 	}
 
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	device_set_wakeup_enable(&dev->dev, priv->wol_en);
 
@@ -1349,15 +1890,23 @@
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct netdev_queue *txq;
+	struct gfar __iomem *regs = NULL;
 	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp, *txbdp_start, *base;
 	u32 lstatus;
-	int i;
+	int i, rq = 0;
 	u32 bufaddr;
 	unsigned long flags;
 	unsigned int nr_frags, length;
 
-	base = priv->tx_bd_base;
+
+	rq = skb->queue_mapping;
+	tx_queue = priv->tx_queue[rq];
+	txq = netdev_get_tx_queue(dev, rq);
+	base = tx_queue->tx_bd_base;
+	regs = tx_queue->grp->regs;
 
 	/* make space for additional header when fcb is needed */
 	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1378,21 +1927,21 @@
 	/* total number of fragments in the SKB */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock_irqsave(&tx_queue->txlock, flags);
 
 	/* check if there is space to queue this packet */
-	if ((nr_frags+1) > priv->num_txbdfree) {
+	if ((nr_frags+1) > tx_queue->num_txbdfree) {
 		/* no space, stop the queue */
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		dev->stats.tx_fifo_errors++;
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		spin_unlock_irqrestore(&tx_queue->txlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
 	/* Update transmit stats */
 	dev->stats.tx_bytes += skb->len;
 
-	txbdp = txbdp_start = priv->cur_tx;
+	txbdp = txbdp_start = tx_queue->cur_tx;
 
 	if (nr_frags == 0) {
 		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1400,7 +1949,7 @@
 		/* Place the fragment addresses and lengths into the TxBDs */
 		for (i = 0; i < nr_frags; i++) {
 			/* Point at the next BD, wrapping as needed */
-			txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
+			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 			length = skb_shinfo(skb)->frags[i].size;
 
@@ -1442,7 +1991,7 @@
 	}
 
 	/* setup the TxBD length and buffer pointer for the first BD */
-	priv->tx_skbuff[priv->skb_curtx] = skb;
+	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
 	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
 			skb_headlen(skb), DMA_TO_DEVICE);
 
@@ -1462,29 +2011,29 @@
 
 	/* Update the current skb pointer to the next entry we will use
 	 * (wrapping if necessary) */
-	priv->skb_curtx = (priv->skb_curtx + 1) &
-		TX_RING_MOD_MASK(priv->tx_ring_size);
+	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
 
-	priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
+	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
 	/* reduce TxBD free count */
-	priv->num_txbdfree -= (nr_frags + 1);
+	tx_queue->num_txbdfree -= (nr_frags + 1);
 
 	dev->trans_start = jiffies;
 
 	/* If the next BD still needs to be cleaned up, then the bds
 	   are full.  We need to tell the kernel to stop sending us stuff. */
-	if (!priv->num_txbdfree) {
-		netif_stop_queue(dev);
+	if (!tx_queue->num_txbdfree) {
+		netif_tx_stop_queue(txq);
 
 		dev->stats.tx_fifo_errors++;
 	}
 
 	/* Tell the DMA to go go go */
-	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
 
 	/* Unlock priv */
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	spin_unlock_irqrestore(&tx_queue->txlock, flags);
 
 	return NETDEV_TX_OK;
 }
@@ -1494,7 +2043,7 @@
 {
 	struct gfar_private *priv = netdev_priv(dev);
 
-	napi_disable(&priv->napi);
+	disable_napi(priv);
 
 	skb_queue_purge(&priv->rx_recycle);
 	cancel_work_sync(&priv->reset_task);
@@ -1504,7 +2053,7 @@
 	phy_disconnect(priv->phydev);
 	priv->phydev = NULL;
 
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 
 	return 0;
 }
@@ -1523,50 +2072,55 @@
 		struct vlan_group *grp)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = NULL;
 	unsigned long flags;
 	u32 tempval;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	regs = priv->gfargrp[0].regs;
+	local_irq_save(flags);
+	lock_rx_qs(priv);
 
 	priv->vlgrp = grp;
 
 	if (grp) {
 		/* Enable VLAN tag insertion */
-		tempval = gfar_read(&priv->regs->tctrl);
+		tempval = gfar_read(&regs->tctrl);
 		tempval |= TCTRL_VLINS;
 
-		gfar_write(&priv->regs->tctrl, tempval);
+		gfar_write(&regs->tctrl, tempval);
 
 		/* Enable VLAN tag extraction */
-		tempval = gfar_read(&priv->regs->rctrl);
+		tempval = gfar_read(&regs->rctrl);
 		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
-		gfar_write(&priv->regs->rctrl, tempval);
+		gfar_write(&regs->rctrl, tempval);
 	} else {
 		/* Disable VLAN tag insertion */
-		tempval = gfar_read(&priv->regs->tctrl);
+		tempval = gfar_read(&regs->tctrl);
 		tempval &= ~TCTRL_VLINS;
-		gfar_write(&priv->regs->tctrl, tempval);
+		gfar_write(&regs->tctrl, tempval);
 
 		/* Disable VLAN tag extraction */
-		tempval = gfar_read(&priv->regs->rctrl);
+		tempval = gfar_read(&regs->rctrl);
 		tempval &= ~RCTRL_VLEX;
 		/* If parse is no longer required, then disable parser */
 		if (tempval & RCTRL_REQ_PARSER)
 			tempval |= RCTRL_PRSDEP_INIT;
 		else
 			tempval &= ~RCTRL_PRSDEP_INIT;
-		gfar_write(&priv->regs->rctrl, tempval);
+		gfar_write(&regs->rctrl, tempval);
 	}
 
 	gfar_change_mtu(dev, dev->mtu);
 
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 }
 
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 {
 	int tempsize, tempval;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	int oldsize = priv->rx_buffer_size;
 	int frame_size = new_mtu + ETH_HLEN;
 
@@ -1598,20 +2152,20 @@
 
 	dev->mtu = new_mtu;
 
-	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
-	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
+	gfar_write(&regs->mrblr, priv->rx_buffer_size);
+	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
 
 	/* If the mtu is larger than the max size for standard
 	 * ethernet frames (ie, a jumbo frame), then set maccfg2
 	 * to allow huge frames, and to check the length */
-	tempval = gfar_read(&priv->regs->maccfg2);
+	tempval = gfar_read(&regs->maccfg2);
 
 	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
 		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 	else
 		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
 
-	gfar_write(&priv->regs->maccfg2, tempval);
+	gfar_write(&regs->maccfg2, tempval);
 
 	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
 		startup_gfar(dev);
@@ -1631,10 +2185,10 @@
 	struct net_device *dev = priv->ndev;
 
 	if (dev->flags & IFF_UP) {
-		netif_stop_queue(dev);
+		netif_tx_stop_all_queues(dev);
 		stop_gfar(dev);
 		startup_gfar(dev);
-		netif_start_queue(dev);
+		netif_tx_start_all_queues(dev);
 	}
 
 	netif_tx_schedule_all(dev);
@@ -1649,24 +2203,27 @@
 }
 
 /* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct net_device *dev)
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
+	struct net_device *dev = tx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *bdp;
 	struct txbd8 *lbdp = NULL;
-	struct txbd8 *base = priv->tx_bd_base;
+	struct txbd8 *base = tx_queue->tx_bd_base;
 	struct sk_buff *skb;
 	int skb_dirtytx;
-	int tx_ring_size = priv->tx_ring_size;
+	int tx_ring_size = tx_queue->tx_ring_size;
 	int frags = 0;
 	int i;
 	int howmany = 0;
 	u32 lstatus;
 
-	bdp = priv->dirty_tx;
-	skb_dirtytx = priv->skb_dirtytx;
+	rx_queue = priv->rx_queue[tx_queue->qindex];
+	bdp = tx_queue->dirty_tx;
+	skb_dirtytx = tx_queue->skb_dirtytx;
 
-	while ((skb = priv->tx_skbuff[skb_dirtytx])) {
+	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
 		frags = skb_shinfo(skb)->nr_frags;
 		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 
@@ -1698,74 +2255,71 @@
 		 * If there's room in the queue (limit it to rx_buffer_size)
 		 * we add this skb back into the pool, if it's the right size
 		 */
-		if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
+		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
 				skb_recycle_check(skb, priv->rx_buffer_size +
 					RXBUF_ALIGNMENT))
 			__skb_queue_head(&priv->rx_recycle, skb);
 		else
 			dev_kfree_skb_any(skb);
 
-		priv->tx_skbuff[skb_dirtytx] = NULL;
+		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
 		skb_dirtytx = (skb_dirtytx + 1) &
 			TX_RING_MOD_MASK(tx_ring_size);
 
 		howmany++;
-		priv->num_txbdfree += frags + 1;
+		tx_queue->num_txbdfree += frags + 1;
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (netif_queue_stopped(dev) && priv->num_txbdfree)
-		netif_wake_queue(dev);
+	if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
+		netif_wake_subqueue(dev, tx_queue->qindex);
 
 	/* Update dirty indicators */
-	priv->skb_dirtytx = skb_dirtytx;
-	priv->dirty_tx = bdp;
+	tx_queue->skb_dirtytx = skb_dirtytx;
+	tx_queue->dirty_tx = bdp;
 
 	dev->stats.tx_packets += howmany;
 
 	return howmany;
 }
 
-static void gfar_schedule_cleanup(struct net_device *dev)
+static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
 {
-	struct gfar_private *priv = netdev_priv(dev);
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->txlock, flags);
-	spin_lock(&priv->rxlock);
-
-	if (napi_schedule_prep(&priv->napi)) {
-		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
-		__napi_schedule(&priv->napi);
+	spin_lock_irqsave(&gfargrp->grplock, flags);
+	if (napi_schedule_prep(&gfargrp->napi)) {
+		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
+		__napi_schedule(&gfargrp->napi);
 	} else {
 		/*
 		 * Clear IEVENT, so interrupts aren't called again
 		 * because of the packets that have already arrived.
 		 */
-		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
 	}
+	spin_unlock_irqrestore(&gfargrp->grplock, flags);
 
-	spin_unlock(&priv->rxlock);
-	spin_unlock_irqrestore(&priv->txlock, flags);
 }
 
 /* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
 {
-	gfar_schedule_cleanup((struct net_device *)dev_id);
+	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
 	return IRQ_HANDLED;
 }
 
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 		struct sk_buff *skb)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct gfar_private *priv = netdev_priv(dev);
 	dma_addr_t buf;
 
 	buf = dma_map_single(&priv->ofdev->dev, skb->data,
 			     priv->rx_buffer_size, DMA_FROM_DEVICE);
-	gfar_init_rxbdp(dev, bdp, buf);
+	gfar_init_rxbdp(rx_queue, bdp, buf);
 }
 
 
@@ -1832,9 +2386,9 @@
 	}
 }
 
-irqreturn_t gfar_receive(int irq, void *dev_id)
+irqreturn_t gfar_receive(int irq, void *grp_id)
 {
-	gfar_schedule_cleanup((struct net_device *)dev_id);
+	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
 	return IRQ_HANDLED;
 }
 
@@ -1864,6 +2418,7 @@
 	fcb = (struct rxfcb *)skb->data;
 
 	/* Remove the FCB from the skb */
+	skb_set_queue_mapping(skb, fcb->rq);
 	/* Remove the padded bytes, if there are any */
 	if (amount_pull)
 		skb_pull(skb, amount_pull);
@@ -1890,8 +2445,9 @@
  *   until the budget/quota has been reached. Returns the number
  *   of frames handled
  */
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
+	struct net_device *dev = rx_queue->dev;
 	struct rxbd8 *bdp, *base;
 	struct sk_buff *skb;
 	int pkt_len;
@@ -1900,8 +2456,8 @@
 	struct gfar_private *priv = netdev_priv(dev);
 
 	/* Get the first full descriptor */
-	bdp = priv->cur_rx;
-	base = priv->rx_bd_base;
+	bdp = rx_queue->cur_rx;
+	base = rx_queue->rx_bd_base;
 
 	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
 		priv->padding;
@@ -1913,7 +2469,7 @@
 		/* Add another skb for the future */
 		newskb = gfar_new_skb(dev);
 
-		skb = priv->rx_skbuff[priv->skb_currx];
+		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 
 		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
 				priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1961,46 +2517,77 @@
 
 		}
 
-		priv->rx_skbuff[priv->skb_currx] = newskb;
+		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
 
 		/* Setup the new bdp */
-		gfar_new_rxbdp(dev, bdp, newskb);
+		gfar_new_rxbdp(rx_queue, bdp, newskb);
 
 		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, priv->rx_ring_size);
+		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
 
 		/* update to point at the next skb */
-		priv->skb_currx =
-		    (priv->skb_currx + 1) &
-		    RX_RING_MOD_MASK(priv->rx_ring_size);
+		rx_queue->skb_currx =
+		    (rx_queue->skb_currx + 1) &
+		    RX_RING_MOD_MASK(rx_queue->rx_ring_size);
 	}
 
 	/* Update the current rxbd pointer to be the next one */
-	priv->cur_rx = bdp;
+	rx_queue->cur_rx = bdp;
 
 	return howmany;
 }
 
 static int gfar_poll(struct napi_struct *napi, int budget)
 {
-	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
-	struct net_device *dev = priv->ndev;
-	int tx_cleaned = 0;
-	int rx_cleaned = 0;
+	struct gfar_priv_grp *gfargrp = container_of(napi,
+			struct gfar_priv_grp, napi);
+	struct gfar_private *priv = gfargrp->priv;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
+	int tx_cleaned = 0, i, left_over_budget = budget;
+	unsigned long serviced_queues = 0;
+	int num_queues = 0;
 	unsigned long flags;
 
+	num_queues = gfargrp->num_rx_queues;
+	budget_per_queue = budget/num_queues;
+
 	/* Clear IEVENT, so interrupts aren't called again
 	 * because of the packets that have already arrived */
-	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
 
-	/* If we fail to get the lock, don't bother with the TX BDs */
-	if (spin_trylock_irqsave(&priv->txlock, flags)) {
-		tx_cleaned = gfar_clean_tx_ring(dev);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+	while (num_queues && left_over_budget) {
+
+		budget_per_queue = left_over_budget/num_queues;
+		left_over_budget = 0;
+
+		for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+			if (test_bit(i, &serviced_queues))
+				continue;
+			rx_queue = priv->rx_queue[i];
+			tx_queue = priv->tx_queue[rx_queue->qindex];
+
+			/* If we fail to get the lock,
+			 * don't bother with the TX BDs */
+			if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
+				tx_cleaned += gfar_clean_tx_ring(tx_queue);
+				spin_unlock_irqrestore(&tx_queue->txlock,
+							flags);
+			}
+
+			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
+							budget_per_queue);
+			rx_cleaned += rx_cleaned_per_queue;
+			if (rx_cleaned_per_queue < budget_per_queue) {
+				left_over_budget = left_over_budget +
+					(budget_per_queue - rx_cleaned_per_queue);
+				set_bit(i, &serviced_queues);
+				num_queues--;
+			}
+		}
 	}
 
-	rx_cleaned = gfar_clean_rx_ring(dev, budget);
-
 	if (tx_cleaned)
 		return budget;
 
@@ -2008,20 +2595,14 @@
 		napi_complete(napi);
 
 		/* Clear the halt bit in RSTAT */
-		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
+		gfar_write(&regs->rstat, gfargrp->rstat);
 
-		gfar_write(&priv->regs->imask, IMASK_DEFAULT);
+		gfar_write(&regs->imask, IMASK_DEFAULT);
 
 		/* If we are coalescing interrupts, update the timer */
 		/* Otherwise, clear it */
-		if (likely(priv->rxcoalescing)) {
-			gfar_write(&priv->regs->rxic, 0);
-			gfar_write(&priv->regs->rxic, priv->rxic);
-		}
-		if (likely(priv->txcoalescing)) {
-			gfar_write(&priv->regs->txic, 0);
-			gfar_write(&priv->regs->txic, priv->txic);
-		}
+		gfar_configure_coalescing(priv,
+				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
 	}
 
 	return rx_cleaned;
@@ -2036,44 +2617,49 @@
 static void gfar_netpoll(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	int i = 0;
 
 	/* If the device has multiple interrupts, run tx/rx */
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-		disable_irq(priv->interruptTransmit);
-		disable_irq(priv->interruptReceive);
-		disable_irq(priv->interruptError);
-		gfar_interrupt(priv->interruptTransmit, dev);
-		enable_irq(priv->interruptError);
-		enable_irq(priv->interruptReceive);
-		enable_irq(priv->interruptTransmit);
+		for (i = 0; i < priv->num_grps; i++) {
+			disable_irq(priv->gfargrp[i].interruptTransmit);
+			disable_irq(priv->gfargrp[i].interruptReceive);
+			disable_irq(priv->gfargrp[i].interruptError);
+			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+						&priv->gfargrp[i]);
+			enable_irq(priv->gfargrp[i].interruptError);
+			enable_irq(priv->gfargrp[i].interruptReceive);
+			enable_irq(priv->gfargrp[i].interruptTransmit);
+		}
 	} else {
-		disable_irq(priv->interruptTransmit);
-		gfar_interrupt(priv->interruptTransmit, dev);
-		enable_irq(priv->interruptTransmit);
+		for (i = 0; i < priv->num_grps; i++) {
+			disable_irq(priv->gfargrp[i].interruptTransmit);
+			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+						&priv->gfargrp[i]);
+			enable_irq(priv->gfargrp[i].interruptTransmit);
+		}
 	}
 }
 #endif
 
 /* The interrupt handler for devices with one interrupt */
-static irqreturn_t gfar_interrupt(int irq, void *dev_id)
+static irqreturn_t gfar_interrupt(int irq, void *grp_id)
 {
-	struct net_device *dev = dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_grp *gfargrp = grp_id;
 
 	/* Save ievent for future reference */
-	u32 events = gfar_read(&priv->regs->ievent);
+	u32 events = gfar_read(&gfargrp->regs->ievent);
 
 	/* Check for reception */
 	if (events & IEVENT_RX_MASK)
-		gfar_receive(irq, dev_id);
+		gfar_receive(irq, grp_id);
 
 	/* Check for transmit completion */
 	if (events & IEVENT_TX_MASK)
-		gfar_transmit(irq, dev_id);
+		gfar_transmit(irq, grp_id);
 
 	/* Check for errors */
 	if (events & IEVENT_ERR_MASK)
-		gfar_error(irq, dev_id);
+		gfar_error(irq, grp_id);
 
 	return IRQ_HANDLED;
 }
@@ -2087,12 +2673,14 @@
 static void adjust_link(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned long flags;
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
+
 	if (phydev->link) {
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2157,8 +2745,8 @@
 
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
-
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 }
 
 /* Update the hash table based on the current list of multicast
@@ -2169,10 +2757,10 @@
 {
 	struct dev_mc_list *mc_ptr;
 	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->regs;
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
 
-	if(dev->flags & IFF_PROMISC) {
+	if (dev->flags & IFF_PROMISC) {
 		/* Set RCTRL to PROM */
 		tempval = gfar_read(&regs->rctrl);
 		tempval |= RCTRL_PROM;
@@ -2184,7 +2772,7 @@
 		gfar_write(&regs->rctrl, tempval);
 	}
 
-	if(dev->flags & IFF_ALLMULTI) {
+	if (dev->flags & IFF_ALLMULTI) {
 		/* Set the hash to rx all multicast frames */
 		gfar_write(&regs->igaddr0, 0xffffffff);
 		gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2236,7 +2824,7 @@
 			em_num = 0;
 		}
 
-		if(dev->mc_count == 0)
+		if (dev->mc_count == 0)
 			return;
 
 		/* Parse the list, and set the appropriate bits */
@@ -2302,10 +2890,11 @@
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	int idx;
 	char tmpbuf[MAC_ADDR_LEN];
 	u32 tempval;
-	u32 __iomem *macptr = &priv->regs->macstnaddr1;
+	u32 __iomem *macptr = &regs->macstnaddr1;
 
 	macptr += num*2;
 
@@ -2322,16 +2911,18 @@
 }
 
 /* GFAR error interrupt handler */
-static irqreturn_t gfar_error(int irq, void *dev_id)
+static irqreturn_t gfar_error(int irq, void *grp_id)
 {
-	struct net_device *dev = dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_grp *gfargrp = grp_id;
+	struct gfar __iomem *regs = gfargrp->regs;
+	struct gfar_private *priv = gfargrp->priv;
+	struct net_device *dev = priv->ndev;
 
 	/* Save ievent for future reference */
-	u32 events = gfar_read(&priv->regs->ievent);
+	u32 events = gfar_read(&regs->ievent);
 
 	/* Clear IEVENT */
-	gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);
+	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
 
 	/* Magic Packet is not an error. */
 	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
@@ -2341,7 +2932,7 @@
 	/* Hmm... */
 	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
 		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
-		       dev->name, events, gfar_read(&priv->regs->imask));
+		       dev->name, events, gfar_read(&regs->imask));
 
 	/* Update the error counters */
 	if (events & IEVENT_TXE) {
@@ -2359,7 +2950,7 @@
 			priv->extra_stats.tx_underrun++;
 
 			/* Reactivate the Tx Queues */
-			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
+			gfar_write(&regs->tstat, gfargrp->tstat);
 		}
 		if (netif_msg_tx_err(priv))
 			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -2368,11 +2959,11 @@
 		dev->stats.rx_errors++;
 		priv->extra_stats.rx_bsy++;
 
-		gfar_receive(irq, dev_id);
+		gfar_receive(irq, grp_id);
 
 		if (netif_msg_rx_err(priv))
 			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
-			       dev->name, gfar_read(&priv->regs->rstat));
+			       dev->name, gfar_read(&regs->rstat));
 	}
 	if (events & IEVENT_BABR) {
 		dev->stats.rx_errors++;
@@ -2397,17 +2988,18 @@
 	return IRQ_HANDLED;
 }
 
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:fsl-gianfar");
-
 static struct of_device_id gfar_match[] =
 {
 	{
 		.type = "network",
 		.compatible = "gianfar",
 	},
+	{
+		.compatible = "fsl,etsec2",
+	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, gfar_match);
 
 /* Structure for a device driver */
 static struct of_platform_driver gfar_driver = {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 05732fa..44b63da 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -7,8 +7,9 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -74,6 +75,13 @@
 extern const char gfar_driver_name[];
 extern const char gfar_driver_version[];
 
+/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
+#define MAX_TX_QS	0x8
+#define MAX_RX_QS	0x8
+
+/* MAXIMUM NUMBER OF GROUPS SUPPORTED */
+#define MAXGROUPS 0x2
+
 /* These need to be powers of 2 for this driver */
 #define DEFAULT_TX_RING_SIZE	256
 #define DEFAULT_RX_RING_SIZE	256
@@ -171,12 +179,63 @@
 
 #define MINFLR_INIT_SETTINGS	0x00000040
 
+/* Tqueue control */
+#define TQUEUE_EN0		0x00008000
+#define TQUEUE_EN1		0x00004000
+#define TQUEUE_EN2		0x00002000
+#define TQUEUE_EN3		0x00001000
+#define TQUEUE_EN4		0x00000800
+#define TQUEUE_EN5		0x00000400
+#define TQUEUE_EN6		0x00000200
+#define TQUEUE_EN7		0x00000100
+#define TQUEUE_EN_ALL		0x0000FF00
+
+#define TR03WT_WT0_MASK		0xFF000000
+#define TR03WT_WT1_MASK		0x00FF0000
+#define TR03WT_WT2_MASK		0x0000FF00
+#define TR03WT_WT3_MASK		0x000000FF
+
+#define TR47WT_WT4_MASK		0xFF000000
+#define TR47WT_WT5_MASK		0x00FF0000
+#define TR47WT_WT6_MASK		0x0000FF00
+#define TR47WT_WT7_MASK		0x000000FF
+
+/* Rqueue control */
+#define RQUEUE_EX0		0x00800000
+#define RQUEUE_EX1		0x00400000
+#define RQUEUE_EX2		0x00200000
+#define RQUEUE_EX3		0x00100000
+#define RQUEUE_EX4		0x00080000
+#define RQUEUE_EX5		0x00040000
+#define RQUEUE_EX6		0x00020000
+#define RQUEUE_EX7		0x00010000
+#define RQUEUE_EX_ALL		0x00FF0000
+
+#define RQUEUE_EN0		0x00000080
+#define RQUEUE_EN1		0x00000040
+#define RQUEUE_EN2		0x00000020
+#define RQUEUE_EN3		0x00000010
+#define RQUEUE_EN4		0x00000008
+#define RQUEUE_EN5		0x00000004
+#define RQUEUE_EN6		0x00000002
+#define RQUEUE_EN7		0x00000001
+#define RQUEUE_EN_ALL		0x000000FF
+
 /* Init to do tx snooping for buffers and descriptors */
 #define DMACTRL_INIT_SETTINGS   0x000000c3
 #define DMACTRL_GRS             0x00000010
 #define DMACTRL_GTS             0x00000008
 
-#define TSTAT_CLEAR_THALT       0x80000000
+#define TSTAT_CLEAR_THALT_ALL	0xFF000000
+#define TSTAT_CLEAR_THALT	0x80000000
+#define TSTAT_CLEAR_THALT0	0x80000000
+#define TSTAT_CLEAR_THALT1	0x40000000
+#define TSTAT_CLEAR_THALT2	0x20000000
+#define TSTAT_CLEAR_THALT3	0x10000000
+#define TSTAT_CLEAR_THALT4	0x08000000
+#define TSTAT_CLEAR_THALT5	0x04000000
+#define TSTAT_CLEAR_THALT6	0x02000000
+#define TSTAT_CLEAR_THALT7	0x01000000
 
 /* Interrupt coalescing macros */
 #define IC_ICEN			0x80000000
@@ -227,6 +286,13 @@
 #define TCTRL_IPCSEN		0x00004000
 #define TCTRL_TUCSEN		0x00002000
 #define TCTRL_VLINS		0x00001000
+#define TCTRL_THDF		0x00000800
+#define TCTRL_RFCPAUSE		0x00000010
+#define TCTRL_TFCPAUSE		0x00000008
+#define TCTRL_TXSCHED_MASK	0x00000006
+#define TCTRL_TXSCHED_INIT	0x00000000
+#define TCTRL_TXSCHED_PRIO	0x00000002
+#define TCTRL_TXSCHED_WRRS	0x00000004
 #define TCTRL_INIT_CSUM		(TCTRL_TUCSEN | TCTRL_IPCSEN)
 
 #define IEVENT_INIT_CLEAR	0xffffffff
@@ -315,6 +381,84 @@
 #define BD_LFLAG(flags) ((flags) << 16)
 #define BD_LENGTH_MASK		0x0000ffff
 
+#define CLASS_CODE_UNRECOG		0x00
+#define CLASS_CODE_DUMMY1		0x01
+#define CLASS_CODE_ETHERTYPE1		0x02
+#define CLASS_CODE_ETHERTYPE2		0x03
+#define CLASS_CODE_USER_PROG1		0x04
+#define CLASS_CODE_USER_PROG2		0x05
+#define CLASS_CODE_USER_PROG3		0x06
+#define CLASS_CODE_USER_PROG4		0x07
+#define CLASS_CODE_TCP_IPV4		0x08
+#define CLASS_CODE_UDP_IPV4		0x09
+#define CLASS_CODE_AH_ESP_IPV4		0x0a
+#define CLASS_CODE_SCTP_IPV4		0x0b
+#define CLASS_CODE_TCP_IPV6		0x0c
+#define CLASS_CODE_UDP_IPV6		0x0d
+#define CLASS_CODE_AH_ESP_IPV6		0x0e
+#define CLASS_CODE_SCTP_IPV6		0x0f
+
+#define FPR_FILER_MASK	0xFFFFFFFF
+#define MAX_FILER_IDX	0xFF
+
+/* RQFCR register bits */
+#define RQFCR_GPI		0x80000000
+#define RQFCR_HASHTBL_Q		0x00000000
+#define RQFCR_HASHTBL_0		0x00020000
+#define RQFCR_HASHTBL_1		0x00040000
+#define RQFCR_HASHTBL_2		0x00060000
+#define RQFCR_HASHTBL_3		0x00080000
+#define RQFCR_HASH		0x00010000
+#define RQFCR_CLE		0x00000200
+#define RQFCR_RJE		0x00000100
+#define RQFCR_AND		0x00000080
+#define RQFCR_CMP_EXACT		0x00000000
+#define RQFCR_CMP_MATCH		0x00000020
+#define RQFCR_CMP_NOEXACT	0x00000040
+#define RQFCR_CMP_NOMATCH	0x00000060
+
+/* RQFCR PID values */
+#define	RQFCR_PID_MASK		0x00000000
+#define	RQFCR_PID_PARSE		0x00000001
+#define	RQFCR_PID_ARB		0x00000002
+#define	RQFCR_PID_DAH		0x00000003
+#define	RQFCR_PID_DAL		0x00000004
+#define	RQFCR_PID_SAH		0x00000005
+#define	RQFCR_PID_SAL		0x00000006
+#define	RQFCR_PID_ETY		0x00000007
+#define	RQFCR_PID_VID		0x00000008
+#define	RQFCR_PID_PRI		0x00000009
+#define	RQFCR_PID_TOS		0x0000000A
+#define	RQFCR_PID_L4P		0x0000000B
+#define	RQFCR_PID_DIA		0x0000000C
+#define	RQFCR_PID_SIA		0x0000000D
+#define	RQFCR_PID_DPT		0x0000000E
+#define	RQFCR_PID_SPT		0x0000000F
+
+/* RQFPR when PID is 0x0001 */
+#define RQFPR_HDR_GE_512	0x00200000
+#define RQFPR_LERR		0x00100000
+#define RQFPR_RAR		0x00080000
+#define RQFPR_RARQ		0x00040000
+#define RQFPR_AR		0x00020000
+#define RQFPR_ARQ		0x00010000
+#define RQFPR_EBC		0x00008000
+#define RQFPR_VLN		0x00004000
+#define RQFPR_CFI		0x00002000
+#define RQFPR_JUM		0x00001000
+#define RQFPR_IPF		0x00000800
+#define RQFPR_FIF		0x00000400
+#define RQFPR_IPV4		0x00000200
+#define RQFPR_IPV6		0x00000100
+#define RQFPR_ICC		0x00000080
+#define RQFPR_ICV		0x00000040
+#define RQFPR_TCP		0x00000020
+#define RQFPR_UDP		0x00000010
+#define RQFPR_TUC		0x00000008
+#define RQFPR_TUV		0x00000004
+#define RQFPR_PER		0x00000002
+#define RQFPR_EER		0x00000001
+
 /* TxBD status field bits */
 #define TXBD_READY		0x8000
 #define TXBD_PADCRC		0x4000
@@ -503,25 +647,32 @@
 
 struct gfar {
 	u32	tsec_id;	/* 0x.000 - Controller ID register */
-	u8	res1[12];
+	u32	tsec_id2;	/* 0x.004 - Controller ID2 register */
+	u8	res1[8];
 	u32	ievent;		/* 0x.010 - Interrupt Event Register */
 	u32	imask;		/* 0x.014 - Interrupt Mask Register */
 	u32	edis;		/* 0x.018 - Error Disabled Register */
-	u8	res2[4];
+	u32	emapg;		/* 0x.01c - Group Error mapping register */
 	u32	ecntrl;		/* 0x.020 - Ethernet Control Register */
 	u32	minflr;		/* 0x.024 - Minimum Frame Length Register */
 	u32	ptv;		/* 0x.028 - Pause Time Value Register */
 	u32	dmactrl;	/* 0x.02c - DMA Control Register */
 	u32	tbipa;		/* 0x.030 - TBI PHY Address Register */
-	u8	res3[88];
+	u8	res2[28];
+	u32	fifo_rx_pause;	/* 0x.050 - FIFO receive pause start threshold
+					register */
+	u32	fifo_rx_pause_shutoff;	/* 0x.054 - FIFO receive starve shutoff
+						register */
+	u32	fifo_rx_alarm;	/* 0x.058 - FIFO receive alarm start threshold
+						register */
+	u32	fifo_rx_alarm_shutoff;	/* 0x.05c - FIFO receive alarm starve
+						shutoff register */
+	u8	res3[44];
 	u32	fifo_tx_thr;	/* 0x.08c - FIFO transmit threshold register */
 	u8	res4[8];
 	u32	fifo_tx_starve;	/* 0x.098 - FIFO transmit starve register */
 	u32	fifo_tx_starve_shutoff;	/* 0x.09c - FIFO transmit starve shutoff register */
-	u8	res5[4];
-	u32	fifo_rx_pause;	/* 0x.0a4 - FIFO receive pause threshold register */
-	u32	fifo_rx_alarm;	/* 0x.0a8 - FIFO receive alarm threshold register */
-	u8	res6[84];
+	u8	res5[96];
 	u32	tctrl;		/* 0x.100 - Transmit Control Register */
 	u32	tstat;		/* 0x.104 - Transmit Status Register */
 	u32	dfvlan;		/* 0x.108 - Default VLAN Control word */
@@ -572,7 +723,11 @@
 	u8	res12[8];
 	u32	rxic;		/* 0x.310 - Receive Interrupt Coalescing Configuration Register */
 	u32	rqueue;		/* 0x.314 - Receive queue control register */
-	u8	res13[24];
+	u32	rir0;		/* 0x.318 - Ring mapping register 0 */
+	u32	rir1;		/* 0x.31c - Ring mapping register 1 */
+	u32	rir2;		/* 0x.320 - Ring mapping register 2 */
+	u32	rir3;		/* 0x.324 - Ring mapping register 3 */
+	u8	res13[8];
 	u32	rbifx;		/* 0x.330 - Receive bit field extract control register */
 	u32	rqfar;		/* 0x.334 - Receive queue filing table address register */
 	u32	rqfcr;		/* 0x.338 - Receive queue filing table control register */
@@ -621,7 +776,7 @@
 	u32	maxfrm;		/* 0x.510 - Maximum Frame Length Register */
 	u8	res18[12];
 	u8	gfar_mii_regs[24];	/* See gianfar_phy.h */
-	u8	res19[4];
+	u32	ifctrl;		/* 0x.538 - Interface control register */
 	u32	ifstat;		/* 0x.53c - Interface Status Register */
 	u32	macstnaddr1;	/* 0x.540 - Station Address Part 1 Register */
 	u32	macstnaddr2;	/* 0x.544 - Station Address Part 2 Register */
@@ -682,8 +837,30 @@
 	u8	res23c[248];
 	u32	attr;		/* 0x.bf8 - Attributes Register */
 	u32	attreli;	/* 0x.bfc - Attributes Extract Length and Extract Index Register */
-	u8	res24[1024];
-
+	u8	res24[688];
+	u32	isrg0;		/* 0x.eb0 - Interrupt steering group 0 register */
+	u32	isrg1;		/* 0x.eb4 - Interrupt steering group 1 register */
+	u32	isrg2;		/* 0x.eb8 - Interrupt steering group 2 register */
+	u32	isrg3;		/* 0x.ebc - Interrupt steering group 3 register */
+	u8	res25[16];
+	u32	rxic0;		/* 0x.ed0 - Ring 0 Rx interrupt coalescing */
+	u32	rxic1;		/* 0x.ed4 - Ring 1 Rx interrupt coalescing */
+	u32	rxic2;		/* 0x.ed8 - Ring 2 Rx interrupt coalescing */
+	u32	rxic3;		/* 0x.edc - Ring 3 Rx interrupt coalescing */
+	u32	rxic4;		/* 0x.ee0 - Ring 4 Rx interrupt coalescing */
+	u32	rxic5;		/* 0x.ee4 - Ring 5 Rx interrupt coalescing */
+	u32	rxic6;		/* 0x.ee8 - Ring 6 Rx interrupt coalescing */
+	u32	rxic7;		/* 0x.eec - Ring 7 Rx interrupt coalescing */
+	u8	res26[32];
+	u32	txic0;		/* 0x.f10 - Ring 0 Tx interrupt coalescing */
+	u32	txic1;		/* 0x.f14 - Ring 1 Tx interrupt coalescing */
+	u32	txic2;		/* 0x.f18 - Ring 2 Tx interrupt coalescing */
+	u32	txic3;		/* 0x.f1c - Ring 3 Tx interrupt coalescing */
+	u32	txic4;		/* 0x.f20 - Ring 4 Tx interrupt coalescing */
+	u32	txic5;		/* 0x.f24 - Ring 5 Tx interrupt coalescing */
+	u32	txic6;		/* 0x.f28 - Ring 6 Tx interrupt coalescing */
+	u32	txic7;		/* 0x.f2c - Ring 7 Tx interrupt coalescing */
+	u8	res27[208];
 };
 
 /* Flags related to gianfar device features */
@@ -699,6 +876,133 @@
 #define FSL_GIANFAR_DEV_HAS_BD_STASHING		0x00000200
 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING	0x00000400
 
+#if (MAXGROUPS == 2)
+#define DEFAULT_MAPPING 	0xAA
+#else
+#define DEFAULT_MAPPING 	0xFF
+#endif
+
+#define ISRG_SHIFT_TX	0x10
+#define ISRG_SHIFT_RX	0x18
+
+/* The same driver can operate in two modes:
+ * SQ_SG_MODE: Single Queue Single Group Mode
+ *		(Backward compatible mode)
+ * MQ_MG_MODE: Multi Queue Multi Group mode
+ */
+enum {
+	SQ_SG_MODE = 0,
+	MQ_MG_MODE
+};
+
+/**
+ *	struct gfar_priv_tx_q - per tx queue structure
+ *	@txlock: per queue tx spin lock
+ *	@tx_skbuff: skb pointers
+ *	@skb_curtx: next to-be-used skb pointer
+ *	@skb_dirtytx: the last used skb pointer
+ *	@qindex: index of this queue
+ *	@dev: back pointer to the dev structure
+ *	@grp: back pointer to the group to which this queue belongs
+ *	@tx_bd_base: First tx buffer descriptor
+ *	@cur_tx: Next free ring entry
+ *	@dirty_tx: First buffer in line to be transmitted
+ *	@tx_ring_size: Tx ring size
+ *	@num_txbdfree: number of free TxBds
+ *	@txcoalescing: enable/disable tx coalescing
+ *	@txic: transmit interrupt coalescing value
+ *	@txcount: coalescing value if based on tx frame count
+ *	@txtime: coalescing value if based on time
+ */
+struct gfar_priv_tx_q {
+	spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct sk_buff ** tx_skbuff;
+	/* Buffer descriptor pointers */
+	dma_addr_t tx_bd_dma_base;
+	struct	txbd8 *tx_bd_base;
+	struct	txbd8 *cur_tx;
+	struct	txbd8 *dirty_tx;
+	struct	net_device *dev;
+	struct gfar_priv_grp *grp;
+	u16	skb_curtx;
+	u16	skb_dirtytx;
+	u16	qindex;
+	unsigned int tx_ring_size;
+	unsigned int num_txbdfree;
+	/* Configuration info for the coalescing features */
+	unsigned char txcoalescing;
+	unsigned long txic;
+	unsigned short txcount;
+	unsigned short txtime;
+};
+
+/**
+ *	struct gfar_priv_rx_q - per rx queue structure
+ *	@rxlock: per queue rx spin lock
+ *	@rx_skbuff: skb pointers
+ *	@skb_currx: currently used skb pointer
+ *	@rx_bd_base: First rx buffer descriptor
+ *	@cur_rx: Next free rx ring entry
+ *	@qindex: index of this queue
+ *	@dev: back pointer to the dev structure
+ *	@rx_ring_size: Rx ring size
+ *	@rxcoalescing: enable/disable rx-coalescing
+ *	@rxic: receive interrupt coalescing value
+ */
+
+struct gfar_priv_rx_q {
+	spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct	sk_buff ** rx_skbuff;
+	dma_addr_t rx_bd_dma_base;
+	struct	rxbd8 *rx_bd_base;
+	struct	rxbd8 *cur_rx;
+	struct	net_device *dev;
+	struct gfar_priv_grp *grp;
+	u16	skb_currx;
+	u16	qindex;
+	unsigned int	rx_ring_size;
+	/* RX Coalescing values */
+	unsigned char rxcoalescing;
+	unsigned long rxic;
+};
+
+/**
+ *	struct gfar_priv_grp - per group structure
+ *	@napi: the napi poll function
+ *	@priv: back pointer to the priv structure
+ *	@regs: the ioremapped register space for this group
+ *	@grp_id: group id for this group
+ *	@interruptTransmit: The TX interrupt number for this group
+ *	@interruptReceive: The RX interrupt number for this group
+ *	@interruptError: The ERROR interrupt number for this group
+ *	@int_name_tx: tx interrupt name for this group
+ *	@int_name_rx: rx interrupt name for this group
+ *	@int_name_er: er interrupt name for this group
+ */
+
+struct gfar_priv_grp {
+	spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	struct	napi_struct napi;
+	struct gfar_private *priv;
+	struct gfar __iomem *regs;
+	unsigned int grp_id;
+	unsigned int rx_bit_map;
+	unsigned int tx_bit_map;
+	unsigned int num_tx_queues;
+	unsigned int num_rx_queues;
+	unsigned int rstat;
+	unsigned int tstat;
+	unsigned int imask;
+	unsigned int ievent;
+	unsigned int interruptTransmit;
+	unsigned int interruptReceive;
+	unsigned int interruptError;
+
+	char int_name_tx[GFAR_INT_NAME_MAX];
+	char int_name_rx[GFAR_INT_NAME_MAX];
+	char int_name_er[GFAR_INT_NAME_MAX];
+};
+
 /* Struct stolen almost completely (and shamelessly) from the FCC enet source
  * (Ok, that's not so true anymore, but there is a family resemblence)
  * The GFAR buffer descriptors track the ring buffers.  The rx_bd_base
@@ -709,63 +1013,36 @@
  * the buffer descriptor determines the actual condition.
  */
 struct gfar_private {
-	/* Fields controlled by TX lock */
-	spinlock_t txlock;
 
-	/* Pointer to the array of skbuffs */
-	struct sk_buff ** tx_skbuff;
+	/* Indicates how many tx, rx queues are enabled */
+	unsigned int num_tx_queues;
+	unsigned int num_rx_queues;
+	unsigned int num_grps;
+	unsigned int mode;
 
-	/* next free skb in the array */
-	u16 skb_curtx;
-
-	/* First skb in line to be transmitted */
-	u16 skb_dirtytx;
-
-	/* Configuration info for the coalescing features */
-	unsigned char txcoalescing;
-	unsigned long txic;
-
-	/* Buffer descriptor pointers */
-	dma_addr_t tx_bd_dma_base;
-	struct txbd8 *tx_bd_base;	/* First tx buffer descriptor */
-	struct txbd8 *cur_tx;	        /* Next free ring entry */
-	struct txbd8 *dirty_tx;		/* First buffer in line
-					   to be transmitted */
-	unsigned int tx_ring_size;
-	unsigned int num_txbdfree;	/* number of TxBDs free */
-
-	/* RX Locked fields */
-	spinlock_t rxlock;
+	/* The total tx and rx ring size for the enabled queues */
+	unsigned int total_tx_ring_size;
+	unsigned int total_rx_ring_size;
 
 	struct device_node *node;
 	struct net_device *ndev;
 	struct of_device *ofdev;
-	struct napi_struct napi;
 
-	/* skb array and index */
-	struct sk_buff ** rx_skbuff;
-	u16 skb_currx;
+	struct gfar_priv_grp gfargrp[MAXGROUPS];
+	struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
+	struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
 
-	/* RX Coalescing values */
-	unsigned char rxcoalescing;
-	unsigned long rxic;
-
-	struct rxbd8 *rx_bd_base;	/* First Rx buffers */
-	struct rxbd8 *cur_rx;           /* Next free rx ring entry */
-
-	/* RX parameters */
-	unsigned int rx_ring_size;
+	/* RX per device parameters */
 	unsigned int rx_buffer_size;
 	unsigned int rx_stash_size;
 	unsigned int rx_stash_index;
 
+	u32 cur_filer_idx;
+
 	struct sk_buff_head rx_recycle;
 
 	struct vlan_group *vlgrp;
 
-	/* Unprotected fields */
-	/* Pointer to the GFAR memory mapped Registers */
-	struct gfar __iomem *regs;
 
 	/* Hash registers and their width */
 	u32 __iomem *hash_regs[16];
@@ -786,13 +1063,10 @@
 	unsigned char rx_csum_enable:1,
 		extended_hash:1,
 		bd_stash_en:1,
+		rx_filer_enable:1,
 		wol_en:1; /* Wake-on-LAN enabled */
 	unsigned short padding;
 
-	unsigned int interruptTransmit;
-	unsigned int interruptReceive;
-	unsigned int interruptError;
-
 	/* PHY stuff */
 	struct phy_device *phydev;
 	struct mii_bus *mii_bus;
@@ -804,14 +1078,13 @@
 
 	struct work_struct reset_task;
 
-	char int_name_tx[GFAR_INT_NAME_MAX];
-	char int_name_rx[GFAR_INT_NAME_MAX];
-	char int_name_er[GFAR_INT_NAME_MAX];
-
 	/* Network Statistics */
 	struct gfar_extra_stats extra_stats;
 };
 
+extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+
 static inline u32 gfar_read(volatile unsigned __iomem *addr)
 {
 	u32 val;
@@ -824,12 +1097,28 @@
 	out_be32(addr, val);
 }
 
+static inline void gfar_write_filer(struct gfar_private *priv,
+		unsigned int far, unsigned int fcr, unsigned int fpr)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+	gfar_write(&regs->rqfar, far);
+	gfar_write(&regs->rqfcr, fcr);
+	gfar_write(&regs->rqfpr, fpr);
+}
+
+extern void lock_rx_qs(struct gfar_private *priv);
+extern void lock_tx_qs(struct gfar_private *priv);
+extern void unlock_rx_qs(struct gfar_private *priv);
+extern void unlock_tx_qs(struct gfar_private *priv);
 extern irqreturn_t gfar_receive(int irq, void *dev_id);
 extern int startup_gfar(struct net_device *dev);
 extern void stop_gfar(struct net_device *dev);
 extern void gfar_halt(struct net_device *dev);
 extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
 		int enable, u32 regnum, u32 read);
+extern void gfar_configure_coalescing(struct gfar_private *priv,
+		unsigned int tx_mask, unsigned int rx_mask);
 void gfar_init_sysfs(struct net_device *dev);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6c144b5..1010367 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -7,8 +7,9 @@
  *
  *  Author: Andy Fleming
  *  Maintainer: Kumar Gala
+ *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- *  Copyright (c) 2003,2004 Freescale Semiconductor, Inc.
+ *  Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
  *
  *  This software may be used and distributed according to
  *  the terms of the GNU Public License, Version 2, incorporated herein
@@ -41,7 +42,7 @@
 #include "gianfar.h"
 
 extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 
 #define GFAR_MAX_COAL_USECS 0xffff
 #define GFAR_MAX_COAL_FRAMES 0xff
@@ -136,10 +137,11 @@
 {
 	int i;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u64 *extra = (u64 *) & priv->extra_stats;
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
-		u32 __iomem *rmon = (u32 __iomem *) & priv->regs->rmon;
+		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
 		struct gfar_stats *stats = (struct gfar_stats *) buf;
 
 		for (i = 0; i < GFAR_RMON_LEN; i++)
@@ -197,12 +199,18 @@
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct gfar_priv_tx_q *tx_queue = NULL;
 
 	if (NULL == phydev)
 		return -ENODEV;
+	tx_queue = priv->tx_queue[0];
+	rx_queue = priv->rx_queue[0];
 
-	cmd->maxtxpkt = get_icft_value(priv->txic);
-	cmd->maxrxpkt = get_icft_value(priv->rxic);
+	/* etsec-1.7 and older versions have only one set of
+	 * txic and rxic registers although they support multiple queues */
+	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
+	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
 
 	return phy_ethtool_gset(phydev, cmd);
 }
@@ -218,7 +226,7 @@
 {
 	int i;
 	struct gfar_private *priv = netdev_priv(dev);
-	u32 __iomem *theregs = (u32 __iomem *) priv->regs;
+	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
 	u32 *buf = (u32 *) regbuf;
 
 	for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
@@ -279,6 +287,8 @@
 static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_rx_q *rx_queue = NULL;
+	struct gfar_priv_tx_q *tx_queue = NULL;
 	unsigned long rxtime;
 	unsigned long rxcount;
 	unsigned long txtime;
@@ -290,10 +300,13 @@
 	if (NULL == priv->phydev)
 		return -ENODEV;
 
-	rxtime  = get_ictt_value(priv->rxic);
-	rxcount = get_icft_value(priv->rxic);
-	txtime  = get_ictt_value(priv->txic);
-	txcount = get_icft_value(priv->txic);
+	rx_queue = priv->rx_queue[0];
+	tx_queue = priv->tx_queue[0];
+
+	rxtime  = get_ictt_value(rx_queue->rxic);
+	rxcount = get_icft_value(rx_queue->rxic);
+	txtime  = get_ictt_value(tx_queue->txic);
+	txcount = get_icft_value(tx_queue->txic);
 	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
 	cvals->rx_max_coalesced_frames = rxcount;
 
@@ -339,16 +352,23 @@
 static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	int i = 0;
 
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 		return -EOPNOTSUPP;
 
 	/* Set up rx coalescing */
+	/* As of now, we will enable/disable coalescing for all
+	 * queues together in case of eTSEC2; this will be modified
+	 * along with the ethtool interface */
 	if ((cvals->rx_coalesce_usecs == 0) ||
-	    (cvals->rx_max_coalesced_frames == 0))
-		priv->rxcoalescing = 0;
-	else
-		priv->rxcoalescing = 1;
+	    (cvals->rx_max_coalesced_frames == 0)) {
+		for (i = 0; i < priv->num_rx_queues; i++)
+			priv->rx_queue[i]->rxcoalescing = 0;
+	} else {
+		for (i = 0; i < priv->num_rx_queues; i++)
+			priv->rx_queue[i]->rxcoalescing = 1;
+	}
 
 	if (NULL == priv->phydev)
 		return -ENODEV;
@@ -366,15 +386,21 @@
 		return -EINVAL;
 	}
 
-	priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
-		gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		priv->rx_queue[i]->rxic = mk_ic_value(
+			cvals->rx_max_coalesced_frames,
+			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+	}
 
 	/* Set up tx coalescing */
 	if ((cvals->tx_coalesce_usecs == 0) ||
-	    (cvals->tx_max_coalesced_frames == 0))
-		priv->txcoalescing = 0;
-	else
-		priv->txcoalescing = 1;
+	    (cvals->tx_max_coalesced_frames == 0)) {
+		for (i = 0; i < priv->num_tx_queues; i++)
+			priv->tx_queue[i]->txcoalescing = 0;
+	} else {
+		for (i = 0; i < priv->num_tx_queues; i++)
+			priv->tx_queue[i]->txcoalescing = 1;
+	}
 
 	/* Check the bounds of the values */
 	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -389,16 +415,13 @@
 		return -EINVAL;
 	}
 
-	priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
-		gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		priv->tx_queue[i]->txic = mk_ic_value(
+			cvals->tx_max_coalesced_frames,
+			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+	}
 
-	gfar_write(&priv->regs->rxic, 0);
-	if (priv->rxcoalescing)
-		gfar_write(&priv->regs->rxic, priv->rxic);
-
-	gfar_write(&priv->regs->txic, 0);
-	if (priv->txcoalescing)
-		gfar_write(&priv->regs->txic, priv->txic);
+	gfar_configure_coalescing(priv, 0xFF, 0xFF);
 
 	return 0;
 }
@@ -409,6 +432,11 @@
 static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_priv_tx_q *tx_queue = NULL;
+	struct gfar_priv_rx_q *rx_queue = NULL;
+
+	tx_queue = priv->tx_queue[0];
+	rx_queue = priv->rx_queue[0];
 
 	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
 	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -418,10 +446,10 @@
 	/* Values changeable by the user.  The valid values are
 	 * in the range 1 to the "*_max_pending" counterpart above.
 	 */
-	rvals->rx_pending = priv->rx_ring_size;
-	rvals->rx_mini_pending = priv->rx_ring_size;
-	rvals->rx_jumbo_pending = priv->rx_ring_size;
-	rvals->tx_pending = priv->tx_ring_size;
+	rvals->rx_pending = rx_queue->rx_ring_size;
+	rvals->rx_mini_pending = rx_queue->rx_ring_size;
+	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
+	rvals->tx_pending = tx_queue->tx_ring_size;
 }
 
 /* Change the current ring parameters, stopping the controller if
@@ -431,7 +459,7 @@
 static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	int err = 0;
+	int err = 0, i = 0;
 
 	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
 		return -EINVAL;
@@ -451,34 +479,41 @@
 		return -EINVAL;
 	}
 
+
 	if (dev->flags & IFF_UP) {
 		unsigned long flags;
 
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		local_irq_save(flags);
+		lock_tx_qs(priv);
+		lock_rx_qs(priv);
 
 		gfar_halt(dev);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		unlock_rx_qs(priv);
+		unlock_tx_qs(priv);
+		local_irq_restore(flags);
 
-		gfar_clean_rx_ring(dev, priv->rx_ring_size);
+		for (i = 0; i < priv->num_rx_queues; i++)
+			gfar_clean_rx_ring(priv->rx_queue[i],
+					priv->rx_queue[i]->rx_ring_size);
 
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
 	}
 
 	/* Change the size */
-	priv->rx_ring_size = rvals->rx_pending;
-	priv->tx_ring_size = rvals->tx_pending;
-	priv->num_txbdfree = priv->tx_ring_size;
+	for (i = 0; i < priv->num_rx_queues; i++)
+		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+
+	for (i = 0; i < priv->num_tx_queues; i++) {
+		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+		priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+	}
 
 	/* Rebuild the rings with the new size */
 	if (dev->flags & IFF_UP) {
 		err = startup_gfar(dev);
-		netif_wake_queue(dev);
+		netif_tx_wake_all_queues(dev);
 	}
 	return err;
 }
@@ -487,23 +522,28 @@
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	unsigned long flags;
-	int err = 0;
+	int err = 0, i = 0;
 
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
 		return -EOPNOTSUPP;
 
+
 	if (dev->flags & IFF_UP) {
 		/* Halt TX and RX, and process the frames which
 		 * have already been received */
-		spin_lock_irqsave(&priv->txlock, flags);
-		spin_lock(&priv->rxlock);
+		local_irq_save(flags);
+		lock_tx_qs(priv);
+		lock_rx_qs(priv);
 
 		gfar_halt(dev);
 
-		spin_unlock(&priv->rxlock);
-		spin_unlock_irqrestore(&priv->txlock, flags);
+		unlock_tx_qs(priv);
+		unlock_rx_qs(priv);
+		local_irq_restore(flags);
 
-		gfar_clean_rx_ring(dev, priv->rx_ring_size);
+		for (i = 0; i < priv->num_rx_queues; i++)
+			gfar_clean_rx_ring(priv->rx_queue[i],
+					priv->rx_queue[i]->rx_ring_size);
 
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
@@ -515,7 +555,7 @@
 
 	if (dev->flags & IFF_UP) {
 		err = startup_gfar(dev);
-		netif_wake_queue(dev);
+		netif_tx_wake_all_queues(dev);
 	}
 	return err;
 }
@@ -605,6 +645,241 @@
 }
 #endif
 
+static int gfar_ethflow_to_class(int flow_type, u64 *class)
+{
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+		*class = CLASS_CODE_TCP_IPV4;
+		break;
+	case UDP_V4_FLOW:
+		*class = CLASS_CODE_UDP_IPV4;
+		break;
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+		*class = CLASS_CODE_AH_ESP_IPV4;
+		break;
+	case SCTP_V4_FLOW:
+		*class = CLASS_CODE_SCTP_IPV4;
+		break;
+	case TCP_V6_FLOW:
+		*class = CLASS_CODE_TCP_IPV6;
+		break;
+	case UDP_V6_FLOW:
+		*class = CLASS_CODE_UDP_IPV6;
+		break;
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+		*class = CLASS_CODE_AH_ESP_IPV6;
+		break;
+	case SCTP_V6_FLOW:
+		*class = CLASS_CODE_SCTP_IPV6;
+		break;
+	default:
+		return 0;
+	}
+
+	return 1;
+}
+
+static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
+{
+	u32 fcr = 0x0, fpr = FPR_FILER_MASK;
+
+	if (ethflow & RXH_L2DA) {
+		fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
+			RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+
+		fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
+				RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_VLAN) {
+		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+				RQFCR_AND | RQFCR_HASHTBL_0;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_IP_SRC) {
+		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & (RXH_IP_DST)) {
+		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_L3_PROTO) {
+		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_L4_B_0_1) {
+		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	if (ethflow & RXH_L4_B_2_3) {
+		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+			RQFCR_AND | RQFCR_HASHTBL_0;
+		ftp_rqfpr[priv->cur_filer_idx] = fpr;
+		ftp_rqfcr[priv->cur_filer_idx] = fcr;
+		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+}
+
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
+{
+	unsigned int last_rule_idx = priv->cur_filer_idx;
+	unsigned int cmp_rqfpr;
+	unsigned int local_rqfpr[MAX_FILER_IDX + 1];
+	unsigned int local_rqfcr[MAX_FILER_IDX + 1];
+	int i = 0x0, k = 0x0;
+	int j = MAX_FILER_IDX, l = 0x0;
+
+	switch (class) {
+	case TCP_V4_FLOW:
+		cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
+		break;
+	case UDP_V4_FLOW:
+		cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
+		break;
+	case TCP_V6_FLOW:
+		cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
+		break;
+	case UDP_V6_FLOW:
+		cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
+		break;
+	case IPV4_FLOW:
+		cmp_rqfpr = RQFPR_IPV4;
+		break;
+	case IPV6_FLOW:
+		cmp_rqfpr = RQFPR_IPV6;
+		break;
+	default:
+		printk(KERN_ERR "Right now this class is not supported\n");
+		return 0;
+	}
+
+	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
+		local_rqfpr[j] = ftp_rqfpr[i];
+		local_rqfcr[j] = ftp_rqfcr[i];
+		j--;
+		if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
+			RQFCR_CLE |RQFCR_AND)) &&
+			(ftp_rqfpr[i] == cmp_rqfpr))
+			break;
+	}
+
+	if (i == MAX_FILER_IDX + 1) {
+		printk(KERN_ERR "No parse rule found, can't create hash rules\n");
+		return 0;
+	}
+
+	/* If a match was found, then it marks the start of a cluster rule;
+	 * if it was already programmed, we need to overwrite these rules
+	 */
+	for (l = i+1; l < MAX_FILER_IDX; l++) {
+		if ((ftp_rqfcr[l] & RQFCR_CLE) &&
+			!(ftp_rqfcr[l] & RQFCR_AND)) {
+			ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+				RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+			ftp_rqfpr[l] = FPR_FILER_MASK;
+			gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
+			break;
+		}
+
+		if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
+			continue;
+		else {
+			local_rqfpr[j] = ftp_rqfpr[l];
+			local_rqfcr[j] = ftp_rqfcr[l];
+			j--;
+		}
+	}
+
+	priv->cur_filer_idx = l - 1;
+	last_rule_idx = l;
+
+	/* hash rules */
+	ethflow_to_filer_rules(priv, ethflow);
+
+	/* Write back the popped out rules again */
+	for (k = j+1; k < MAX_FILER_IDX; k++) {
+		ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+		ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+		gfar_write_filer(priv, priv->cur_filer_idx,
+				local_rqfcr[k], local_rqfpr[k]);
+		if (!priv->cur_filer_idx)
+			break;
+		priv->cur_filer_idx = priv->cur_filer_idx - 1;
+	}
+
+	return 1;
+}
+
+static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+	u64 class;
+
+	if (!gfar_ethflow_to_class(cmd->flow_type, &class))
+		return -EINVAL;
+
+	if (class < CLASS_CODE_USER_PROG1 ||
+			class > CLASS_CODE_SCTP_IPV6)
+		return -EINVAL;
+
+	/* write the filer rules here */
+	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
+		return -1;
+
+	return 0;
+}
+
+static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	int ret = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXFH:
+		ret = gfar_set_hash_opts(priv, cmd);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
 const struct ethtool_ops gfar_ethtool_ops = {
 	.get_settings = gfar_gsettings,
 	.set_settings = gfar_ssettings,
@@ -630,4 +905,5 @@
 	.get_wol = gfar_get_wol,
 	.set_wol = gfar_set_wol,
 #endif
+	.set_rxnfc = gfar_set_nfc,
 };
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index dd26da7..3724835 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -8,8 +8,9 @@
  *
  * Author: Andy Fleming
  * Maintainer: Kumar Gala (galak@kernel.crashing.org)
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright (c) 2002-2005 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -49,6 +50,7 @@
 				 const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	int new_setting = 0;
 	u32 temp;
 	unsigned long flags;
@@ -56,6 +58,7 @@
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
 		return count;
 
+
 	/* Find out the new setting */
 	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
 		new_setting = 1;
@@ -65,21 +68,24 @@
 	else
 		return count;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+
+	local_irq_save(flags);
+	lock_rx_qs(priv);
 
 	/* Set the new stashing value */
 	priv->bd_stash_en = new_setting;
 
-	temp = gfar_read(&priv->regs->attr);
+	temp = gfar_read(&regs->attr);
 
 	if (new_setting)
 		temp |= ATTR_BDSTASH;
 	else
 		temp &= ~(ATTR_BDSTASH);
 
-	gfar_write(&priv->regs->attr, temp);
+	gfar_write(&regs->attr, temp);
 
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -99,6 +105,7 @@
 				      const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned int length = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -106,7 +113,9 @@
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
 		return count;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	local_irq_save(flags);
+	lock_rx_qs(priv);
+
 	if (length > priv->rx_buffer_size)
 		goto out;
 
@@ -115,23 +124,24 @@
 
 	priv->rx_stash_size = length;
 
-	temp = gfar_read(&priv->regs->attreli);
+	temp = gfar_read(&regs->attreli);
 	temp &= ~ATTRELI_EL_MASK;
 	temp |= ATTRELI_EL(length);
-	gfar_write(&priv->regs->attreli, temp);
+	gfar_write(&regs->attreli, temp);
 
 	/* Turn stashing on/off as appropriate */
-	temp = gfar_read(&priv->regs->attr);
+	temp = gfar_read(&regs->attr);
 
 	if (length)
 		temp |= ATTR_BUFSTASH;
 	else
 		temp &= ~(ATTR_BUFSTASH);
 
-	gfar_write(&priv->regs->attr, temp);
+	gfar_write(&regs->attr, temp);
 
 out:
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -154,6 +164,7 @@
 				       const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned short index = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -161,7 +172,9 @@
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
 		return count;
 
-	spin_lock_irqsave(&priv->rxlock, flags);
+	local_irq_save(flags);
+	lock_rx_qs(priv);
+
 	if (index > priv->rx_stash_size)
 		goto out;
 
@@ -170,13 +183,14 @@
 
 	priv->rx_stash_index = index;
 
-	temp = gfar_read(&priv->regs->attreli);
+	temp = gfar_read(&regs->attreli);
 	temp &= ~ATTRELI_EI_MASK;
 	temp |= ATTRELI_EI(index);
-	gfar_write(&priv->regs->attreli, flags);
+	gfar_write(&regs->attreli, temp);
 
 out:
-	spin_unlock_irqrestore(&priv->rxlock, flags);
+	unlock_rx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -198,6 +212,7 @@
 				       const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned int length = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -205,16 +220,18 @@
 	if (length > GFAR_MAX_FIFO_THRESHOLD)
 		return count;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
 
 	priv->fifo_threshold = length;
 
-	temp = gfar_read(&priv->regs->fifo_tx_thr);
+	temp = gfar_read(&regs->fifo_tx_thr);
 	temp &= ~FIFO_TX_THR_MASK;
 	temp |= length;
-	gfar_write(&priv->regs->fifo_tx_thr, temp);
+	gfar_write(&regs->fifo_tx_thr, temp);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -235,6 +252,7 @@
 				    const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned int num = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -242,16 +260,18 @@
 	if (num > GFAR_MAX_FIFO_STARVE)
 		return count;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
 
 	priv->fifo_starve = num;
 
-	temp = gfar_read(&priv->regs->fifo_tx_starve);
+	temp = gfar_read(&regs->fifo_tx_starve);
 	temp &= ~FIFO_TX_STARVE_MASK;
 	temp |= num;
-	gfar_write(&priv->regs->fifo_tx_starve, temp);
+	gfar_write(&regs->fifo_tx_starve, temp);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
@@ -273,6 +293,7 @@
 					const char *buf, size_t count)
 {
 	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	unsigned int num = simple_strtoul(buf, NULL, 0);
 	u32 temp;
 	unsigned long flags;
@@ -280,16 +301,18 @@
 	if (num > GFAR_MAX_FIFO_STARVE_OFF)
 		return count;
 
-	spin_lock_irqsave(&priv->txlock, flags);
+	local_irq_save(flags);
+	lock_tx_qs(priv);
 
 	priv->fifo_starve_off = num;
 
-	temp = gfar_read(&priv->regs->fifo_tx_starve_shutoff);
+	temp = gfar_read(&regs->fifo_tx_starve_shutoff);
 	temp &= ~FIFO_TX_STARVE_OFF_MASK;
 	temp |= num;
-	gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp);
+	gfar_write(&regs->fifo_tx_starve_shutoff, temp);
 
-	spin_unlock_irqrestore(&priv->txlock, flags);
+	unlock_tx_qs(priv);
+	local_irq_restore(flags);
 
 	return count;
 }
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index c5d92ec..af117c6 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -24,6 +24,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/errno.h>
@@ -2990,6 +2991,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, emac_match);
 
 static struct of_platform_driver emac_driver = {
 	.name = "emac",
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 801f088..69c2566 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -98,12 +98,15 @@
 		stats->tx_packets++;
 		stats->tx_bytes +=skb->len;
 
-		skb->dev = __dev_get_by_index(&init_net, skb->iif);
+		rcu_read_lock();
+		skb->dev = dev_get_by_index_rcu(&init_net, skb->iif);
 		if (!skb->dev) {
+			rcu_read_unlock();
 			dev_kfree_skb(skb);
 			stats->tx_dropped++;
 			break;
 		}
+		rcu_read_unlock();
 		skb->iif = _dev->ifindex;
 
 		if (from & AT_EGRESS) {
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 7be3a0b..b3808ca 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -66,6 +66,8 @@
     E1000_EICR_RX_QUEUE3)
 
 /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of ctrl bits */
 
 /* Receive Descriptor - Advanced */
 union e1000_adv_rx_desc {
@@ -98,6 +100,7 @@
 
 #define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
 #define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_STAT_TS             0x10000 /* Pkt was time stamped */
 
 /* Transmit Descriptor - Advanced */
 union e1000_adv_tx_desc {
@@ -167,6 +170,17 @@
 #define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
 #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
 
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
+#define E1000_ETQF_1588            (1 << 30)
+
+/* FTQF register bit definitions */
+#define E1000_FTQF_VF_BP               0x00008000
+#define E1000_FTQF_1588_TIME_STAMP     0x08000000
+#define E1000_FTQF_MASK                0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP       0x10000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
 #define E1000_NVM_APME_82575          0x0400
 #define MAX_NUM_VFS                   8
 
@@ -203,8 +217,19 @@
 #define E1000_IOVCTL 0x05BBC
 #define E1000_IOVCTL_REUSE_VFQ 0x00000001
 
+#define E1000_RPLOLR_STRVLAN   0x40000000
+#define E1000_RPLOLR_STRCRC    0x80000000
+
+#define E1000_DTXCTL_8023LL     0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN     0x0020
+#define E1000_DTXCTL_SPOOF_INT  0x0040
+
 #define ALL_QUEUES   0xFFFF
 
+/* RX packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576  0x0000007F
 void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
 void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
 
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index cb91683..48fcab0 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -435,6 +435,39 @@
 /* Flow Control */
 #define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
 
+#define E1000_TSYNCTXCTL_VALID    0x00000001 /* tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED  0x00000010 /* enable tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID      0x00000001 /* rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK  0x0000000E /* rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2       0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1       0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2    0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL         0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2    0x0A
+#define E1000_TSYNCRXCTL_ENABLED    0x00000010 /* enable rx timestamping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK   0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE       0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE  0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE   0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK               0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE                 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE            0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE       0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE      0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE             0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE           0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE  0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE             0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE           0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE           0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
 #define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
index ebc02ea..bb112fb 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/igb/e1000_mbx.h
@@ -58,10 +58,12 @@
 #define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT)
 
 #define E1000_VF_RESET            0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
-#define E1000_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
-#define E1000_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
-#define E1000_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
+#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST    0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_VLAN         0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_LPE          0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC      0x06 /* VF requests to clear VMOLR.ROPE/MPME */
+#define E1000_VF_SET_PROMISC_MULTICAST    (0x02 << E1000_VT_MSGINFO_SHIFT)
 
 #define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */
 
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 76c3389..934e03b 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -76,59 +76,18 @@
 #define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
 
 /* IEEE 1588 TIMESYNCH */
-#define E1000_TSYNCTXCTL 0x0B614
-#define E1000_TSYNCTXCTL_VALID (1<<0)
-#define E1000_TSYNCTXCTL_ENABLED (1<<4)
-#define E1000_TSYNCRXCTL 0x0B620
-#define E1000_TSYNCRXCTL_VALID (1<<0)
-#define E1000_TSYNCRXCTL_ENABLED (1<<4)
-enum {
-	E1000_TSYNCRXCTL_TYPE_L2_V2 = 0,
-	E1000_TSYNCRXCTL_TYPE_L4_V1 = (1<<1),
-	E1000_TSYNCRXCTL_TYPE_L2_L4_V2 = (1<<2),
-	E1000_TSYNCRXCTL_TYPE_ALL = (1<<3),
-	E1000_TSYNCRXCTL_TYPE_EVENT_V2 = (1<<3) | (1<<1),
-};
-#define E1000_TSYNCRXCFG 0x05F50
-enum {
-	E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE = 0<<0,
-	E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE = 1<<0,
-	E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE = 2<<0,
-	E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE = 3<<0,
-	E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE = 4<<0,
-
-	E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE = 0<<8,
-	E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE = 1<<8,
-	E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE = 2<<8,
-	E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE = 3<<8,
-	E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE = 8<<8,
-	E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE = 9<<8,
-	E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE = 0xA<<8,
-	E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE = 0xB<<8,
-	E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE = 0xC<<8,
-	E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE = 0xD<<8,
-};
-#define E1000_SYSTIML 0x0B600
-#define E1000_SYSTIMH 0x0B604
-#define E1000_TIMINCA 0x0B608
-
-#define E1000_RXMTRL     0x0B634
-#define E1000_RXSTMPL 0x0B624
-#define E1000_RXSTMPH 0x0B628
-#define E1000_RXSATRL 0x0B62C
-#define E1000_RXSATRH 0x0B630
-
-#define E1000_TXSTMPL 0x0B618
-#define E1000_TXSTMPH 0x0B61C
-
-#define E1000_ETQF0   0x05CB0
-#define E1000_ETQF1   0x05CB4
-#define E1000_ETQF2   0x05CB8
-#define E1000_ETQF3   0x05CBC
-#define E1000_ETQF4   0x05CC0
-#define E1000_ETQF5   0x05CC4
-#define E1000_ETQF6   0x05CC8
-#define E1000_ETQF7   0x05CCC
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL    0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH    0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL    0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH    0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL    0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH    0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML    0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH    0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
 
 /* Filtering Registers */
 #define E1000_SAQF(_n) (0x5980 + 4 * (_n))
@@ -143,7 +102,9 @@
 #define E1000_ETQF(_n)  (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
 
 #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+
 /* Split and Replication RX Control - RW */
+#define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
 /*
  * Convenience macros
  *
@@ -288,10 +249,17 @@
 #define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
 #define E1000_RA       0x05400  /* Receive Address - RW Array */
 #define E1000_RA2      0x054E0  /* 2nd half of receive address array - RW Array */
+#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
 #define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
                                        (0x054E0 + ((_i - 16) * 8)))
 #define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
                                        (0x054E4 + ((_i - 16) * 8)))
+#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
 #define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
 #define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
 #define E1000_WUC      0x05800  /* Wakeup Control - RW */
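Note: the receive-address macros kept above encode a split layout: entries 0-15 start at 0x05400 and entries 16 and up continue at 0x054E0. A tiny standalone check of that arithmetic (offsets copied from E1000_RAL(); the helper is illustrative only):

#include <assert.h>
#include <stdint.h>

/* Mirrors E1000_RAL() from the hunk above. */
static uint32_t e1000_ral(unsigned int i)
{
	return (i <= 15) ? 0x05400 + i * 8 : 0x054E0 + (i - 16) * 8;
}

int main(void)
{
	assert(e1000_ral(0)  == 0x05400);
	assert(e1000_ral(15) == 0x05478);
	assert(e1000_ral(16) == 0x054E0);	/* second half of the array */
	return 0;
}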
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index b805b1c..3298f5a 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -55,6 +55,8 @@
 #define IGB_DEFAULT_ITR                    3 /* dynamic */
 #define IGB_MAX_ITR_USECS              10000
 #define IGB_MIN_ITR_USECS                 10
+#define NON_Q_VECTORS                      1
+#define MAX_Q_VECTORS                      8
 
 /* Transmit and receive queues */
 #define IGB_MAX_RX_QUEUES     (adapter->vfs_allocated_count ? \
@@ -71,9 +73,14 @@
 	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
 	u16 num_vf_mc_hashes;
 	u16 vlans_enabled;
-	bool clear_to_send;
+	u32 flags;
+	unsigned long last_nack;
 };
 
+#define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
+#define IGB_VF_FLAG_UNI_PROMISC    0x00000002 /* VF has unicast promisc */
+#define IGB_VF_FLAG_MULTI_PROMISC  0x00000004 /* VF has multicast promisc */
+
 /* RX descriptor control thresholds.
  * PTHRESH - MAC will consider prefetch if it has fewer than this number of
  *           descriptors available in its onboard memory.
@@ -85,17 +92,19 @@
  *           descriptors until either it has this many to write back, or the
  *           ITR timer expires.
  */
-#define IGB_RX_PTHRESH                    16
+#define IGB_RX_PTHRESH                    (hw->mac.type <= e1000_82576 ? 16 : 8)
 #define IGB_RX_HTHRESH                     8
 #define IGB_RX_WTHRESH                     1
+#define IGB_TX_PTHRESH                     8
+#define IGB_TX_HTHRESH                     1
+#define IGB_TX_WTHRESH                     ((hw->mac.type == e1000_82576 && \
+                                             adapter->msix_entries) ? 0 : 16)
 
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_128   128    /* Used for packet split */
-#define IGB_RXBUFFER_256   256    /* Used for packet split */
-#define IGB_RXBUFFER_512   512
 #define IGB_RXBUFFER_1024  1024
 #define IGB_RXBUFFER_2048  2048
 #define IGB_RXBUFFER_16384 16384
@@ -141,36 +150,55 @@
 struct igb_tx_queue_stats {
 	u64 packets;
 	u64 bytes;
+	u64 restart_queue;
 };
 
 struct igb_rx_queue_stats {
 	u64 packets;
 	u64 bytes;
 	u64 drops;
+	u64 csum_err;
+	u64 alloc_failed;
+};
+
+struct igb_q_vector {
+	struct igb_adapter *adapter; /* backlink */
+	struct igb_ring *rx_ring;
+	struct igb_ring *tx_ring;
+	struct napi_struct napi;
+
+	u32 eims_value;
+	u16 cpu;
+
+	u16 itr_val;
+	u8 set_itr;
+	u8 itr_shift;
+	void __iomem *itr_register;
+
+	char name[IFNAMSIZ + 9];
 };
 
 struct igb_ring {
-	struct igb_adapter *adapter; /* backlink */
-	void *desc;                  /* descriptor ring memory */
-	dma_addr_t dma;              /* phys address of the ring */
-	unsigned int size;           /* length of desc. ring in bytes */
-	unsigned int count;          /* number of desc. in the ring */
+	struct igb_q_vector *q_vector; /* backlink to q_vector */
+	struct net_device *netdev;     /* back pointer to net_device */
+	struct pci_dev *pdev;          /* pci device for dma mapping */
+	dma_addr_t dma;                /* phys address of the ring */
+	void *desc;                    /* descriptor ring memory */
+	unsigned int size;             /* length of desc. ring in bytes */
+	u16 count;                     /* number of desc. in the ring */
 	u16 next_to_use;
 	u16 next_to_clean;
-	u16 head;
-	u16 tail;
+	u8 queue_index;
+	u8 reg_idx;
+	void __iomem *head;
+	void __iomem *tail;
 	struct igb_buffer *buffer_info; /* array of buffer info structs */
 
-	u32 eims_value;
-	u32 itr_val;
-	u16 itr_register;
-	u16 cpu;
-
-	u16 queue_index;
-	u16 reg_idx;
 	unsigned int total_bytes;
 	unsigned int total_packets;
 
+	u32 flags;
+
 	union {
 		/* TX */
 		struct {
@@ -180,16 +208,18 @@
 		/* RX */
 		struct {
 			struct igb_rx_queue_stats rx_stats;
-			u64 rx_queue_drops;
-			struct napi_struct napi;
-			int set_itr;
-			struct igb_ring *buddy;
+			u32 rx_buffer_len;
 		};
 	};
-
-	char name[IFNAMSIZ + 5];
 };
 
+#define IGB_RING_FLAG_RX_CSUM        0x00000001 /* RX CSUM enabled */
+#define IGB_RING_FLAG_RX_SCTP_CSUM   0x00000002 /* SCTP CSUM offload enabled */
+
+#define IGB_RING_FLAG_TX_CTX_IDX     0x00000001 /* HW requires context index */
+
+#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
+
 #define E1000_RX_DESC_ADV(R, i)	    \
 	(&(((union e1000_adv_rx_desc *)((R).desc))[i]))
 #define E1000_TX_DESC_ADV(R, i)	    \
@@ -197,6 +227,15 @@
 #define E1000_TX_CTXTDESC_ADV(R, i)	    \
 	(&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
 
+/* igb_desc_unused - calculate the number of unused descriptors */
+static inline int igb_desc_unused(struct igb_ring *ring)
+{
+	if (ring->next_to_clean > ring->next_to_use)
+		return ring->next_to_clean - ring->next_to_use - 1;
+
+	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
+
 /* board specific private data structure */
 
 struct igb_adapter {
@@ -205,18 +244,18 @@
 	struct vlan_group *vlgrp;
 	u16 mng_vlan_id;
 	u32 bd_number;
-	u32 rx_buffer_len;
 	u32 wol;
 	u32 en_mng_pt;
 	u16 link_speed;
 	u16 link_duplex;
+
 	unsigned int total_tx_bytes;
 	unsigned int total_tx_packets;
 	unsigned int total_rx_bytes;
 	unsigned int total_rx_packets;
 	/* Interrupt Throttle Rate */
-	u32 itr;
-	u32 itr_setting;
+	u32 rx_itr_setting;
+	u32 tx_itr_setting;
 	u16 tx_itr;
 	u16 rx_itr;
 
@@ -229,13 +268,7 @@
 
 	/* TX */
 	struct igb_ring *tx_ring;      /* One per active queue */
-	unsigned int restart_queue;
 	unsigned long tx_queue_len;
-	u32 txd_cmd;
-	u32 gotc;
-	u64 gotc_old;
-	u64 tpt_old;
-	u64 colc_old;
 	u32 tx_timeout_count;
 
 	/* RX */
@@ -243,18 +276,11 @@
 	int num_tx_queues;
 	int num_rx_queues;
 
-	u64 hw_csum_err;
-	u64 hw_csum_good;
-	u32 alloc_rx_buff_failed;
-	u32 gorc;
-	u64 gorc_old;
-	u16 rx_ps_hdr_size;
 	u32 max_frame_size;
 	u32 min_frame_size;
 
 	/* OS defined structs */
 	struct net_device *netdev;
-	struct napi_struct napi;
 	struct pci_dev *pdev;
 	struct cyclecounter cycles;
 	struct timecounter clock;
@@ -272,6 +298,9 @@
 	struct igb_ring test_rx_ring;
 
 	int msg_enable;
+
+	unsigned int num_q_vectors;
+	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
 	struct msix_entry *msix_entries;
 	u32 eims_enable_mask;
 	u32 eims_other;
@@ -282,8 +311,8 @@
 	u32 eeprom_wol;
 
 	struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
-	unsigned int tx_ring_count;
-	unsigned int rx_ring_count;
+	u16 tx_ring_count;
+	u16 rx_ring_count;
 	unsigned int vfs_allocated_count;
 	struct vf_data_storage *vf_data;
 };
@@ -291,9 +320,9 @@
 #define IGB_FLAG_HAS_MSI           (1 << 0)
 #define IGB_FLAG_DCA_ENABLED       (1 << 1)
 #define IGB_FLAG_QUAD_PORT_A       (1 << 2)
-#define IGB_FLAG_NEED_CTX_IDX      (1 << 3)
-#define IGB_FLAG_RX_CSUM_DISABLED  (1 << 4)
+#define IGB_FLAG_QUEUE_PAIRS       (1 << 3)
 
+#define IGB_82576_TSYNC_SHIFT 19
 enum e1000_state_t {
 	__IGB_TESTING,
 	__IGB_RESETTING,
@@ -313,10 +342,18 @@
 extern void igb_reinit_locked(struct igb_adapter *);
 extern void igb_reset(struct igb_adapter *);
 extern int igb_set_spd_dplx(struct igb_adapter *, u16);
-extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *);
+extern int igb_setup_tx_resources(struct igb_ring *);
+extern int igb_setup_rx_resources(struct igb_ring *);
 extern void igb_free_tx_resources(struct igb_ring *);
 extern void igb_free_rx_resources(struct igb_ring *);
+extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_setup_tctl(struct igb_adapter *);
+extern void igb_setup_rctl(struct igb_adapter *);
+extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
+extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
+					   struct igb_buffer *);
+extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
 extern void igb_update_stats(struct igb_adapter *);
 extern void igb_set_ethtool_ops(struct net_device *);
 
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index a6da32f..90b89a8 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -84,7 +84,6 @@
 	{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
 	{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
 	{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
-	{ "tx_restart_queue", IGB_STAT(restart_queue) },
 	{ "rx_long_length_errors", IGB_STAT(stats.roc) },
 	{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
 	{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
@@ -95,34 +94,32 @@
 	{ "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
 	{ "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
 	{ "rx_long_byte_count", IGB_STAT(stats.gorc) },
-	{ "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
-	{ "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
 	{ "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
-	{ "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
 	{ "tx_smbus", IGB_STAT(stats.mgptc) },
 	{ "rx_smbus", IGB_STAT(stats.mgprc) },
 	{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
 };
 
 #define IGB_QUEUE_STATS_LEN \
-	(((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \
+	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
 	  (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
-	 ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
+	 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
 	  (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
 #define IGB_GLOBAL_STATS_LEN	\
-	sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
+	(sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
 #define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
 static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test  (offline)", "Eeprom test    (offline)",
 	"Interrupt test (offline)", "Loopback test  (offline)",
 	"Link test   (on/offline)"
 };
-#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN
+#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
 
 static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	u32 status;
 
 	if (hw->phy.media_type == e1000_media_type_copper) {
 
@@ -157,17 +154,20 @@
 
 	ecmd->transceiver = XCVR_INTERNAL;
 
-	if (rd32(E1000_STATUS) & E1000_STATUS_LU) {
+	status = rd32(E1000_STATUS);
 
-		adapter->hw.mac.ops.get_speed_and_duplex(hw,
-					&adapter->link_speed,
-					&adapter->link_duplex);
-		ecmd->speed = adapter->link_speed;
+	if (status & E1000_STATUS_LU) {
 
-		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
-		 *          and HALF_DUPLEX != DUPLEX_HALF */
+		if ((status & E1000_STATUS_SPEED_1000) ||
+		    hw->phy.media_type != e1000_media_type_copper)
+			ecmd->speed = SPEED_1000;
+		else if (status & E1000_STATUS_SPEED_100)
+			ecmd->speed = SPEED_100;
+		else
+			ecmd->speed = SPEED_10;
 
-		if (adapter->link_duplex == FULL_DUPLEX)
+		if ((status & E1000_STATUS_FD) ||
+		    hw->phy.media_type != e1000_media_type_copper)
 			ecmd->duplex = DUPLEX_FULL;
 		else
 			ecmd->duplex = DUPLEX_HALF;
@@ -258,8 +258,9 @@
 		if (netif_running(adapter->netdev)) {
 			igb_down(adapter);
 			igb_up(adapter);
-		} else
+		} else {
 			igb_reset(adapter);
+		}
 	} else {
 		if (pause->rx_pause && pause->tx_pause)
 			hw->fc.requested_mode = e1000_fc_full;
@@ -283,17 +284,20 @@
 static u32 igb_get_rx_csum(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED);
+	return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM);
 }
 
 static int igb_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	int i;
 
-	if (data)
-		adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED;
-	else
-		adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		if (data)
+			adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM;
+		else
+			adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM;
+	}
 
 	return 0;
 }
@@ -309,7 +313,7 @@
 
 	if (data) {
 		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		if (adapter->hw.mac.type == e1000_82576)
+		if (adapter->hw.mac.type >= e1000_82576)
 			netdev->features |= NETIF_F_SCTP_CSUM;
 	} else {
 		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -503,19 +507,10 @@
 	regs_buff[119] = adapter->stats.scvpc;
 	regs_buff[120] = adapter->stats.hrmpc;
 
-	/* These should probably be added to e1000_regs.h instead */
-	#define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
-	#define E1000_IP4AT_REG(_i)   (0x05840 + ((_i) * 8))
-	#define E1000_IP6AT_REG(_i)   (0x05880 + ((_i) * 4))
-	#define E1000_WUPM_REG(_i)    (0x05A00 + ((_i) * 4))
-	#define E1000_FFMT_REG(_i)    (0x09000 + ((_i) * 8))
-	#define E1000_FFVT_REG(_i)    (0x09800 + ((_i) * 8))
-	#define E1000_FFLT_REG(_i)    (0x05F00 + ((_i) * 8))
-
 	for (i = 0; i < 4; i++)
 		regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i));
+		regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
 	for (i = 0; i < 4; i++)
 		regs_buff[129 + i] = rd32(E1000_RDBAL(i));
 	for (i = 0; i < 4; i++)
@@ -739,18 +734,18 @@
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct igb_ring *temp_ring;
-	int i, err;
+	int i, err = 0;
 	u32 new_rx_count, new_tx_count;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD);
-	new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD);
+	new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
+	new_rx_count = max(new_rx_count, (u32)IGB_MIN_RXD);
 	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
-	new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD);
-	new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
+	new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
+	new_tx_count = max(new_tx_count, (u32)IGB_MIN_TXD);
 	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
 	if ((new_tx_count == adapter->tx_ring_count) &&
@@ -759,18 +754,30 @@
 		return 0;
 	}
 
+	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (!netif_running(adapter->netdev)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].count = new_tx_count;
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].count = new_rx_count;
+		adapter->tx_ring_count = new_tx_count;
+		adapter->rx_ring_count = new_rx_count;
+		goto clear_reset;
+	}
+
 	if (adapter->num_tx_queues > adapter->num_rx_queues)
 		temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
 	else
 		temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
-	if (!temp_ring)
-		return -ENOMEM;
 
-	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+	if (!temp_ring) {
+		err = -ENOMEM;
+		goto clear_reset;
+	}
 
-	if (netif_running(adapter->netdev))
-		igb_down(adapter);
+	igb_down(adapter);
 
 	/*
 	 * We can't just free everything and then setup again,
@@ -783,7 +790,7 @@
 
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			temp_ring[i].count = new_tx_count;
-			err = igb_setup_tx_resources(adapter, &temp_ring[i]);
+			err = igb_setup_tx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
@@ -808,7 +815,7 @@
 
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			temp_ring[i].count = new_rx_count;
-			err = igb_setup_rx_resources(adapter, &temp_ring[i]);
+			err = igb_setup_rx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
@@ -827,14 +834,11 @@
 
 		adapter->rx_ring_count = new_rx_count;
 	}
-
-	err = 0;
 err_setup:
-	if (netif_running(adapter->netdev))
-		igb_up(adapter);
-
-	clear_bit(__IGB_RESETTING, &adapter->state);
+	igb_up(adapter);
 	vfree(temp_ring);
+clear_reset:
+	clear_bit(__IGB_RESETTING, &adapter->state);
 	return err;
 }
 
@@ -942,7 +946,7 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 pat, val;
-	u32 _test[] =
+	static const u32 _test[] =
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
 	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
 		wr32(reg, (_test[pat] & write));
@@ -955,6 +959,7 @@
 			return 1;
 		}
 	}
+
 	return 0;
 }
 
@@ -972,6 +977,7 @@
 		*data = reg;
 		return 1;
 	}
+
 	return 0;
 }
 
@@ -994,14 +1000,14 @@
 	u32 value, before, after;
 	u32 i, toggle;
 
-	toggle = 0x7FFFF3FF;
-
 	switch (adapter->hw.mac.type) {
 	case e1000_82576:
 		test = reg_test_82576;
+		toggle = 0x7FFFF3FF;
 		break;
 	default:
 		test = reg_test_82575;
+		toggle = 0x7FFFF3FF;
 		break;
 	}
 
@@ -1079,8 +1085,7 @@
 	*data = 0;
 	/* Read and add up the contents of the EEPROM */
 	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
-		if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp))
-		    < 0) {
+		if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
 			*data = 1;
 			break;
 		}
@@ -1096,8 +1101,7 @@
 
 static irqreturn_t igb_test_intr(int irq, void *data)
 {
-	struct net_device *netdev = (struct net_device *) data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = (struct igb_adapter *) data;
 	struct e1000_hw *hw = &adapter->hw;
 
 	adapter->test_icr |= rd32(E1000_ICR);
@@ -1115,32 +1119,36 @@
 	*data = 0;
 
 	/* Hook up test interrupt handler just for this test */
-	if (adapter->msix_entries)
-		/* NOTE: we don't test MSI-X interrupts here, yet */
-		return 0;
-
-	if (adapter->flags & IGB_FLAG_HAS_MSI) {
+	if (adapter->msix_entries) {
+		if (request_irq(adapter->msix_entries[0].vector,
+		                &igb_test_intr, 0, netdev->name, adapter)) {
+			*data = 1;
+			return -1;
+		}
+	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		shared_int = false;
-		if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
+		if (request_irq(irq,
+		                &igb_test_intr, 0, netdev->name, adapter)) {
 			*data = 1;
 			return -1;
 		}
 	} else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
-				netdev->name, netdev)) {
+				netdev->name, adapter)) {
 		shared_int = false;
 	} else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
-		 netdev->name, netdev)) {
+		 netdev->name, adapter)) {
 		*data = 1;
 		return -1;
 	}
 	dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
 		(shared_int ? "shared" : "unshared"));
+
 	/* Disable all the interrupts */
-	wr32(E1000_IMC, 0xFFFFFFFF);
+	wr32(E1000_IMC, ~0);
 	msleep(10);
 
 	/* Define all writable bits for ICS */
-	switch(hw->mac.type) {
+	switch (hw->mac.type) {
 	case e1000_82575:
 		ics_mask = 0x37F47EDD;
 		break;
@@ -1230,190 +1238,61 @@
 	msleep(10);
 
 	/* Unhook test interrupt handler */
-	free_irq(irq, netdev);
+	if (adapter->msix_entries)
+		free_irq(adapter->msix_entries[0].vector, adapter);
+	else
+		free_irq(irq, adapter);
 
 	return *data;
 }
 
 static void igb_free_desc_rings(struct igb_adapter *adapter)
 {
-	struct igb_ring *tx_ring = &adapter->test_tx_ring;
-	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i;
-
-	if (tx_ring->desc && tx_ring->buffer_info) {
-		for (i = 0; i < tx_ring->count; i++) {
-			struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
-			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma, buf->length,
-						 PCI_DMA_TODEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (rx_ring->desc && rx_ring->buffer_info) {
-		for (i = 0; i < rx_ring->count; i++) {
-			struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
-			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma,
-						 IGB_RXBUFFER_2048,
-						 PCI_DMA_FROMDEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (tx_ring->desc) {
-		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
-				    tx_ring->dma);
-		tx_ring->desc = NULL;
-	}
-	if (rx_ring->desc) {
-		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
-				    rx_ring->dma);
-		rx_ring->desc = NULL;
-	}
-
-	kfree(tx_ring->buffer_info);
-	tx_ring->buffer_info = NULL;
-	kfree(rx_ring->buffer_info);
-	rx_ring->buffer_info = NULL;
-
-	return;
+	igb_free_tx_resources(&adapter->test_tx_ring);
+	igb_free_rx_resources(&adapter->test_rx_ring);
 }
 
 static int igb_setup_desc_rings(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	struct igb_buffer *buffer_info;
-	u32 rctl;
-	int i, ret_val;
+	struct e1000_hw *hw = &adapter->hw;
+	int ret_val;
 
 	/* Setup Tx descriptor ring and Tx buffers */
+	tx_ring->count = IGB_DEFAULT_TXD;
+	tx_ring->pdev = adapter->pdev;
+	tx_ring->netdev = adapter->netdev;
+	tx_ring->reg_idx = adapter->vfs_allocated_count;
 
-	if (!tx_ring->count)
-		tx_ring->count = IGB_DEFAULT_TXD;
-
-	tx_ring->buffer_info = kcalloc(tx_ring->count,
-				       sizeof(struct igb_buffer),
-				       GFP_KERNEL);
-	if (!tx_ring->buffer_info) {
+	if (igb_setup_tx_resources(tx_ring)) {
 		ret_val = 1;
 		goto err_nomem;
 	}
 
-	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
-	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
-	if (!tx_ring->desc) {
-		ret_val = 2;
-		goto err_nomem;
-	}
-	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
-
-	wr32(E1000_TDBAL(0),
-			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
-	wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
-	wr32(E1000_TDLEN(0),
-			tx_ring->count * sizeof(union e1000_adv_tx_desc));
-	wr32(E1000_TDH(0), 0);
-	wr32(E1000_TDT(0), 0);
-	wr32(E1000_TCTL,
-			E1000_TCTL_PSP | E1000_TCTL_EN |
-			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
-			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
-
-	for (i = 0; i < tx_ring->count; i++) {
-		union e1000_adv_tx_desc *tx_desc;
-		struct sk_buff *skb;
-		unsigned int size = 1024;
-
-		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
-		skb = alloc_skb(size, GFP_KERNEL);
-		if (!skb) {
-			ret_val = 3;
-			goto err_nomem;
-		}
-		skb_put(skb, size);
-		buffer_info = &tx_ring->buffer_info[i];
-		buffer_info->skb = skb;
-		buffer_info->length = skb->len;
-		buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
-		                                  PCI_DMA_TODEVICE);
-		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
-		tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
-		                              E1000_ADVTXD_PAYLEN_SHIFT;
-		tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
-		tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
-		                                          E1000_TXD_CMD_IFCS |
-		                                          E1000_TXD_CMD_RS |
-		                                          E1000_ADVTXD_DTYP_DATA |
-		                                          E1000_ADVTXD_DCMD_DEXT);
-	}
+	igb_setup_tctl(adapter);
+	igb_configure_tx_ring(adapter, tx_ring);
 
 	/* Setup Rx descriptor ring and Rx buffers */
+	rx_ring->count = IGB_DEFAULT_RXD;
+	rx_ring->pdev = adapter->pdev;
+	rx_ring->netdev = adapter->netdev;
+	rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
+	rx_ring->reg_idx = adapter->vfs_allocated_count;
 
-	if (!rx_ring->count)
-		rx_ring->count = IGB_DEFAULT_RXD;
-
-	rx_ring->buffer_info = kcalloc(rx_ring->count,
-				       sizeof(struct igb_buffer),
-				       GFP_KERNEL);
-	if (!rx_ring->buffer_info) {
-		ret_val = 4;
+	if (igb_setup_rx_resources(rx_ring)) {
+		ret_val = 3;
 		goto err_nomem;
 	}
 
-	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
-	if (!rx_ring->desc) {
-		ret_val = 5;
-		goto err_nomem;
-	}
-	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+	/* set the default queue to queue 0 of PF */
+	wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
 
-	rctl = rd32(E1000_RCTL);
-	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-	wr32(E1000_RDBAL(0),
-			((u64) rx_ring->dma & 0xFFFFFFFF));
-	wr32(E1000_RDBAH(0),
-			((u64) rx_ring->dma >> 32));
-	wr32(E1000_RDLEN(0), rx_ring->size);
-	wr32(E1000_RDH(0), 0);
-	wr32(E1000_RDT(0), 0);
-	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
-	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
-		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-	wr32(E1000_RCTL, rctl);
-	wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);
+	/* enable receive ring */
+	igb_setup_rctl(adapter);
+	igb_configure_rx_ring(adapter, rx_ring);
 
-	for (i = 0; i < rx_ring->count; i++) {
-		union e1000_adv_rx_desc *rx_desc;
-		struct sk_buff *skb;
-
-		buffer_info = &rx_ring->buffer_info[i];
-		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
-		skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
-				GFP_KERNEL);
-		if (!skb) {
-			ret_val = 6;
-			goto err_nomem;
-		}
-		skb_reserve(skb, NET_IP_ALIGN);
-		buffer_info->skb = skb;
-		buffer_info->dma = pci_map_single(pdev, skb->data,
-		                                  IGB_RXBUFFER_2048,
-		                                  PCI_DMA_FROMDEVICE);
-		rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
-		memset(skb->data, 0x00, skb->len);
-	}
+	igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
 
 	return 0;
 
@@ -1489,7 +1368,10 @@
 	struct e1000_hw *hw = &adapter->hw;
 	u32 reg;
 
-	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+	reg = rd32(E1000_CTRL_EXT);
+
+	/* use CTRL_EXT to identify link type as SGMII can appear as copper */
+	if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
 		reg = rd32(E1000_RCTL);
 		reg |= E1000_RCTL_LBM_TCVR;
 		wr32(E1000_RCTL, reg);
@@ -1520,11 +1402,9 @@
 		wr32(E1000_PCS_LCTL, reg);
 
 		return 0;
-	} else if (hw->phy.media_type == e1000_media_type_copper) {
-		return igb_set_phy_loopback(adapter);
 	}
 
-	return 7;
+	return igb_set_phy_loopback(adapter);
 }
 
 static void igb_loopback_cleanup(struct igb_adapter *adapter)
@@ -1550,35 +1430,99 @@
 				    unsigned int frame_size)
 {
 	memset(skb->data, 0xFF, frame_size);
-	frame_size &= ~1;
-	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
-	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
-	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+	frame_size /= 2;
+	memset(&skb->data[frame_size], 0xAA, frame_size - 1);
+	memset(&skb->data[frame_size + 10], 0xBE, 1);
+	memset(&skb->data[frame_size + 12], 0xAF, 1);
 }
 
 static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
-	frame_size &= ~1;
-	if (*(skb->data + 3) == 0xFF)
-		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
-		   (*(skb->data + frame_size / 2 + 12) == 0xAF))
+	frame_size /= 2;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size + 10) == 0xBE) &&
+		   (*(skb->data + frame_size + 12) == 0xAF)) {
 			return 0;
+		}
+	}
 	return 13;
 }
 
+static int igb_clean_test_rings(struct igb_ring *rx_ring,
+                                struct igb_ring *tx_ring,
+                                unsigned int size)
+{
+	union e1000_adv_rx_desc *rx_desc;
+	struct igb_buffer *buffer_info;
+	int rx_ntc, tx_ntc, count = 0;
+	u32 staterr;
+
+	/* initialize next to clean and descriptor values */
+	rx_ntc = rx_ring->next_to_clean;
+	tx_ntc = tx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		/* check rx buffer */
+		buffer_info = &rx_ring->buffer_info[rx_ntc];
+
+		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
+		pci_unmap_single(rx_ring->pdev,
+		                 buffer_info->dma,
+				 rx_ring->rx_buffer_len,
+				 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		/* verify contents of skb */
+		if (!igb_check_lbtest_frame(buffer_info->skb, size))
+			count++;
+
+		/* unmap buffer on tx side */
+		buffer_info = &tx_ring->buffer_info[tx_ntc];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+
+		/* increment rx/tx next to clean counters */
+		rx_ntc++;
+		if (rx_ntc == rx_ring->count)
+			rx_ntc = 0;
+		tx_ntc++;
+		if (tx_ntc == tx_ring->count)
+			tx_ntc = 0;
+
+		/* fetch next descriptor */
+		rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+
+	/* re-map buffers to ring, store next to clean values */
+	igb_alloc_rx_buffers_adv(rx_ring, count);
+	rx_ring->next_to_clean = rx_ntc;
+	tx_ring->next_to_clean = tx_ntc;
+
+	return count;
+}
+
 static int igb_run_loopback_test(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i, j, k, l, lc, good_cnt;
-	int ret_val = 0;
-	unsigned long time;
+	int i, j, lc, good_cnt, ret_val = 0;
+	unsigned int size = 1024;
+	netdev_tx_t tx_ret_val;
+	struct sk_buff *skb;
 
-	wr32(E1000_RDT(0), rx_ring->count - 1);
+	/* allocate test skb */
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return 11;
 
-	/* Calculate the loop count based on the largest descriptor ring
+	/* place data into test skb */
+	igb_create_lbtest_frame(skb, size);
+	skb_put(skb, size);
+
+	/*
+	 * Calculate the loop count based on the largest descriptor ring
 	 * The idea is to wrap the largest ring a number of times using 64
 	 * send/receive pairs during each loop
 	 */
@@ -1588,50 +1532,36 @@
 	else
 		lc = ((rx_ring->count / 64) * 2) + 1;
 
-	k = l = 0;
 	for (j = 0; j <= lc; j++) { /* loop count loop */
-		for (i = 0; i < 64; i++) { /* send the packets */
-			igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
-						1024);
-			pci_dma_sync_single_for_device(pdev,
-				tx_ring->buffer_info[k].dma,
-				tx_ring->buffer_info[k].length,
-				PCI_DMA_TODEVICE);
-			k++;
-			if (k == tx_ring->count)
-				k = 0;
-		}
-		wr32(E1000_TDT(0), k);
-		msleep(200);
-		time = jiffies; /* set the start time for the receive */
+		/* reset count of good packets */
 		good_cnt = 0;
-		do { /* receive the sent packets */
-			pci_dma_sync_single_for_cpu(pdev,
-					rx_ring->buffer_info[l].dma,
-					IGB_RXBUFFER_2048,
-					PCI_DMA_FROMDEVICE);
 
-			ret_val = igb_check_lbtest_frame(
-					     rx_ring->buffer_info[l].skb, 1024);
-			if (!ret_val)
+		/* place 64 packets on the transmit queue */
+		for (i = 0; i < 64; i++) {
+			skb_get(skb);
+			tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
+			if (tx_ret_val == NETDEV_TX_OK)
 				good_cnt++;
-			l++;
-			if (l == rx_ring->count)
-				l = 0;
-			/* time + 20 msecs (200 msecs on 2.4) is more than
-			 * enough time to complete the receives, if it's
-			 * exceeded, break and error off
-			 */
-		} while (good_cnt < 64 && jiffies < (time + 20));
+		}
+
 		if (good_cnt != 64) {
-			ret_val = 13; /* ret_val is the same as mis-compare */
+			ret_val = 12;
 			break;
 		}
-		if (jiffies >= (time + 20)) {
-			ret_val = 14; /* error code for time out error */
+
+		/* allow 200 milliseconds for packets to go from tx to rx */
+		msleep(200);
+
+		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
+		if (good_cnt != 64) {
+			ret_val = 13;
 			break;
 		}
 	} /* end loop count loop */
+
+	/* free the original skb */
+	kfree_skb(skb);
+
 	return ret_val;
 }
 
@@ -1684,8 +1614,7 @@
 		if (hw->mac.autoneg)
 			msleep(4000);
 
-		if (!(rd32(E1000_STATUS) &
-		      E1000_STATUS_LU))
+		if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
 			*data = 1;
 	}
 	return *data;
@@ -1867,7 +1796,6 @@
 		adapter->wol |= E1000_WUFC_BC;
 	if (wol->wolopts & WAKE_MAGIC)
 		adapter->wol |= E1000_WUFC_MAG;
-
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
 	return 0;
@@ -1880,12 +1808,19 @@
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	unsigned long timeout;
 
-	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
-		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+	timeout = data * 1000;
+
+	/*
+	 * msleep_interruptible only accepts an unsigned int, so we are
+	 * limited in how long a delay we can request
+	 */
+	if (!timeout || timeout > UINT_MAX)
+		timeout = UINT_MAX;
 
 	igb_blink_led(hw);
-	msleep_interruptible(data * 1000);
+	msleep_interruptible(timeout);
 
 	igb_led_off(hw);
 	clear_bit(IGB_LED_ON, &adapter->led_status);
@@ -1898,7 +1833,6 @@
 			    struct ethtool_coalesce *ec)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
 	int i;
 
 	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
@@ -1907,17 +1841,39 @@
 	    (ec->rx_coalesce_usecs == 2))
 		return -EINVAL;
 
-	/* convert to rate of irq's per second */
-	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
-		adapter->itr_setting = ec->rx_coalesce_usecs;
-		adapter->itr = IGB_START_ITR;
-	} else {
-		adapter->itr_setting = ec->rx_coalesce_usecs << 2;
-		adapter->itr = adapter->itr_setting;
-	}
+	if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+	    ((ec->tx_coalesce_usecs > 3) &&
+	     (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
+	    (ec->tx_coalesce_usecs == 2))
+		return -EINVAL;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		wr32(adapter->rx_ring[i].itr_register, adapter->itr);
+	if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+		return -EINVAL;
+
+	/* convert to rate of irq's per second */
+	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+	else
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+
+	/* convert to rate of irq's per second */
+	if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
+		adapter->tx_itr_setting = adapter->rx_itr_setting;
+	else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+	else
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		if (q_vector->rx_ring)
+			q_vector->itr_val = adapter->rx_itr_setting;
+		else
+			q_vector->itr_val = adapter->tx_itr_setting;
+		if (q_vector->itr_val && q_vector->itr_val <= 3)
+			q_vector->itr_val = IGB_START_ITR;
+		q_vector->set_itr = 1;
+	}
 
 	return 0;
 }
@@ -1927,15 +1883,21 @@
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->itr_setting <= 3)
-		ec->rx_coalesce_usecs = adapter->itr_setting;
+	if (adapter->rx_itr_setting <= 3)
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
 	else
-		ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
+		if (adapter->tx_itr_setting <= 3)
+			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+		else
+			ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+	}
 
 	return 0;
 }
 
-
 static int igb_nway_reset(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
@@ -1968,6 +1930,7 @@
 	char *p = NULL;
 
 	igb_update_stats(adapter);
+
 	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
 		switch (igb_gstrings_stats[i].type) {
 		case NETDEV_STATS:
@@ -2021,6 +1984,8 @@
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_restart", i);
+			p += ETH_GSTRING_LEN;
 		}
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
@@ -2029,6 +1994,10 @@
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_drops", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_csum_err", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_alloc_failed", i);
+			p += ETH_GSTRING_LEN;
 		}
 /*		BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
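Note: the reworked coalesce hooks above store each ITR setting either verbatim (values 1-3 select the special/dynamic modes) or as microseconds shifted left by two. A standalone round-trip of that encoding (helper names are made up for illustration):

#include <assert.h>
#include <stdint.h>

/* Same encoding as igb_set_coalesce(): 1-3 are kept as-is, anything
 * else is stored as usecs << 2. */
static uint32_t itr_setting_from_usecs(uint32_t usecs)
{
	return (usecs && usecs <= 3) ? usecs : usecs << 2;
}

/* Same decoding as igb_get_coalesce(). */
static uint32_t usecs_from_itr_setting(uint32_t setting)
{
	return (setting <= 3) ? setting : setting >> 2;
}

int main(void)
{
	assert(usecs_from_itr_setting(itr_setting_from_usecs(3))   == 3);
	assert(usecs_from_itr_setting(itr_setting_from_usecs(100)) == 100);
	return 0;
}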
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 2ffe099..b044c98 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -82,6 +82,7 @@
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
 void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
@@ -90,7 +91,6 @@
 static int igb_close(struct net_device *);
 static void igb_configure_tx(struct igb_adapter *);
 static void igb_configure_rx(struct igb_adapter *);
-static void igb_setup_rctl(struct igb_adapter *);
 static void igb_clean_all_tx_rings(struct igb_adapter *);
 static void igb_clean_all_rx_rings(struct igb_adapter *);
 static void igb_clean_tx_ring(struct igb_ring *);
@@ -99,11 +99,7 @@
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
-static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
-					   struct net_device *,
-					   struct igb_ring *);
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
-				      struct net_device *);
+static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
 static struct net_device_stats *igb_get_stats(struct net_device *);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
@@ -111,17 +107,14 @@
 static irqreturn_t igb_intr(int irq, void *);
 static irqreturn_t igb_intr_msi(int irq, void *);
 static irqreturn_t igb_msix_other(int irq, void *);
-static irqreturn_t igb_msix_rx(int irq, void *);
-static irqreturn_t igb_msix_tx(int irq, void *);
+static irqreturn_t igb_msix_ring(int irq, void *);
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *);
-static void igb_update_tx_dca(struct igb_ring *);
+static void igb_update_dca(struct igb_q_vector *);
 static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
-static bool igb_clean_tx_irq(struct igb_ring *);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
-static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -132,43 +125,10 @@
 static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
 static void igb_ping_all_vfs(struct igb_adapter *);
 static void igb_msg_task(struct igb_adapter *);
-static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
 static void igb_vmm_control(struct igb_adapter *);
-static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
+static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
 
-static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
-{
-	u32 reg_data;
-
-	reg_data = rd32(E1000_VMOLR(vfn));
-	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
-	            E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
-	            E1000_VMOLR_AUPE |   /* Accept untagged packets */
-	            E1000_VMOLR_STRVLAN; /* Strip vlan tags */
-	wr32(E1000_VMOLR(vfn), reg_data);
-}
-
-static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
-                                 int vfn)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	u32 vmolr;
-
-	/* if it isn't the PF check to see if VFs are enabled and
-	 * increase the size to support vlan tags */
-	if (vfn < adapter->vfs_allocated_count &&
-	    adapter->vf_data[vfn].vlans_enabled)
-		size += VLAN_TAG_SIZE;
-
-	vmolr = rd32(E1000_VMOLR(vfn));
-	vmolr &= ~E1000_VMOLR_RLPML_MASK;
-	vmolr |= size | E1000_VMOLR_LPE;
-	wr32(E1000_VMOLR(vfn), vmolr);
-
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int igb_suspend(struct pci_dev *, pm_message_t);
 static int igb_resume(struct pci_dev *);
@@ -219,46 +179,12 @@
 	.err_handler = &igb_err_handler
 };
 
-static int global_quad_port_a; /* global quad port a indication */
-
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 /**
- * Scale the NIC clock cycle by a large factor so that
- * relatively small clock corrections can be added or
- * substracted at each clock tick. The drawbacks of a
- * large factor are a) that the clock register overflows
- * more quickly (not such a big deal) and b) that the
- * increment per tick has to fit into 24 bits.
- *
- * Note that
- *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
- *             IGB_TSYNC_SCALE
- *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
- *
- * The base scale factor is intentionally a power of two
- * so that the division in %struct timecounter can be done with
- * a shift.
- */
-#define IGB_TSYNC_SHIFT (19)
-#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
-
-/**
- * The duration of one clock cycle of the NIC.
- *
- * @todo This hard-coded value is part of the specification and might change
- * in future hardware revisions. Add revision check.
- */
-#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16
-
-#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
-# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
-#endif
-
-/**
  * igb_read_clock - read raw cycle counter (to be used by time counter)
  */
 static cycle_t igb_read_clock(const struct cyclecounter *tc)
@@ -266,11 +192,11 @@
 	struct igb_adapter *adapter =
 		container_of(tc, struct igb_adapter, cycles);
 	struct e1000_hw *hw = &adapter->hw;
-	u64 stamp;
+	u64 stamp = 0;
+	int shift = 0;
 
-	stamp =  rd32(E1000_SYSTIML);
-	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;
-
+	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
+	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
 	return stamp;
 }
 
@@ -311,17 +237,6 @@
 #endif
 
 /**
- * igb_desc_unused - calculate if we have unused descriptors
- **/
-static int igb_desc_unused(struct igb_ring *ring)
-{
-	if (ring->next_to_clean > ring->next_to_use)
-		return ring->next_to_clean - ring->next_to_use - 1;
-
-	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
-}
-
-/**
  * igb_init_module - Driver Registration Routine
  *
  * igb_init_module is the first routine called when the driver is
@@ -335,12 +250,9 @@
 
 	printk(KERN_INFO "%s\n", igb_copyright);
 
-	global_quad_port_a = 0;
-
 #ifdef CONFIG_IGB_DCA
 	dca_register_notify(&dca_notifier);
 #endif
-
 	ret = pci_register_driver(&igb_driver);
 	return ret;
 }
@@ -373,8 +285,8 @@
  **/
 static void igb_cache_ring_register(struct igb_adapter *adapter)
 {
-	int i;
-	unsigned int rbase_offset = adapter->vfs_allocated_count;
+	int i = 0, j = 0;
+	u32 rbase_offset = adapter->vfs_allocated_count;
 
 	switch (adapter->hw.mac.type) {
 	case e1000_82576:
@@ -383,23 +295,36 @@
 		 * In order to avoid collision we start at the first free queue
 		 * and continue consuming queues in the same sequence
 		 */
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].reg_idx = rbase_offset +
-			                              Q_IDX_82576(i);
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].reg_idx = rbase_offset +
-			                              Q_IDX_82576(i);
-		break;
+		if (adapter->vfs_allocated_count) {
+			for (; i < adapter->num_rx_queues; i++)
+				adapter->rx_ring[i].reg_idx = rbase_offset +
+				                              Q_IDX_82576(i);
+			for (; j < adapter->num_tx_queues; j++)
+				adapter->tx_ring[j].reg_idx = rbase_offset +
+				                              Q_IDX_82576(j);
+		}
 	case e1000_82575:
 	default:
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].reg_idx = i;
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].reg_idx = i;
+		for (; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].reg_idx = rbase_offset + i;
+		for (; j < adapter->num_tx_queues; j++)
+			adapter->tx_ring[j].reg_idx = rbase_offset + j;
 		break;
 	}
 }
 
+static void igb_free_queues(struct igb_adapter *adapter)
+{
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	adapter->tx_ring = NULL;
+	adapter->rx_ring = NULL;
+
+	adapter->num_rx_queues = 0;
+	adapter->num_tx_queues = 0;
+}
+
 /**
  * igb_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
@@ -414,59 +339,61 @@
 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
 				   sizeof(struct igb_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
-		return -ENOMEM;
+		goto err;
 
 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
 				   sizeof(struct igb_ring), GFP_KERNEL);
-	if (!adapter->rx_ring) {
-		kfree(adapter->tx_ring);
-		return -ENOMEM;
-	}
-
-	adapter->rx_ring->buddy = adapter->tx_ring;
+	if (!adapter->rx_ring)
+		goto err;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = &(adapter->tx_ring[i]);
 		ring->count = adapter->tx_ring_count;
-		ring->adapter = adapter;
 		ring->queue_index = i;
+		ring->pdev = adapter->pdev;
+		ring->netdev = adapter->netdev;
+		/* For 82575, context index must be unique per ring. */
+		if (adapter->hw.mac.type == e1000_82575)
+			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
 	}
+
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
 		ring->count = adapter->rx_ring_count;
-		ring->adapter = adapter;
 		ring->queue_index = i;
-		ring->itr_register = E1000_ITR;
-
-		/* set a default napi handler for each rx_ring */
-		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
+		ring->pdev = adapter->pdev;
+		ring->netdev = adapter->netdev;
+		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+		/* set flag indicating ring supports SCTP checksum offload */
+		if (adapter->hw.mac.type >= e1000_82576)
+			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
 	}
 
 	igb_cache_ring_register(adapter);
+
 	return 0;
-}
 
-static void igb_free_queues(struct igb_adapter *adapter)
-{
-	int i;
+err:
+	igb_free_queues(adapter);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		netif_napi_del(&adapter->rx_ring[i].napi);
-
-	adapter->num_rx_queues = 0;
-	adapter->num_tx_queues = 0;
-
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
+	return -ENOMEM;
 }
 
 #define IGB_N0_QUEUE -1
-static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
-			      int tx_queue, int msix_vector)
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 {
 	u32 msixbm = 0;
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ivar, index;
+	int rx_queue = IGB_N0_QUEUE;
+	int tx_queue = IGB_N0_QUEUE;
+
+	if (q_vector->rx_ring)
+		rx_queue = q_vector->rx_ring->reg_idx;
+	if (q_vector->tx_ring)
+		tx_queue = q_vector->tx_ring->reg_idx;
 
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -474,16 +401,12 @@
 		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
 		   or more queues to a vector, we write the appropriate bits
 		   into the MSIXBM register for that vector. */
-		if (rx_queue > IGB_N0_QUEUE) {
+		if (rx_queue > IGB_N0_QUEUE)
 			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
-			adapter->rx_ring[rx_queue].eims_value = msixbm;
-		}
-		if (tx_queue > IGB_N0_QUEUE) {
+		if (tx_queue > IGB_N0_QUEUE)
 			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
-			adapter->tx_ring[tx_queue].eims_value =
-				  E1000_EICR_TX_QUEUE0 << tx_queue;
-		}
 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+		q_vector->eims_value = msixbm;
 		break;
 	case e1000_82576:
 		/* 82576 uses a table-based method for assigning vectors.
@@ -491,35 +414,34 @@
 		   a vector number along with a "valid" bit.  Sadly, the layout
 		   of the table is somewhat counterintuitive. */
 		if (rx_queue > IGB_N0_QUEUE) {
-			index = (rx_queue >> 1) + adapter->vfs_allocated_count;
+			index = (rx_queue & 0x7);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (rx_queue & 0x1) {
-				/* vector goes into third byte of register */
-				ivar = ivar & 0xFF00FFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
-			} else {
+			if (rx_queue < 8) {
 				/* vector goes into low byte of register */
 				ivar = ivar & 0xFFFFFF00;
 				ivar |= msix_vector | E1000_IVAR_VALID;
+			} else {
+				/* vector goes into third byte of register */
+				ivar = ivar & 0xFF00FFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
 			}
-			adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
 		if (tx_queue > IGB_N0_QUEUE) {
-			index = (tx_queue >> 1) + adapter->vfs_allocated_count;
+			index = (tx_queue & 0x7);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (tx_queue & 0x1) {
-				/* vector goes into high byte of register */
-				ivar = ivar & 0x00FFFFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
-			} else {
+			if (tx_queue < 8) {
 				/* vector goes into second byte of register */
 				ivar = ivar & 0xFFFF00FF;
 				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+			} else {
+				/* vector goes into high byte of register */
+				ivar = ivar & 0x00FFFFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
 			}
-			adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
+		q_vector->eims_value = 1 << msix_vector;
 		break;
 	default:
 		BUG();
@@ -540,43 +462,10 @@
 	struct e1000_hw *hw = &adapter->hw;
 
 	adapter->eims_enable_mask = 0;
-	if (hw->mac.type == e1000_82576)
-		/* Turn on MSI-X capability first, or our settings
-		 * won't stick.  And it will take days to debug. */
-		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
-				   E1000_GPIE_PBA | E1000_GPIE_EIAME |
- 				   E1000_GPIE_NSICR);
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *tx_ring = &adapter->tx_ring[i];
-		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
-		adapter->eims_enable_mask |= tx_ring->eims_value;
-		if (tx_ring->itr_val)
-			writel(tx_ring->itr_val,
-			       hw->hw_addr + tx_ring->itr_register);
-		else
-			writel(1, hw->hw_addr + tx_ring->itr_register);
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *rx_ring = &adapter->rx_ring[i];
-		rx_ring->buddy = NULL;
-		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
-		adapter->eims_enable_mask |= rx_ring->eims_value;
-		if (rx_ring->itr_val)
-			writel(rx_ring->itr_val,
-			       hw->hw_addr + rx_ring->itr_register);
-		else
-			writel(1, hw->hw_addr + rx_ring->itr_register);
-	}
-
 
 	/* set vector for other causes, i.e. link changes */
 	switch (hw->mac.type) {
 	case e1000_82575:
-		array_wr32(E1000_MSIXBM(0), vector++,
-				      E1000_EIMS_OTHER);
-
 		tmp = rd32(E1000_CTRL_EXT);
 		/* enable MSI-X PBA support*/
 		tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -586,22 +475,40 @@
 		tmp |= E1000_CTRL_EXT_IRCA;
 
 		wr32(E1000_CTRL_EXT, tmp);
-		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
+
+		/* enable msix_other interrupt */
+		array_wr32(E1000_MSIXBM(0), vector++,
+		                      E1000_EIMS_OTHER);
 		adapter->eims_other = E1000_EIMS_OTHER;
 
 		break;
 
 	case e1000_82576:
-		tmp = (vector++ | E1000_IVAR_VALID) << 8;
-		wr32(E1000_IVAR_MISC, tmp);
+		/* Turn on MSI-X capability first, or our settings
+		 * won't stick.  And it will take days to debug. */
+		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
+		                E1000_GPIE_NSICR);
 
-		adapter->eims_enable_mask = (1 << (vector)) - 1;
-		adapter->eims_other = 1 << (vector - 1);
+		/* enable msix_other interrupt */
+		adapter->eims_other = 1 << vector;
+		tmp = (vector++ | E1000_IVAR_VALID) << 8;
+
+		wr32(E1000_IVAR_MISC, tmp);
 		break;
 	default:
 		/* do nothing, since nothing else supports MSI-X */
 		break;
 	} /* switch (hw->mac.type) */
+
+	adapter->eims_enable_mask |= adapter->eims_other;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		igb_assign_vector(q_vector, vector++);
+		adapter->eims_enable_mask |= q_vector->eims_value;
+	}
+
 	wrfl();
 }
 
@@ -614,42 +521,39 @@
 static int igb_request_msix(struct igb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	int i, err = 0, vector = 0;
 
-	vector = 0;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *ring = &(adapter->tx_ring[i]);
-		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
-		err = request_irq(adapter->msix_entries[vector].vector,
-				  &igb_msix_tx, 0, ring->name,
-				  &(adapter->tx_ring[i]));
-		if (err)
-			goto out;
-		ring->itr_register = E1000_EITR(0) + (vector << 2);
-		ring->itr_val = 976; /* ~4000 ints/sec */
-		vector++;
-	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &(adapter->rx_ring[i]);
-		if (strlen(netdev->name) < (IFNAMSIZ - 5))
-			sprintf(ring->name, "%s-rx-%d", netdev->name, i);
-		else
-			memcpy(ring->name, netdev->name, IFNAMSIZ);
-		err = request_irq(adapter->msix_entries[vector].vector,
-				  &igb_msix_rx, 0, ring->name,
-				  &(adapter->rx_ring[i]));
-		if (err)
-			goto out;
-		ring->itr_register = E1000_EITR(0) + (vector << 2);
-		ring->itr_val = adapter->itr;
-		vector++;
-	}
-
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  &igb_msix_other, 0, netdev->name, netdev);
+	                  &igb_msix_other, 0, netdev->name, adapter);
 	if (err)
 		goto out;
+	vector++;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+		if (q_vector->rx_ring && q_vector->tx_ring)
+			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+			        q_vector->rx_ring->queue_index);
+		else if (q_vector->tx_ring)
+			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+			        q_vector->tx_ring->queue_index);
+		else if (q_vector->rx_ring)
+			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+			        q_vector->rx_ring->queue_index);
+		else
+			sprintf(q_vector->name, "%s-unused", netdev->name);
+
+		err = request_irq(adapter->msix_entries[vector].vector,
+		                  &igb_msix_ring, 0, q_vector->name,
+		                  q_vector);
+		if (err)
+			goto out;
+		vector++;
+	}
 
 	igb_configure_msix(adapter);
 	return 0;
@@ -663,11 +567,44 @@
 		pci_disable_msix(adapter->pdev);
 		kfree(adapter->msix_entries);
 		adapter->msix_entries = NULL;
-	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
+	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		pci_disable_msi(adapter->pdev);
-	return;
+	}
 }
 
+/**
+ * igb_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vectors(struct igb_adapter *adapter)
+{
+	int v_idx;
+
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+		adapter->q_vector[v_idx] = NULL;
+		netif_napi_del(&q_vector->napi);
+		kfree(q_vector);
+	}
+	adapter->num_q_vectors = 0;
+}
+
+/**
+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *
+ * This function resets the device so that it has 0 rx queues, tx queues, and
+ * MSI-X interrupts allocated.
+ */
+static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+{
+	igb_free_queues(adapter);
+	igb_free_q_vectors(adapter);
+	igb_reset_interrupt_capability(adapter);
+}
 
 /**
  * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -681,11 +618,20 @@
 	int numvecs, i;
 
 	/* Number of supported queues. */
-	/* Having more queues than CPUs doesn't make sense. */
 	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
 	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
 
-	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
+	/* start with one vector for every rx queue */
+	numvecs = adapter->num_rx_queues;
+
+	/* if tx handler is separate add 1 for every tx queue */
+	numvecs += adapter->num_tx_queues;
+
+	/* store the number of vectors reserved for queues */
+	adapter->num_q_vectors = numvecs;
+
+	/* add 1 vector for link status interrupts */
+	numvecs++;
 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
 					GFP_KERNEL);
 	if (!adapter->msix_entries)
@@ -719,8 +665,11 @@
 		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
 	}
 #endif
+	adapter->vfs_allocated_count = 0;
+	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
+	adapter->num_q_vectors = 1;
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
@@ -730,6 +679,143 @@
 }
 
 /**
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+{
+	struct igb_q_vector *q_vector;
+	struct e1000_hw *hw = &adapter->hw;
+	int v_idx;
+
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
+		if (!q_vector)
+			goto err_out;
+		q_vector->adapter = adapter;
+		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
+		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
+		q_vector->itr_val = IGB_START_ITR;
+		q_vector->set_itr = 1;
+		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
+		adapter->q_vector[v_idx] = q_vector;
+	}
+	return 0;
+
+err_out:
+	while (v_idx) {
+		v_idx--;
+		q_vector = adapter->q_vector[v_idx];
+		netif_napi_del(&q_vector->napi);
+		kfree(q_vector);
+		adapter->q_vector[v_idx] = NULL;
+	}
+	return -ENOMEM;
+}
+
+static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
+                                      int ring_idx, int v_idx)
+{
+	struct igb_q_vector *q_vector;
+
+	q_vector = adapter->q_vector[v_idx];
+	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
+	q_vector->rx_ring->q_vector = q_vector;
+	q_vector->itr_val = adapter->rx_itr_setting;
+	if (q_vector->itr_val && q_vector->itr_val <= 3)
+		q_vector->itr_val = IGB_START_ITR;
+}
+
+static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
+                                      int ring_idx, int v_idx)
+{
+	struct igb_q_vector *q_vector;
+
+	q_vector = adapter->q_vector[v_idx];
+	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
+	q_vector->tx_ring->q_vector = q_vector;
+	q_vector->itr_val = adapter->tx_itr_setting;
+	if (q_vector->itr_val && q_vector->itr_val <= 3)
+		q_vector->itr_val = IGB_START_ITR;
+}
+
+/**
+ * igb_map_ring_to_vector - maps allocated queues to vectors
+ *
+ * This function maps the recently allocated queues to vectors.
+ **/
+static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+{
+	int i;
+	int v_idx = 0;
+
+	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
+	    (adapter->num_q_vectors < adapter->num_tx_queues))
+		return -ENOMEM;
+
+	if (adapter->num_q_vectors >=
+	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+	} else {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			if (i < adapter->num_tx_queues)
+				igb_map_tx_ring_to_vector(adapter, i, v_idx);
+			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+		}
+		for (; i < adapter->num_tx_queues; i++)
+			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+	}
+	return 0;
+}
+
+/**
+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ **/
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	igb_set_interrupt_capability(adapter);
+
+	err = igb_alloc_q_vectors(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	err = igb_alloc_queues(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
+	err = igb_map_ring_to_vector(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
+		goto err_map_queues;
+	}
+
+
+	return 0;
+err_map_queues:
+	igb_free_queues(adapter);
+err_alloc_queues:
+	igb_free_q_vectors(adapter);
+err_alloc_q_vectors:
+	igb_reset_interrupt_capability(adapter);
+	return err;
+}
+
+/**
  * igb_request_irq - initialize interrupts
  *
  * Attempts to configure interrupts using the best available
@@ -738,6 +824,7 @@
 static int igb_request_irq(struct igb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	int err = 0;
 
@@ -746,18 +833,36 @@
 		if (!err)
 			goto request_done;
 		/* fall back to MSI */
-		igb_reset_interrupt_capability(adapter);
+		igb_clear_interrupt_scheme(adapter);
 		if (!pci_enable_msi(adapter->pdev))
 			adapter->flags |= IGB_FLAG_HAS_MSI;
 		igb_free_all_tx_resources(adapter);
 		igb_free_all_rx_resources(adapter);
+		adapter->num_tx_queues = 1;
 		adapter->num_rx_queues = 1;
-		igb_alloc_queues(adapter);
+		adapter->num_q_vectors = 1;
+		err = igb_alloc_q_vectors(adapter);
+		if (err) {
+			dev_err(&pdev->dev,
+			        "Unable to allocate memory for vectors\n");
+			goto request_done;
+		}
+		err = igb_alloc_queues(adapter);
+		if (err) {
+			dev_err(&pdev->dev,
+			        "Unable to allocate memory for queues\n");
+			igb_free_q_vectors(adapter);
+			goto request_done;
+		}
+		igb_setup_all_tx_resources(adapter);
+		igb_setup_all_rx_resources(adapter);
 	} else {
 		switch (hw->mac.type) {
 		case e1000_82575:
 			wr32(E1000_MSIXBM(0),
-			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
+			     (E1000_EICR_RX_QUEUE0 |
+			      E1000_EICR_TX_QUEUE0 |
+			      E1000_EIMS_OTHER));
 			break;
 		case e1000_82576:
 			wr32(E1000_IVAR0, E1000_IVAR_VALID);
@@ -769,16 +874,17 @@
 
 	if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
-				  netdev->name, netdev);
+				  netdev->name, adapter);
 		if (!err)
 			goto request_done;
+
 		/* fall back to legacy interrupts */
 		igb_reset_interrupt_capability(adapter);
 		adapter->flags &= ~IGB_FLAG_HAS_MSI;
 	}
 
 	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
-			  netdev->name, netdev);
+			  netdev->name, adapter);
 
 	if (err)
 		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -790,23 +896,19 @@
 
 static void igb_free_irq(struct igb_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
 	if (adapter->msix_entries) {
 		int vector = 0, i;
 
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			free_irq(adapter->msix_entries[vector++].vector,
-				&(adapter->tx_ring[i]));
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			free_irq(adapter->msix_entries[vector++].vector,
-				&(adapter->rx_ring[i]));
+		free_irq(adapter->msix_entries[vector++].vector, adapter);
 
-		free_irq(adapter->msix_entries[vector++].vector, netdev);
-		return;
+		for (i = 0; i < adapter->num_q_vectors; i++) {
+			struct igb_q_vector *q_vector = adapter->q_vector[i];
+			free_irq(adapter->msix_entries[vector++].vector,
+			         q_vector);
+		}
+	} else {
+		free_irq(adapter->pdev->irq, adapter);
 	}
-
-	free_irq(adapter->pdev->irq, netdev);
 }
 
 /**
@@ -817,6 +919,11 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 
+	/*
+	 * We need to be careful when disabling interrupts.  The VFs are also
+	 * mapped into these registers, so clearing the bits can cause issues
+	 * for the VF drivers; we only need to clear what we set.
+	 */
 	if (adapter->msix_entries) {
 		u32 regval = rd32(E1000_EIAM);
 		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
@@ -840,15 +947,17 @@
 	struct e1000_hw *hw = &adapter->hw;
 
 	if (adapter->msix_entries) {
+		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
 		u32 regval = rd32(E1000_EIAC);
 		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
 		regval = rd32(E1000_EIAM);
 		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
 		wr32(E1000_EIMS, adapter->eims_enable_mask);
-		if (adapter->vfs_allocated_count)
+		if (adapter->vfs_allocated_count) {
 			wr32(E1000_MBVFIMR, 0xFF);
-		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
-		                 E1000_IMS_DOUTSYNC));
+			ims |= E1000_IMS_VMMB;
+		}
+		wr32(E1000_IMS, ims);
 	} else {
 		wr32(E1000_IMS, IMS_ENABLE_MASK);
 		wr32(E1000_IAM, IMS_ENABLE_MASK);
@@ -857,24 +966,23 @@
 
 static void igb_update_mng_vlan(struct igb_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	u16 vid = adapter->hw.mng_cookie.vlan_id;
 	u16 old_vid = adapter->mng_vlan_id;
-	if (adapter->vlgrp) {
-		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
-			if (adapter->hw.mng_cookie.status &
-				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
-				igb_vlan_rx_add_vid(netdev, vid);
-				adapter->mng_vlan_id = vid;
-			} else
-				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
 
-			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
-					(vid != old_vid) &&
-			    !vlan_group_get_device(adapter->vlgrp, old_vid))
-				igb_vlan_rx_kill_vid(netdev, old_vid);
-		} else
-			adapter->mng_vlan_id = vid;
+	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+		/* add VID to filter table */
+		igb_vfta_set(hw, vid, true);
+		adapter->mng_vlan_id = vid;
+	} else {
+		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+	}
+
+	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+	    (vid != old_vid) &&
+	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
+		/* remove VID from filter table */
+		igb_vfta_set(hw, old_vid, false);
 	}
 }
 
@@ -898,7 +1006,6 @@
 			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 }
 
-
 /**
  * igb_get_hw_control - get control of the h/w from f/w
  * @adapter: address of board private structure
@@ -933,8 +1040,11 @@
 
 	igb_restore_vlan(adapter);
 
-	igb_configure_tx(adapter);
+	igb_setup_tctl(adapter);
+	igb_setup_mrqc(adapter);
 	igb_setup_rctl(adapter);
+
+	igb_configure_tx(adapter);
 	igb_configure_rx(adapter);
 
 	igb_rx_fifo_flush_82575(&adapter->hw);
@@ -956,7 +1066,6 @@
  * igb_up - Open the interface and prepare it to handle traffic
  * @adapter: board private structure
  **/
-
 int igb_up(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
@@ -967,29 +1076,37 @@
 
 	clear_bit(__IGB_DOWN, &adapter->state);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		napi_enable(&adapter->rx_ring[i].napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		napi_enable(&q_vector->napi);
+	}
 	if (adapter->msix_entries)
 		igb_configure_msix(adapter);
 
-	igb_vmm_control(adapter);
-	igb_set_vmolr(hw, adapter->vfs_allocated_count);
-
 	/* Clear any pending interrupts. */
 	rd32(E1000_ICR);
 	igb_irq_enable(adapter);
 
+	/* notify VFs that reset has been completed */
+	if (adapter->vfs_allocated_count) {
+		u32 reg_data = rd32(E1000_CTRL_EXT);
+		reg_data |= E1000_CTRL_EXT_PFRSTD;
+		wr32(E1000_CTRL_EXT, reg_data);
+	}
+
 	netif_tx_start_all_queues(adapter->netdev);
 
-	/* Fire a link change interrupt to start the watchdog. */
-	wr32(E1000_ICS, E1000_ICS_LSC);
+	/* start the watchdog. */
+	hw->mac.get_link_status = 1;
+	schedule_work(&adapter->watchdog_task);
+
 	return 0;
 }
 
 void igb_down(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl, rctl;
 	int i;
 
@@ -1012,8 +1129,10 @@
 	wrfl();
 	msleep(10);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		napi_disable(&adapter->rx_ring[i].napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		napi_disable(&q_vector->napi);
+	}
 
 	igb_irq_disable(adapter);
 
@@ -1052,6 +1171,7 @@
 
 void igb_reset(struct igb_adapter *adapter)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_mac_info *mac = &hw->mac;
 	struct e1000_fc_info *fc = &hw->fc;
@@ -1063,7 +1183,8 @@
 	 */
 	switch (mac->type) {
 	case e1000_82576:
-		pba = E1000_PBA_64K;
+		pba = rd32(E1000_RXPBS);
+		pba &= E1000_RXPBS_SIZE_MASK_82576;
 		break;
 	case e1000_82575:
 	default:
@@ -1138,10 +1259,10 @@
 	if (adapter->vfs_allocated_count) {
 		int i;
 		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
-			adapter->vf_data[i].clear_to_send = false;
+			adapter->vf_data[i].flags = 0;
 
 		/* ping all the active vfs to let them know we are going down */
-			igb_ping_all_vfs(adapter);
+		igb_ping_all_vfs(adapter);
 
 		/* disable transmits and receives */
 		wr32(E1000_VFRE, 0);
@@ -1149,23 +1270,23 @@
 	}
 
 	/* Allow time for pending master requests to run */
-	adapter->hw.mac.ops.reset_hw(&adapter->hw);
+	hw->mac.ops.reset_hw(hw);
 	wr32(E1000_WUC, 0);
 
-	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
-		dev_err(&adapter->pdev->dev, "Hardware Error\n");
+	if (hw->mac.ops.init_hw(hw))
+		dev_err(&pdev->dev, "Hardware Error\n");
 
 	igb_update_mng_vlan(adapter);
 
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
-	igb_reset_adaptive(&adapter->hw);
-	igb_get_phy_info(&adapter->hw);
+	igb_reset_adaptive(hw);
+	igb_get_phy_info(hw);
 }
 
 static const struct net_device_ops igb_netdev_ops = {
-	.ndo_open 		= igb_open,
+	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
 	.ndo_start_xmit		= igb_xmit_frame_adv,
 	.ndo_get_stats		= igb_get_stats,
@@ -1201,10 +1322,11 @@
 	struct net_device *netdev;
 	struct igb_adapter *adapter;
 	struct e1000_hw *hw;
+	u16 eeprom_data = 0;
+	static int global_quad_port_a; /* global quad port a indication */
 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
 	unsigned long mmio_start, mmio_len;
 	int err, pci_using_dac;
-	u16 eeprom_data = 0;
 	u16 eeprom_apme_mask = IGB_EEPROM_APME;
 	u32 part_num;
 
@@ -1281,8 +1403,6 @@
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_device_id = pdev->subsystem_device;
 
-	/* setup the private structure */
-	hw->back = adapter;
 	/* Copy the default MAC, PHY and NVM function pointers */
 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -1292,46 +1412,6 @@
 	if (err)
 		goto err_sw_init;
 
-#ifdef CONFIG_PCI_IOV
-	/* since iov functionality isn't critical to base device function we
-	 * can accept failure.  If it fails we don't allow iov to be enabled */
-	if (hw->mac.type == e1000_82576) {
-		/* 82576 supports a maximum of 7 VFs in addition to the PF */
-		unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
-		int i;
-		unsigned char mac_addr[ETH_ALEN];
-
-		if (num_vfs) {
-			adapter->vf_data = kcalloc(num_vfs,
-						sizeof(struct vf_data_storage),
-						GFP_KERNEL);
-			if (!adapter->vf_data) {
-				dev_err(&pdev->dev,
-				        "Could not allocate VF private data - "
-					"IOV enable failed\n");
-			} else {
-				err = pci_enable_sriov(pdev, num_vfs);
-				if (!err) {
-					adapter->vfs_allocated_count = num_vfs;
-					dev_info(&pdev->dev,
-					         "%d vfs allocated\n",
-					         num_vfs);
-					for (i = 0;
-					     i < adapter->vfs_allocated_count;
-					     i++) {
-						random_ether_addr(mac_addr);
-						igb_set_vf_mac(adapter, i,
-						               mac_addr);
-					}
-				} else {
-					kfree(adapter->vf_data);
-					adapter->vf_data = NULL;
-				}
-			}
-		}
-	}
-
-#endif
 	/* setup the private structure */
 	err = igb_sw_init(adapter);
 	if (err)
@@ -1339,16 +1419,6 @@
 
 	igb_get_bus_info_pcie(hw);
 
-	/* set flags */
-	switch (hw->mac.type) {
-	case e1000_82575:
-		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
-		break;
-	case e1000_82576:
-	default:
-		break;
-	}
-
 	hw->phy.autoneg_wait_to_complete = false;
 	hw->mac.adaptive_ifs = true;
 
@@ -1372,7 +1442,6 @@
 	netdev->features |= NETIF_F_IPV6_CSUM;
 	netdev->features |= NETIF_F_TSO;
 	netdev->features |= NETIF_F_TSO6;
-
 	netdev->features |= NETIF_F_GRO;
 
 	netdev->vlan_features |= NETIF_F_TSO;
@@ -1384,10 +1453,10 @@
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-	if (adapter->hw.mac.type == e1000_82576)
+	if (hw->mac.type >= e1000_82576)
 		netdev->features |= NETIF_F_SCTP_CSUM;
 
-	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
+	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
 
 	/* before reading the NVM, reset the controller to put the device in a
 	 * known good starting state */
@@ -1429,9 +1498,6 @@
 	hw->fc.requested_mode = e1000_fc_default;
 	hw->fc.current_mode = e1000_fc_default;
 
-	adapter->itr_setting = IGB_DEFAULT_ITR;
-	adapter->itr = IGB_START_ITR;
-
 	igb_validate_mdi_setting(hw);
 
 	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
@@ -1498,66 +1564,64 @@
 		dev_info(&pdev->dev, "DCA enabled\n");
 		igb_setup_dca(adapter);
 	}
+
 #endif
+	switch (hw->mac.type) {
+	case e1000_82576:
+		/*
+		 * Initialize hardware timer: we keep it running just in case
+		 * that some program needs it later on.
+		 */
+		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+		adapter->cycles.read = igb_read_clock;
+		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+		adapter->cycles.mult = 1;
+		/**
+		 * Scale the NIC clock cycle by a large factor so that
+		 * relatively small clock corrections can be added or
+		 * subtracted at each clock tick. The drawbacks of a large
+		 * factor are a) that the clock register overflows more quickly
+		 * (not such a big deal) and b) that the increment per tick has
+		 * to fit into 24 bits.  As a result we need to use a shift of
+		 * 19 so we can fit a value of 16 into the TIMINCA register.
+		 */
+		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
+		wr32(E1000_TIMINCA,
+		                (1 << E1000_TIMINCA_16NS_SHIFT) |
+		                (16 << IGB_82576_TSYNC_SHIFT));
 
-	/*
-	 * Initialize hardware timer: we keep it running just in case
-	 * that some program needs it later on.
-	 */
-	memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-	adapter->cycles.read = igb_read_clock;
-	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-	adapter->cycles.mult = 1;
-	adapter->cycles.shift = IGB_TSYNC_SHIFT;
-	wr32(E1000_TIMINCA,
-	     (1<<24) |
-	     IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
-#if 0
-	/*
-	 * Avoid rollover while we initialize by resetting the time counter.
-	 */
-	wr32(E1000_SYSTIML, 0x00000000);
-	wr32(E1000_SYSTIMH, 0x00000000);
-#else
-	/*
-	 * Set registers so that rollover occurs soon to test this.
-	 */
-	wr32(E1000_SYSTIML, 0x00000000);
-	wr32(E1000_SYSTIMH, 0xFF800000);
-#endif
-	wrfl();
-	timecounter_init(&adapter->clock,
-			 &adapter->cycles,
-			 ktime_to_ns(ktime_get_real()));
+		/* Set registers so that rollover occurs soon to test this. */
+		wr32(E1000_SYSTIML, 0x00000000);
+		wr32(E1000_SYSTIMH, 0xFF800000);
+		wrfl();
 
-	/*
-	 * Synchronize our NIC clock against system wall clock. NIC
-	 * time stamp reading requires ~3us per sample, each sample
-	 * was pretty stable even under load => only require 10
-	 * samples for each offset comparison.
-	 */
-	memset(&adapter->compare, 0, sizeof(adapter->compare));
-	adapter->compare.source = &adapter->clock;
-	adapter->compare.target = ktime_get_real;
-	adapter->compare.num_samples = 10;
-	timecompare_update(&adapter->compare, 0);
-
-#ifdef DEBUG
-	{
-		char buffer[160];
-		printk(KERN_DEBUG
-			"igb: %s: hw %p initialized timer\n",
-			igb_get_time_str(adapter, buffer),
-			&adapter->hw);
+		timecounter_init(&adapter->clock,
+				 &adapter->cycles,
+				 ktime_to_ns(ktime_get_real()));
+		/*
+		 * Synchronize our NIC clock against system wall clock. NIC
+		 * time stamp reading requires ~3us per sample, each sample
+		 * was pretty stable even under load => only require 10
+		 * samples for each offset comparison.
+		 */
+		memset(&adapter->compare, 0, sizeof(adapter->compare));
+		adapter->compare.source = &adapter->clock;
+		adapter->compare.target = ktime_get_real;
+		adapter->compare.num_samples = 10;
+		timecompare_update(&adapter->compare, 0);
+		break;
+	case e1000_82575:
+		/* 82575 does not support timesync */
+	default:
+		break;
 	}
-#endif
 
 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
 	/* print bus type/speed/width info */
 	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
 		 netdev->name,
-		 ((hw->bus.speed == e1000_bus_speed_2500)
-		  ? "2.5Gb/s" : "unknown"),
+		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		                                            "unknown"),
 		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
 		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
 		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
@@ -1584,15 +1648,14 @@
 
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
-
-	igb_free_queues(adapter);
 err_sw_init:
+	igb_clear_interrupt_scheme(adapter);
 	iounmap(hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
-	pci_release_selected_regions(pdev, pci_select_bars(pdev,
-	                             IORESOURCE_MEM));
+	pci_release_selected_regions(pdev,
+	                             pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
 	pci_disable_device(pdev);
@@ -1637,12 +1700,10 @@
 
 	unregister_netdev(netdev);
 
-	if (!igb_check_reset_block(&adapter->hw))
-		igb_reset_phy(&adapter->hw);
+	if (!igb_check_reset_block(hw))
+		igb_reset_phy(hw);
 
-	igb_reset_interrupt_capability(adapter);
-
-	igb_free_queues(adapter);
+	igb_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PCI_IOV
 	/* reclaim resources allocated to VFs */
@@ -1658,11 +1719,12 @@
 		dev_info(&pdev->dev, "IOV Disabled\n");
 	}
 #endif
+
 	iounmap(hw->hw_addr);
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
-	pci_release_selected_regions(pdev, pci_select_bars(pdev,
-	                             IORESOURCE_MEM));
+	pci_release_selected_regions(pdev,
+	                             pci_select_bars(pdev, IORESOURCE_MEM));
 
 	free_netdev(netdev);
 
@@ -1672,6 +1734,54 @@
 }
 
 /**
+ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
+ *
+ * This function initializes the vf specific data storage and then attempts to
+ * allocate the VFs.  The reason for this ordering is that it is much more
+ * expensive time-wise to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
+ **/
+static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *pdev = adapter->pdev;
+
+	if (adapter->vfs_allocated_count > 7)
+		adapter->vfs_allocated_count = 7;
+
+	if (adapter->vfs_allocated_count) {
+		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+		                           sizeof(struct vf_data_storage),
+		                           GFP_KERNEL);
+		/* if allocation failed then we do not support SR-IOV */
+		if (!adapter->vf_data) {
+			adapter->vfs_allocated_count = 0;
+			dev_err(&pdev->dev, "Unable to allocate memory for VF "
+			        "Data Storage\n");
+		}
+	}
+
+	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
+		kfree(adapter->vf_data);
+		adapter->vf_data = NULL;
+#endif /* CONFIG_PCI_IOV */
+		adapter->vfs_allocated_count = 0;
+#ifdef CONFIG_PCI_IOV
+	} else {
+		unsigned char mac_addr[ETH_ALEN];
+		int i;
+		dev_info(&pdev->dev, "%d vfs allocated\n",
+		         adapter->vfs_allocated_count);
+		for (i = 0; i < adapter->vfs_allocated_count; i++) {
+			random_ether_addr(mac_addr);
+			igb_set_vf_mac(adapter, i, mac_addr);
+		}
+	}
+#endif /* CONFIG_PCI_IOV */
+}
+
+/**
  * igb_sw_init - Initialize general software structures (struct igb_adapter)
  * @adapter: board private structure to initialize
  *
@@ -1689,20 +1799,25 @@
 
 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
-	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	adapter->rx_ps_hdr_size = 0; /* disable packet split */
+	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
-	/* This call may decrease the number of queues depending on
-	 * interrupt mode. */
-	igb_set_interrupt_capability(adapter);
+#ifdef CONFIG_PCI_IOV
+	if (hw->mac.type == e1000_82576)
+		adapter->vfs_allocated_count = max_vfs;
 
-	if (igb_alloc_queues(adapter)) {
+#endif /* CONFIG_PCI_IOV */
+	/* This call may decrease the number of queues */
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
 
+	igb_probe_vfs(adapter);
+
 	/* Explicitly disable IRQ since the NIC can be in any state. */
 	igb_irq_disable(adapter);
 
@@ -1747,20 +1862,12 @@
 
 	/* e1000_power_up_phy(adapter); */
 
-	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
-	if ((adapter->hw.mng_cookie.status &
-	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
-		igb_update_mng_vlan(adapter);
-
 	/* before we allocate an interrupt, we must be ready to handle it.
 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 	 * as soon as we call pci_request_irq, so we have to setup our
 	 * clean_rx handler before we do so.  */
 	igb_configure(adapter);
 
-	igb_vmm_control(adapter);
-	igb_set_vmolr(hw, adapter->vfs_allocated_count);
-
 	err = igb_request_irq(adapter);
 	if (err)
 		goto err_req_irq;
@@ -1768,18 +1875,28 @@
 	/* From here on the code is the same as igb_up() */
 	clear_bit(__IGB_DOWN, &adapter->state);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		napi_enable(&adapter->rx_ring[i].napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		napi_enable(&q_vector->napi);
+	}
 
 	/* Clear any pending interrupts. */
 	rd32(E1000_ICR);
 
 	igb_irq_enable(adapter);
 
+	/* notify VFs that reset has been completed */
+	if (adapter->vfs_allocated_count) {
+		u32 reg_data = rd32(E1000_CTRL_EXT);
+		reg_data |= E1000_CTRL_EXT_PFRSTD;
+		wr32(E1000_CTRL_EXT, reg_data);
+	}
+
 	netif_tx_start_all_queues(netdev);
 
-	/* Fire a link status change interrupt to start the watchdog. */
-	wr32(E1000_ICS, E1000_ICS_LSC);
+	/* start the watchdog. */
+	hw->mac.get_link_status = 1;
+	schedule_work(&adapter->watchdog_task);
 
 	return 0;
 
@@ -1818,28 +1935,18 @@
 	igb_free_all_tx_resources(adapter);
 	igb_free_all_rx_resources(adapter);
 
-	/* kill manageability vlan ID if supported, but not if a vlan with
-	 * the same ID is registered on the host OS (let 8021q kill it) */
-	if ((adapter->hw.mng_cookie.status &
-			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-	     !(adapter->vlgrp &&
-	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
-		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-
 	return 0;
 }
 
 /**
  * igb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int igb_setup_tx_resources(struct igb_adapter *adapter,
-			   struct igb_ring *tx_ring)
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct pci_dev *pdev = tx_ring->pdev;
 	int size;
 
 	size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -1852,20 +1959,20 @@
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+	tx_ring->desc = pci_alloc_consistent(pdev,
+	                                     tx_ring->size,
 					     &tx_ring->dma);
 
 	if (!tx_ring->desc)
 		goto err;
 
-	tx_ring->adapter = adapter;
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 	return 0;
 
 err:
 	vfree(tx_ring->buffer_info);
-	dev_err(&adapter->pdev->dev,
+	dev_err(&pdev->dev,
 		"Unable to allocate memory for the transmit descriptor ring\n");
 	return -ENOMEM;
 }
@@ -1879,13 +1986,13 @@
  **/
 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	int i, err = 0;
-	int r_idx;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		err = igb_setup_tx_resources(&adapter->tx_ring[i]);
 		if (err) {
-			dev_err(&adapter->pdev->dev,
+			dev_err(&pdev->dev,
 				"Allocation for Tx Queue %u failed\n", i);
 			for (i--; i >= 0; i--)
 				igb_free_tx_resources(&adapter->tx_ring[i]);
@@ -1894,13 +2001,80 @@
 	}
 
 	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
-		r_idx = i % adapter->num_tx_queues;
+		int r_idx = i % adapter->num_tx_queues;
 		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
 	}
 	return err;
 }
 
 /**
+ * igb_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
+ **/
+void igb_setup_tctl(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tctl;
+
+	/* disable queue 0 which is enabled by default on 82575 and 82576 */
+	wr32(E1000_TXDCTL(0), 0);
+
+	/* Program the Transmit Control Register */
+	tctl = rd32(E1000_TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+	igb_config_collision_dist(hw);
+
+	/* Enable transmits */
+	tctl |= E1000_TCTL_EN;
+
+	wr32(E1000_TCTL, tctl);
+}
+
+/**
+ * igb_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ **/
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 txdctl;
+	u64 tdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+
+	/* disable the queue */
+	txdctl = rd32(E1000_TXDCTL(reg_idx));
+	wr32(E1000_TXDCTL(reg_idx),
+	                txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+	wrfl();
+	mdelay(10);
+
+	wr32(E1000_TDLEN(reg_idx),
+	                ring->count * sizeof(union e1000_adv_tx_desc));
+	wr32(E1000_TDBAL(reg_idx),
+	                tdba & 0x00000000ffffffffULL);
+	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+
+	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
+	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+	writel(0, ring->head);
+	writel(0, ring->tail);
+
+	txdctl |= IGB_TX_PTHRESH;
+	txdctl |= IGB_TX_HTHRESH << 8;
+	txdctl |= IGB_TX_WTHRESH << 16;
+
+	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+	wr32(E1000_TXDCTL(reg_idx), txdctl);
+}
+
+/**
  * igb_configure_tx - Configure transmit Unit after Reset
  * @adapter: board private structure
  *
@@ -1908,71 +2082,21 @@
  **/
 static void igb_configure_tx(struct igb_adapter *adapter)
 {
-	u64 tdba;
-	struct e1000_hw *hw = &adapter->hw;
-	u32 tctl;
-	u32 txdctl, txctrl;
-	int i, j;
+	int i;
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *ring = &adapter->tx_ring[i];
-		j = ring->reg_idx;
-		wr32(E1000_TDLEN(j),
-		     ring->count * sizeof(union e1000_adv_tx_desc));
-		tdba = ring->dma;
-		wr32(E1000_TDBAL(j),
-		     tdba & 0x00000000ffffffffULL);
-		wr32(E1000_TDBAH(j), tdba >> 32);
-
-		ring->head = E1000_TDH(j);
-		ring->tail = E1000_TDT(j);
-		writel(0, hw->hw_addr + ring->tail);
-		writel(0, hw->hw_addr + ring->head);
-		txdctl = rd32(E1000_TXDCTL(j));
-		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-		wr32(E1000_TXDCTL(j), txdctl);
-
-		/* Turn off Relaxed Ordering on head write-backs.  The
-		 * writebacks MUST be delivered in order or it will
-		 * completely screw up our bookeeping.
-		 */
-		txctrl = rd32(E1000_DCA_TXCTRL(j));
-		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
-		wr32(E1000_DCA_TXCTRL(j), txctrl);
-	}
-
-	/* disable queue 0 to prevent tail bump w/o re-configuration */
-	if (adapter->vfs_allocated_count)
-		wr32(E1000_TXDCTL(0), 0);
-
-	/* Program the Transmit Control Register */
-	tctl = rd32(E1000_TCTL);
-	tctl &= ~E1000_TCTL_CT;
-	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
-		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
-
-	igb_config_collision_dist(hw);
-
-	/* Setup Transmit Descriptor Settings for eop descriptor */
-	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
-
-	/* Enable transmits */
-	tctl |= E1000_TCTL_EN;
-
-	wr32(E1000_TCTL, tctl);
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
 }
 
 /**
  * igb_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int igb_setup_rx_resources(struct igb_adapter *adapter,
-			   struct igb_ring *rx_ring)
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct pci_dev *pdev = rx_ring->pdev;
 	int size, desc_len;
 
 	size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -1996,13 +2120,12 @@
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
-	rx_ring->adapter = adapter;
-
 	return 0;
 
 err:
 	vfree(rx_ring->buffer_info);
-	dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
+	rx_ring->buffer_info = NULL;
+	dev_err(&pdev->dev, "Unable to allocate memory for "
 		"the receive descriptor ring\n");
 	return -ENOMEM;
 }
@@ -2016,12 +2139,13 @@
  **/
 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		err = igb_setup_rx_resources(&adapter->rx_ring[i]);
 		if (err) {
-			dev_err(&adapter->pdev->dev,
+			dev_err(&pdev->dev,
 				"Allocation for Rx Queue %u failed\n", i);
 			for (i--; i >= 0; i--)
 				igb_free_rx_resources(&adapter->rx_ring[i]);
@@ -2033,15 +2157,118 @@
 }
 
 /**
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 mrqc, rxcsum;
+	u32 j, num_rx_queues, shift = 0, shift2 = 0;
+	union e1000_reta {
+		u32 dword;
+		u8  bytes[4];
+	} reta;
+	static const u8 rsshash[40] = {
+		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
+		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
+		0xae, 0x7b, 0x30, 0xb4,	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
+		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+
+	/* Fill out hash function seeds */
+	for (j = 0; j < 10; j++) {
+		u32 rsskey = rsshash[(j * 4)];
+		rsskey |= rsshash[(j * 4) + 1] << 8;
+		rsskey |= rsshash[(j * 4) + 2] << 16;
+		rsskey |= rsshash[(j * 4) + 3] << 24;
+		array_wr32(E1000_RSSRK(0), j, rsskey);
+	}
+
+	num_rx_queues = adapter->num_rx_queues;
+
+	if (adapter->vfs_allocated_count) {
+		/* 82575 and 82576 supports 2 RSS queues for VMDq */
+		switch (hw->mac.type) {
+		case e1000_82576:
+			shift = 3;
+			num_rx_queues = 2;
+			break;
+		case e1000_82575:
+			shift = 2;
+			shift2 = 6;
+		default:
+			break;
+		}
+	} else {
+		if (hw->mac.type == e1000_82575)
+			shift = 6;
+	}
+
+	for (j = 0; j < (32 * 4); j++) {
+		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+		if (shift2)
+			reta.bytes[j & 3] |= num_rx_queues << shift2;
+		if ((j & 3) == 3)
+			wr32(E1000_RETA(j >> 2), reta.dword);
+	}
+
+	/*
+	 * Disable raw packet checksumming so that RSS hash is placed in
+	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
+	 * offloads as they are enabled by default
+	 */
+	rxcsum = rd32(E1000_RXCSUM);
+	rxcsum |= E1000_RXCSUM_PCSD;
+
+	if (adapter->hw.mac.type >= e1000_82576)
+		/* Enable Receive Checksum Offload for SCTP */
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+
+	/* Don't need to set TUOFL or IPOFL, they default to 1 */
+	wr32(E1000_RXCSUM, rxcsum);
+
+	/* If VMDq is enabled then we set the appropriate mode for that, else
+	 * we default to RSS so that an RSS hash is calculated per packet even
+	 * if we are only using one queue */
+	if (adapter->vfs_allocated_count) {
+		if (hw->mac.type > e1000_82575) {
+			/* Set the default pool for the PF's first queue */
+			u32 vtctl = rd32(E1000_VT_CTL);
+			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+				   E1000_VT_CTL_DISABLE_DEF_POOL);
+			vtctl |= adapter->vfs_allocated_count <<
+				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+			wr32(E1000_VT_CTL, vtctl);
+		}
+		if (adapter->num_rx_queues > 1)
+			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+		else
+			mrqc = E1000_MRQC_ENABLE_VMDQ;
+	} else {
+		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+	}
+	igb_vmm_control(adapter);
+
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+		 E1000_MRQC_RSS_FIELD_IPV4_TCP);
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
+		 E1000_MRQC_RSS_FIELD_IPV6_TCP);
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
+		 E1000_MRQC_RSS_FIELD_IPV6_UDP);
+	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
+		 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+	wr32(E1000_MRQC, mrqc);
+}
+
+/**
  * igb_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
-static void igb_setup_rctl(struct igb_adapter *adapter)
+void igb_setup_rctl(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
-	u32 srrctl = 0;
-	int i;
 
 	rctl = rd32(E1000_RCTL);
 
@@ -2058,77 +2285,47 @@
 	 */
 	rctl |= E1000_RCTL_SECRC;
 
-	/*
-	 * disable store bad packets and clear size bits.
-	 */
+	/* disable store bad packets and clear size bits. */
 	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
 
-	/* enable LPE when to prevent packets larger than max_frame_size */
-		rctl |= E1000_RCTL_LPE;
+	/* enable LPE to prevent packets larger than max_frame_size */
+	rctl |= E1000_RCTL_LPE;
 
-	/* Setup buffer sizes */
-	switch (adapter->rx_buffer_len) {
-	case IGB_RXBUFFER_256:
-		rctl |= E1000_RCTL_SZ_256;
-		break;
-	case IGB_RXBUFFER_512:
-		rctl |= E1000_RCTL_SZ_512;
-		break;
-	default:
-		srrctl = ALIGN(adapter->rx_buffer_len, 1024)
-		         >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-		break;
-	}
-
-	/* 82575 and greater support packet-split where the protocol
-	 * header is placed in skb->data and the packet data is
-	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
-	 * In the case of a non-split, skb->data is linearly filled,
-	 * followed by the page buffers.  Therefore, skb->data is
-	 * sized to hold the largest protocol header.
-	 */
-	/* allocations using alloc_page take too long for regular MTU
-	 * so only enable packet split for jumbo frames */
-	if (adapter->netdev->mtu > ETH_DATA_LEN) {
-		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
-		srrctl |= adapter->rx_ps_hdr_size <<
-			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-	} else {
-		adapter->rx_ps_hdr_size = 0;
-		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-	}
+	/* disable queue 0 to prevent tail write w/o re-config */
+	wr32(E1000_RXDCTL(0), 0);
 
 	/* Attention!!!  For SR-IOV PF driver operations you must enable
 	 * queue drop for all VF and PF queues to prevent head of line blocking
 	 * if an un-trusted VF does not provide descriptors to hardware.
 	 */
 	if (adapter->vfs_allocated_count) {
-		u32 vmolr;
-
 		/* set all queue drop enable bits */
 		wr32(E1000_QDE, ALL_QUEUES);
-		srrctl |= E1000_SRRCTL_DROP_EN;
-
-		/* disable queue 0 to prevent tail write w/o re-config */
-		wr32(E1000_RXDCTL(0), 0);
-
-		vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
-		if (rctl & E1000_RCTL_LPE)
-			vmolr |= E1000_VMOLR_LPE;
-		if (adapter->num_rx_queues > 1)
-			vmolr |= E1000_VMOLR_RSSE;
-		wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		int j = adapter->rx_ring[i].reg_idx;
-		wr32(E1000_SRRCTL(j), srrctl);
 	}
 
 	wr32(E1000_RCTL, rctl);
 }
 
+static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+                                   int vfn)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr;
+
+	/* if it isn't the PF, check to see if VFs are enabled and
+	 * increase the size to support vlan tags */
+	if (vfn < adapter->vfs_allocated_count &&
+	    adapter->vf_data[vfn].vlans_enabled)
+		size += VLAN_TAG_SIZE;
+
+	vmolr = rd32(E1000_VMOLR(vfn));
+	vmolr &= ~E1000_VMOLR_RLPML_MASK;
+	vmolr |= size | E1000_VMOLR_LPE;
+	wr32(E1000_VMOLR(vfn), vmolr);
+
+	return 0;
+}
+
 /**
  * igb_rlpml_set - set maximum receive packet size
  * @adapter: board private structure
@@ -2148,33 +2345,107 @@
 	 * size and set the VMOLR RLPML to the size we need */
 	if (pf_id) {
 		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
-		max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
+		max_frame_size = MAX_JUMBO_FRAME_SIZE;
 	}
 
 	wr32(E1000_RLPML, max_frame_size);
 }
 
-/**
- * igb_configure_vt_default_pool - Configure VT default pool
- * @adapter: board private structure
- *
- * Configure the default pool
- **/
-static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
+static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u16 pf_id = adapter->vfs_allocated_count;
-	u32 vtctl;
+	u32 vmolr;
 
-	/* not in sr-iov mode - do nothing */
-	if (!pf_id)
+	/*
+	 * This register exists only on 82576 and newer so if we are older then
+	 * we should exit and do nothing
+	 */
+	if (hw->mac.type < e1000_82576)
 		return;
 
-	vtctl = rd32(E1000_VT_CTL);
-	vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
-		   E1000_VT_CTL_DISABLE_DEF_POOL);
-	vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
-	wr32(E1000_VT_CTL, vtctl);
+	vmolr = rd32(E1000_VMOLR(vfn));
+	vmolr |= E1000_VMOLR_AUPE |        /* Accept untagged packets */
+	         E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
+
+	/* clear all bits that might not be set */
+	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
+
+	if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count)
+		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
+	/*
+	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
+	 * multicast packets
+	 */
+	if (vfn <= adapter->vfs_allocated_count)
+		vmolr |= E1000_VMOLR_BAM;	   /* Accept broadcast */
+
+	wr32(E1000_VMOLR(vfn), vmolr);
+}
+
+/**
+ * igb_configure_rx_ring - Configure a receive ring after Reset
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u64 rdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+	u32 srrctl, rxdctl;
+
+	/* disable the queue */
+	rxdctl = rd32(E1000_RXDCTL(reg_idx));
+	wr32(E1000_RXDCTL(reg_idx),
+	                rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+
+	/* Set DMA base address registers */
+	wr32(E1000_RDBAL(reg_idx),
+	     rdba & 0x00000000ffffffffULL);
+	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+	wr32(E1000_RDLEN(reg_idx),
+	               ring->count * sizeof(union e1000_adv_rx_desc));
+
+	/* initialize head and tail */
+	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
+	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+	writel(0, ring->head);
+	writel(0, ring->tail);
+
+	/* set descriptor configuration */
+	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
+		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
+		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+		srrctl |= IGB_RXBUFFER_16384 >>
+		          E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else
+		srrctl |= (PAGE_SIZE / 2) >>
+		          E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif
+		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+	} else {
+		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
+		         E1000_SRRCTL_BSIZEPKT_SHIFT;
+		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+	}
+
+	wr32(E1000_SRRCTL(reg_idx), srrctl);
+
+	/* set filtering for VMDQ pools */
+	igb_set_vmolr(adapter, reg_idx & 0x7);
+
+	/* enable receive descriptor fetching */
+	rxdctl = rd32(E1000_RXDCTL(reg_idx));
+	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+	rxdctl &= 0xFFF00000;
+	rxdctl |= IGB_RX_PTHRESH;
+	rxdctl |= IGB_RX_HTHRESH << 8;
+	rxdctl |= IGB_RX_WTHRESH << 16;
+	wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
 /**
@@ -2185,108 +2456,8 @@
  **/
 static void igb_configure_rx(struct igb_adapter *adapter)
 {
-	u64 rdba;
-	struct e1000_hw *hw = &adapter->hw;
-	u32 rctl, rxcsum;
-	u32 rxdctl;
 	int i;
 
-	/* disable receives while setting up the descriptors */
-	rctl = rd32(E1000_RCTL);
-	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-	wrfl();
-	mdelay(10);
-
-	if (adapter->itr_setting > 3)
-		wr32(E1000_ITR, adapter->itr);
-
-	/* Setup the HW Rx Head and Tail Descriptor Pointers and
-	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &adapter->rx_ring[i];
-		int j = ring->reg_idx;
-		rdba = ring->dma;
-		wr32(E1000_RDBAL(j),
-		     rdba & 0x00000000ffffffffULL);
-		wr32(E1000_RDBAH(j), rdba >> 32);
-		wr32(E1000_RDLEN(j),
-		     ring->count * sizeof(union e1000_adv_rx_desc));
-
-		ring->head = E1000_RDH(j);
-		ring->tail = E1000_RDT(j);
-		writel(0, hw->hw_addr + ring->tail);
-		writel(0, hw->hw_addr + ring->head);
-
-		rxdctl = rd32(E1000_RXDCTL(j));
-		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
-		rxdctl &= 0xFFF00000;
-		rxdctl |= IGB_RX_PTHRESH;
-		rxdctl |= IGB_RX_HTHRESH << 8;
-		rxdctl |= IGB_RX_WTHRESH << 16;
-		wr32(E1000_RXDCTL(j), rxdctl);
-	}
-
-	if (adapter->num_rx_queues > 1) {
-		u32 random[10];
-		u32 mrqc;
-		u32 j, shift;
-		union e1000_reta {
-			u32 dword;
-			u8  bytes[4];
-		} reta;
-
-		get_random_bytes(&random[0], 40);
-
-		if (hw->mac.type >= e1000_82576)
-			shift = 0;
-		else
-			shift = 6;
-		for (j = 0; j < (32 * 4); j++) {
-			reta.bytes[j & 3] =
-				adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
-			if ((j & 3) == 3)
-				writel(reta.dword,
-				       hw->hw_addr + E1000_RETA(0) + (j & ~3));
-		}
-		if (adapter->vfs_allocated_count)
-			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
-		else
-			mrqc = E1000_MRQC_ENABLE_RSS_4Q;
-
-		/* Fill out hash function seeds */
-		for (j = 0; j < 10; j++)
-			array_wr32(E1000_RSSRK(0), j, random[j]);
-
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
-			 E1000_MRQC_RSS_FIELD_IPV6_TCP);
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
-			 E1000_MRQC_RSS_FIELD_IPV6_UDP);
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
-			 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
-
-		wr32(E1000_MRQC, mrqc);
-	} else if (adapter->vfs_allocated_count) {
-		/* Enable multi-queue for sr-iov */
-		wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
-	}
-
-	/* Enable Receive Checksum Offload for TCP and UDP */
-	rxcsum = rd32(E1000_RXCSUM);
-	/* Disable raw packet checksumming */
-	rxcsum |= E1000_RXCSUM_PCSD;
-
-	if (adapter->hw.mac.type == e1000_82576)
-		/* Enable Receive Checksum Offload for SCTP */
-		rxcsum |= E1000_RXCSUM_CRCOFL;
-
-	/* Don't need to set TUOFL or IPOFL, they default to 1 */
-	wr32(E1000_RXCSUM, rxcsum);
-
-	/* Set the default pool for the PF's first queue */
-	igb_configure_vt_default_pool(adapter);
-
 	/* set UTA to appropriate mode */
 	igb_set_uta(adapter);
 
@@ -2294,10 +2465,10 @@
 	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
 	                 adapter->vfs_allocated_count);
 
-	igb_rlpml_set(adapter);
-
-	/* Enable Receives */
-	wr32(E1000_RCTL, rctl);
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /**
@@ -2308,14 +2479,17 @@
  **/
 void igb_free_tx_resources(struct igb_ring *tx_ring)
 {
-	struct pci_dev *pdev = tx_ring->adapter->pdev;
-
 	igb_clean_tx_ring(tx_ring);
 
 	vfree(tx_ring->buffer_info);
 	tx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	pci_free_consistent(tx_ring->pdev, tx_ring->size,
+	                    tx_ring->desc, tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -2334,12 +2508,13 @@
 		igb_free_tx_resources(&adapter->tx_ring[i]);
 }
 
-static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
-					   struct igb_buffer *buffer_info)
+void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
+				    struct igb_buffer *buffer_info)
 {
 	buffer_info->dma = 0;
 	if (buffer_info->skb) {
-		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+		skb_dma_unmap(&tx_ring->pdev->dev,
+		              buffer_info->skb,
 		              DMA_TO_DEVICE);
 		dev_kfree_skb_any(buffer_info->skb);
 		buffer_info->skb = NULL;
@@ -2354,7 +2529,6 @@
  **/
 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = tx_ring->adapter;
 	struct igb_buffer *buffer_info;
 	unsigned long size;
 	unsigned int i;
@@ -2365,21 +2539,17 @@
 
 	for (i = 0; i < tx_ring->count; i++) {
 		buffer_info = &tx_ring->buffer_info[i];
-		igb_unmap_and_free_tx_resource(adapter, buffer_info);
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 	}
 
 	size = sizeof(struct igb_buffer) * tx_ring->count;
 	memset(tx_ring->buffer_info, 0, size);
 
 	/* Zero out the descriptor ring */
-
 	memset(tx_ring->desc, 0, tx_ring->size);
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-
-	writel(0, adapter->hw.hw_addr + tx_ring->head);
-	writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -2402,14 +2572,17 @@
  **/
 void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
-	struct pci_dev *pdev = rx_ring->adapter->pdev;
-
 	igb_clean_rx_ring(rx_ring);
 
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	pci_free_consistent(rx_ring->pdev, rx_ring->size,
+	                    rx_ring->desc, rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -2434,26 +2607,21 @@
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
 	struct igb_buffer *buffer_info;
-	struct pci_dev *pdev = adapter->pdev;
 	unsigned long size;
 	unsigned int i;
 
 	if (!rx_ring->buffer_info)
 		return;
+
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
-			if (adapter->rx_ps_hdr_size)
-				pci_unmap_single(pdev, buffer_info->dma,
-						 adapter->rx_ps_hdr_size,
-						 PCI_DMA_FROMDEVICE);
-			else
-				pci_unmap_single(pdev, buffer_info->dma,
-						 adapter->rx_buffer_len,
-						 PCI_DMA_FROMDEVICE);
+			pci_unmap_single(rx_ring->pdev,
+			                 buffer_info->dma,
+					 rx_ring->rx_buffer_len,
+					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
 		}
 
@@ -2461,14 +2629,16 @@
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
+		if (buffer_info->page_dma) {
+			pci_unmap_page(rx_ring->pdev,
+			               buffer_info->page_dma,
+				       PAGE_SIZE / 2,
+				       PCI_DMA_FROMDEVICE);
+			buffer_info->page_dma = 0;
+		}
 		if (buffer_info->page) {
-			if (buffer_info->page_dma)
-				pci_unmap_page(pdev, buffer_info->page_dma,
-					       PAGE_SIZE / 2,
-					       PCI_DMA_FROMDEVICE);
 			put_page(buffer_info->page);
 			buffer_info->page = NULL;
-			buffer_info->page_dma = 0;
 			buffer_info->page_offset = 0;
 		}
 	}
@@ -2481,9 +2651,6 @@
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
-
-	writel(0, adapter->hw.hw_addr + rx_ring->head);
-	writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
@@ -2744,37 +2911,34 @@
 static void igb_watchdog_task(struct work_struct *work)
 {
 	struct igb_adapter *adapter = container_of(work,
-					struct igb_adapter, watchdog_task);
+	                                           struct igb_adapter,
+                                                   watchdog_task);
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	struct igb_ring *tx_ring = adapter->tx_ring;
 	u32 link;
-	u32 eics = 0;
 	int i;
 
 	link = igb_has_link(adapter);
-	if ((netif_carrier_ok(netdev)) && link)
-		goto link_up;
-
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
 			u32 ctrl;
-			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
-						   &adapter->link_speed,
-						   &adapter->link_duplex);
+			hw->mac.ops.get_speed_and_duplex(hw,
+			                                 &adapter->link_speed,
+			                                 &adapter->link_duplex);
 
 			ctrl = rd32(E1000_CTRL);
 			/* Links status message must follow this format */
 			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
 				 "Flow Control: %s\n",
-			         netdev->name,
-				 adapter->link_speed,
-				 adapter->link_duplex == FULL_DUPLEX ?
+			       netdev->name,
+			       adapter->link_speed,
+			       adapter->link_duplex == FULL_DUPLEX ?
 				 "Full Duplex" : "Half Duplex",
-				 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
-				 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
-				 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
-				 E1000_CTRL_TFCE) ? "TX" : "None")));
+			       ((ctrl & E1000_CTRL_TFCE) &&
+			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
+			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
+			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
 
 			/* tweak tx_queue_len according to speed/duplex and
 			 * adjust the timeout factor */
@@ -2818,20 +2982,8 @@
 		}
 	}
 
-link_up:
 	igb_update_stats(adapter);
-
-	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
-	adapter->tpt_old = adapter->stats.tpt;
-	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
-	adapter->colc_old = adapter->stats.colc;
-
-	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
-	adapter->gorc_old = adapter->stats.gorc;
-	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
-	adapter->gotc_old = adapter->stats.gotc;
-
-	igb_update_adaptive(&adapter->hw);
+	igb_update_adaptive(hw);
 
 	if (!netif_carrier_ok(netdev)) {
 		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
@@ -2846,18 +2998,22 @@
 		}
 	}
 
+	/* Force detection of hung controller every watchdog period */
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		adapter->tx_ring[i].detect_tx_hung = true;
+
 	/* Cause software interrupt to ensure rx ring is cleaned */
 	if (adapter->msix_entries) {
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			eics |= adapter->rx_ring[i].eims_value;
+		u32 eics = 0;
+		for (i = 0; i < adapter->num_q_vectors; i++) {
+			struct igb_q_vector *q_vector = adapter->q_vector[i];
+			eics |= q_vector->eims_value;
+		}
 		wr32(E1000_EICS, eics);
 	} else {
 		wr32(E1000_ICS, E1000_ICS_RXDMT0);
 	}
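With MSI-X, the watchdog now builds the software-interrupt mask by OR-ing the eims_value of every q_vector rather than walking the rx rings, and writes the result to EICS once. A user-space sketch of that aggregation (hypothetical bit assignments, not the driver's actual values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* one interrupt-cause bit per q_vector, as assigned at setup time */
	uint32_t eims_value[] = { 1u << 0, 1u << 1, 1u << 2, 1u << 3 };
	uint32_t eics = 0;
	unsigned int i;

	for (i = 0; i < sizeof(eims_value) / sizeof(eims_value[0]); i++)
		eics |= eims_value[i];	/* aggregate each vector's bit */

	printf("EICS = 0x%08x\n", eics);	/* 0x0000000f */
	return 0;
}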
 
-	/* Force detection of hung controller every watchdog period */
-	tx_ring->detect_tx_hung = true;
-
 	/* Reset the timer */
 	if (!test_bit(__IGB_DOWN, &adapter->state))
 		mod_timer(&adapter->watchdog_timer,
@@ -2871,7 +3027,6 @@
 	latency_invalid = 255
 };
 
-
 /**
  * igb_update_ring_itr - update the dynamic ITR value based on packet size
  *
@@ -2886,25 +3041,37 @@
  *      parameter (see igb_param.c)
  *      NOTE:  This function is called only when operating in a multiqueue
  *             receive environment.
- * @rx_ring: pointer to ring
+ * @q_vector: pointer to q_vector
  **/
-static void igb_update_ring_itr(struct igb_ring *rx_ring)
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 {
-	int new_val = rx_ring->itr_val;
+	int new_val = q_vector->itr_val;
 	int avg_wire_size = 0;
-	struct igb_adapter *adapter = rx_ring->adapter;
-
-	if (!rx_ring->total_packets)
-		goto clear_counts; /* no packets, so don't do anything */
+	struct igb_adapter *adapter = q_vector->adapter;
 
 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
 	 * ints/sec - ITR value of 976.
 	 */
 	if (adapter->link_speed != SPEED_1000) {
-		new_val = 120;
+		new_val = 976;
 		goto set_itr_val;
 	}
-	avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
+
+	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
+		struct igb_ring *ring = q_vector->rx_ring;
+		avg_wire_size = ring->total_bytes / ring->total_packets;
+	}
+
+	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
+		struct igb_ring *ring = q_vector->tx_ring;
+		avg_wire_size = max_t(u32, avg_wire_size,
+		                      (ring->total_bytes /
+		                       ring->total_packets));
+	}
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
 
 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
 	avg_wire_size += 24;
@@ -2919,13 +3086,19 @@
 		new_val = avg_wire_size / 2;
 
 set_itr_val:
-	if (new_val != rx_ring->itr_val) {
-		rx_ring->itr_val = new_val;
-		rx_ring->set_itr = 1;
+	if (new_val != q_vector->itr_val) {
+		q_vector->itr_val = new_val;
+		q_vector->set_itr = 1;
 	}
 clear_counts:
-	rx_ring->total_bytes = 0;
-	rx_ring->total_packets = 0;
+	if (q_vector->rx_ring) {
+		q_vector->rx_ring->total_bytes = 0;
+		q_vector->rx_ring->total_packets = 0;
+	}
+	if (q_vector->tx_ring) {
+		q_vector->tx_ring->total_bytes = 0;
+		q_vector->tx_ring->total_packets = 0;
+	}
 }
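igb_update_ring_itr now folds both the rx and tx ring totals into one average wire size and derives the new ITR value from it. A simplified stand-alone sketch of that calculation (illustrative; the driver applies additional bounds between these steps that fall outside this hunk):

#include <stdio.h>

static unsigned int itr_from_totals(unsigned int rx_bytes, unsigned int rx_pkts,
				    unsigned int tx_bytes, unsigned int tx_pkts)
{
	unsigned int avg_wire_size = 0;

	if (rx_pkts)
		avg_wire_size = rx_bytes / rx_pkts;
	if (tx_pkts && (tx_bytes / tx_pkts) > avg_wire_size)
		avg_wire_size = tx_bytes / tx_pkts;

	if (!avg_wire_size)		/* no work was done, keep the old value */
		return 0;

	avg_wire_size += 24;		/* account for CRC, preamble and gap */
	return avg_wire_size / 2;	/* larger frames -> larger ITR value */
}

int main(void)
{
	printf("itr for 1500-byte frames: %u\n", itr_from_totals(150000, 100, 0, 0));
	printf("itr for 64-byte frames:   %u\n", itr_from_totals(6400, 100, 0, 0));
	return 0;
}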
 
 /**
@@ -2942,7 +3115,7 @@
  *      NOTE:  These calculations are only valid when operating in a single-
  *             queue environment.
  * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
+ * @itr_setting: current q_vector->itr_val
  * @packets: the number of packets during this measurement interval
  * @bytes: the number of bytes during this measurement interval
  **/
@@ -2994,8 +3167,9 @@
 
 static void igb_set_itr(struct igb_adapter *adapter)
 {
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
 	u16 current_itr;
-	u32 new_itr = adapter->itr;
+	u32 new_itr = q_vector->itr_val;
 
 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
 	if (adapter->link_speed != SPEED_1000) {
@@ -3009,18 +3183,14 @@
 				    adapter->rx_ring->total_packets,
 				    adapter->rx_ring->total_bytes);
 
-	if (adapter->rx_ring->buddy) {
-		adapter->tx_itr = igb_update_itr(adapter,
-					    adapter->tx_itr,
-					    adapter->tx_ring->total_packets,
-					    adapter->tx_ring->total_bytes);
-		current_itr = max(adapter->rx_itr, adapter->tx_itr);
-	} else {
-		current_itr = adapter->rx_itr;
-	}
+	adapter->tx_itr = igb_update_itr(adapter,
+				    adapter->tx_itr,
+				    adapter->tx_ring->total_packets,
+				    adapter->tx_ring->total_bytes);
+	current_itr = max(adapter->rx_itr, adapter->tx_itr);
 
 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
-	if (adapter->itr_setting == 3 && current_itr == lowest_latency)
+	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
 		current_itr = low_latency;
 
 	switch (current_itr) {
@@ -3041,18 +3211,17 @@
 set_itr_now:
 	adapter->rx_ring->total_bytes = 0;
 	adapter->rx_ring->total_packets = 0;
-	if (adapter->rx_ring->buddy) {
-		adapter->rx_ring->buddy->total_bytes = 0;
-		adapter->rx_ring->buddy->total_packets = 0;
-	}
+	adapter->tx_ring->total_bytes = 0;
+	adapter->tx_ring->total_packets = 0;
 
-	if (new_itr != adapter->itr) {
+	if (new_itr != q_vector->itr_val) {
 		/* this attempts to bias the interrupt rate towards Bulk
 		 * by adding intermediate steps when interrupt rate is
 		 * increasing */
-		new_itr = new_itr > adapter->itr ?
-			     max((new_itr * adapter->itr) /
-			         (new_itr + (adapter->itr >> 2)), new_itr) :
+		new_itr = new_itr > q_vector->itr_val ?
+		             max((new_itr * q_vector->itr_val) /
+		                 (new_itr + (q_vector->itr_val >> 2)),
+		                 new_itr) :
 			     new_itr;
 		/* Don't write the value here; it resets the adapter's
 		 * internal timer, and causes us to delay far longer than
@@ -3060,25 +3229,22 @@
 		 * value at the beginning of the next interrupt so the timing
 		 * ends up being correct.
 		 */
-		adapter->itr = new_itr;
-		adapter->rx_ring->itr_val = new_itr;
-		adapter->rx_ring->set_itr = 1;
+		q_vector->itr_val = new_itr;
+		q_vector->set_itr = 1;
 	}
 
 	return;
 }
 
-
 #define IGB_TX_FLAGS_CSUM		0x00000001
 #define IGB_TX_FLAGS_VLAN		0x00000002
 #define IGB_TX_FLAGS_TSO		0x00000004
 #define IGB_TX_FLAGS_IPV4		0x00000008
-#define IGB_TX_FLAGS_TSTAMP             0x00000010
-#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
-#define IGB_TX_FLAGS_VLAN_SHIFT	16
+#define IGB_TX_FLAGS_TSTAMP		0x00000010
+#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT		16
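The VLAN tag travels in the upper 16 bits of tx_flags, selected by the mask and shift defined above. A small stand-alone sketch of that packing and unpacking (local macro names mirroring the defines here, hypothetical tag value):

#include <stdint.h>
#include <stdio.h>

#define TX_FLAGS_VLAN		0x00000002u
#define TX_FLAGS_VLAN_MASK	0xffff0000u
#define TX_FLAGS_VLAN_SHIFT	16

int main(void)
{
	uint32_t tx_flags = 0;
	uint16_t vlan_tag = 100;	/* hypothetical 802.1Q tag */

	tx_flags |= TX_FLAGS_VLAN;
	tx_flags |= (uint32_t)vlan_tag << TX_FLAGS_VLAN_SHIFT;

	/* later, the tag is recovered from the upper 16 bits */
	printf("vlan = %u\n",
	       (tx_flags & TX_FLAGS_VLAN_MASK) >> TX_FLAGS_VLAN_SHIFT);
	return 0;
}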
 
-static inline int igb_tso_adv(struct igb_adapter *adapter,
-			      struct igb_ring *tx_ring,
+static inline int igb_tso_adv(struct igb_ring *tx_ring,
 			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
@@ -3140,8 +3306,8 @@
 	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
 	/* For 82575, context index must be unique per ring. */
-	if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
-		mss_l4len_idx |= tx_ring->queue_index << 4;
+	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
 	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
 	context_desc->seqnum_seed = 0;
@@ -3158,14 +3324,14 @@
 	return true;
 }
 
-static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
-					struct igb_ring *tx_ring,
-					struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
+				   struct sk_buff *skb, u32 tx_flags)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
-	unsigned int i;
+	struct pci_dev *pdev = tx_ring->pdev;
 	struct igb_buffer *buffer_info;
 	u32 info = 0, tu_cmd = 0;
+	unsigned int i;
 
 	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
 	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3175,6 +3341,7 @@
 
 		if (tx_flags & IGB_TX_FLAGS_VLAN)
 			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
+
 		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			info |= skb_network_header_len(skb);
@@ -3212,7 +3379,7 @@
 				break;
 			default:
 				if (unlikely(net_ratelimit()))
-					dev_warn(&adapter->pdev->dev,
+					dev_warn(&pdev->dev,
 					    "partial checksum but proto=%x!\n",
 					    skb->protocol);
 				break;
@@ -3221,11 +3388,9 @@
 
 		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
 		context_desc->seqnum_seed = 0;
-		if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
 			context_desc->mss_l4len_idx =
-				cpu_to_le32(tx_ring->queue_index << 4);
-		else
-			context_desc->mss_l4len_idx = 0;
+				cpu_to_le32(tx_ring->reg_idx << 4);
 
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
@@ -3244,11 +3409,11 @@
 #define IGB_MAX_TXD_PWR	16
 #define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
 
-static inline int igb_tx_map_adv(struct igb_adapter *adapter,
-				 struct igb_ring *tx_ring, struct sk_buff *skb,
+static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 				 unsigned int first)
 {
 	struct igb_buffer *buffer_info;
+	struct pci_dev *pdev = tx_ring->pdev;
 	unsigned int len = skb_headlen(skb);
 	unsigned int count = 0, i;
 	unsigned int f;
@@ -3256,8 +3421,8 @@
 
 	i = tx_ring->next_to_use;
 
-	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
-		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_err(&pdev->dev, "TX DMA map failed\n");
 		return 0;
 	}
 
@@ -3293,18 +3458,17 @@
 	tx_ring->buffer_info[i].skb = skb;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
-	return count + 1;
+	return ++count;
 }
 
-static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
-				    struct igb_ring *tx_ring,
+static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
 				    int tx_flags, int count, u32 paylen,
 				    u8 hdr_len)
 {
-	union e1000_adv_tx_desc *tx_desc = NULL;
+	union e1000_adv_tx_desc *tx_desc;
 	struct igb_buffer *buffer_info;
 	u32 olinfo_status = 0, cmd_type_len;
-	unsigned int i;
+	unsigned int i = tx_ring->next_to_use;
 
 	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
 			E1000_ADVTXD_DCMD_DEXT);
@@ -3329,27 +3493,28 @@
 		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
 	}
 
-	if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
-	    (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
+	    (tx_flags & (IGB_TX_FLAGS_CSUM |
+	                 IGB_TX_FLAGS_TSO |
 			 IGB_TX_FLAGS_VLAN)))
-		olinfo_status |= tx_ring->queue_index << 4;
+		olinfo_status |= tx_ring->reg_idx << 4;
 
 	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
 
-	i = tx_ring->next_to_use;
-	while (count--) {
+	do {
 		buffer_info = &tx_ring->buffer_info[i];
 		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
 		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 		tx_desc->read.cmd_type_len =
 			cpu_to_le32(cmd_type_len | buffer_info->length);
 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		count--;
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
-	}
+	} while (count > 0);
 
-	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -3357,16 +3522,15 @@
 	wmb();
 
 	tx_ring->next_to_use = i;
-	writel(i, adapter->hw.hw_addr + tx_ring->tail);
+	writel(i, tx_ring->tail);
 	/* we need this if more than one processor can write to our tail
 	 * at a time, it synchronizes IO on IA64/Altix systems */
 	mmiowb();
 }
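igb_tx_queue_adv walks `count` buffer_info entries starting at next_to_use and wraps the index back to zero at the end of the ring. A user-space sketch of that wrap-around walk (illustrative, with a plain counter in place of the descriptor ring):

#include <stdio.h>

int main(void)
{
	unsigned int ring_count = 8;
	unsigned int i = 6;		/* next_to_use */
	unsigned int count = 4;		/* descriptors to fill */

	do {
		printf("fill descriptor %u\n", i);
		count--;
		i++;
		if (i == ring_count)
			i = 0;	/* wrap to the start of the ring */
	} while (count > 0);

	printf("new next_to_use = %u\n", i);	/* 2 */
	return 0;
}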
 
-static int __igb_maybe_stop_tx(struct net_device *netdev,
-			       struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct net_device *netdev = tx_ring->netdev;
 
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
 
@@ -3382,29 +3546,92 @@
 
 	/* A reprieve! */
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-	++adapter->restart_queue;
+	tx_ring->tx_stats.restart_queue++;
 	return 0;
 }
 
-static int igb_maybe_stop_tx(struct net_device *netdev,
-			     struct igb_ring *tx_ring, int size)
+static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
 	if (igb_desc_unused(tx_ring) >= size)
 		return 0;
-	return __igb_maybe_stop_tx(netdev, tx_ring, size);
+	return __igb_maybe_stop_tx(tx_ring, size);
 }
 
-static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
-					   struct net_device *netdev,
-					   struct igb_ring *tx_ring)
+netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
+				    struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
-	int count = 0;
-	int tso = 0;
-	union skb_shared_tx *shtx;
+	int tso = 0, count;
+	union skb_shared_tx *shtx = skb_tx(skb);
+
+	/* need: 1 descriptor per page,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for skb->data,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time */
+	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+		/* this is a hard error */
+		return NETDEV_TX_BUSY;
+	}
+
+	if (unlikely(shtx->hardware)) {
+		shtx->in_progress = 1;
+		tx_flags |= IGB_TX_FLAGS_TSTAMP;
+	}
+
+	if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
+		tx_flags |= IGB_TX_FLAGS_VLAN;
+		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
+	}
+
+	if (skb->protocol == htons(ETH_P_IP))
+		tx_flags |= IGB_TX_FLAGS_IPV4;
+
+	first = tx_ring->next_to_use;
+	if (skb_is_gso(skb)) {
+		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
+
+		if (tso < 0) {
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+	}
+
+	if (tso)
+		tx_flags |= IGB_TX_FLAGS_TSO;
+	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
+	         (skb->ip_summed == CHECKSUM_PARTIAL))
+		tx_flags |= IGB_TX_FLAGS_CSUM;
+
+	/*
+	 * count reflects descriptors mapped; if 0 or less, then a mapping error
+	 * has occurred and we need to rewind the descriptor queue
+	 */
+	count = igb_tx_map_adv(tx_ring, skb, first);
+	if (count <= 0) {
+		dev_kfree_skb_any(skb);
+		tx_ring->buffer_info[first].time_stamp = 0;
+		tx_ring->next_to_use = first;
+		return NETDEV_TX_OK;
+	}
+
+	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
+
+	/* Make sure there is space in the ring for the next send. */
+	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+
+	return NETDEV_TX_OK;
+}
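The transmit path reserves one descriptor per fragment plus one for skb->data, one for the context descriptor and a two-descriptor gap, so it checks for nr_frags + 4 free slots before mapping anything. A sketch of that accounting against a circular ring (the free-slot formula below is the usual ring-buffer computation, shown as an illustration rather than copied from this patch):

#include <stdio.h>

static unsigned int desc_unused(unsigned int count, unsigned int next_to_use,
				unsigned int next_to_clean)
{
	/* free slots in a circular ring, always keeping one slot open */
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;
	return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	unsigned int nr_frags = 3;
	unsigned int needed = nr_frags + 4;	/* frags + data + context + 2 gap */
	unsigned int unused = desc_unused(256, 250, 10);

	printf("%s\n", unused >= needed ? "queue the frame" : "stop the queue");
	return 0;
}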
+
+static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
+				      struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_ring *tx_ring;
+	int r_idx = 0;
 
 	if (test_bit(__IGB_DOWN, &adapter->state)) {
 		dev_kfree_skb_any(skb);
@@ -3416,81 +3643,6 @@
 		return NETDEV_TX_OK;
 	}
 
-	/* need: 1 descriptor per page,
-	 *       + 2 desc gap to keep tail from touching head,
-	 *       + 1 desc for skb->data,
-	 *       + 1 desc for context descriptor,
-	 * otherwise try next time */
-	if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
-		/* this is a hard error */
-		return NETDEV_TX_BUSY;
-	}
-
-	/*
-	 * TODO: check that there currently is no other packet with
-	 * time stamping in the queue
-	 *
-	 * When doing time stamping, keep the connection to the socket
-	 * a while longer: it is still needed by skb_hwtstamp_tx(),
-	 * called either in igb_tx_hwtstamp() or by our caller when
-	 * doing software time stamping.
-	 */
-	shtx = skb_tx(skb);
-	if (unlikely(shtx->hardware)) {
-		shtx->in_progress = 1;
-		tx_flags |= IGB_TX_FLAGS_TSTAMP;
-	}
-
-	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
-		tx_flags |= IGB_TX_FLAGS_VLAN;
-		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
-	}
-
-	if (skb->protocol == htons(ETH_P_IP))
-		tx_flags |= IGB_TX_FLAGS_IPV4;
-
-	first = tx_ring->next_to_use;
-	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
-					      &hdr_len) : 0;
-
-	if (tso < 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
-	if (tso)
-		tx_flags |= IGB_TX_FLAGS_TSO;
-	else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
-	         (skb->ip_summed == CHECKSUM_PARTIAL))
-		tx_flags |= IGB_TX_FLAGS_CSUM;
-
-	/*
-	 * count reflects descriptors mapped, if 0 then mapping error
-	 * has occured and we need to rewind the descriptor queue
-	 */
-	count = igb_tx_map_adv(adapter, tx_ring, skb, first);
-
-	if (count) {
-		igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
-			         skb->len, hdr_len);
-		/* Make sure there is space in the ring for the next send. */
-		igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
-	} else {
-		dev_kfree_skb_any(skb);
-		tx_ring->buffer_info[first].time_stamp = 0;
-		tx_ring->next_to_use = first;
-	}
-
-	return NETDEV_TX_OK;
-}
-
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
-				      struct net_device *netdev)
-{
-	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct igb_ring *tx_ring;
-
-	int r_idx = 0;
 	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
 	tx_ring = adapter->multi_tx_table[r_idx];
 
@@ -3498,7 +3650,7 @@
 	 * to a flow.  Right now, performance is impacted slightly negatively
 	 * if using multiple tx queues.  If the stack breaks away from a
 	 * single qdisc implementation, we can look at this again. */
-	return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
+	return igb_xmit_frame_ring_adv(skb, tx_ring);
 }
 
 /**
@@ -3512,6 +3664,7 @@
 
 	/* Do the reset outside of interrupt context */
 	adapter->tx_timeout_count++;
+
 	schedule_work(&adapter->reset_task);
 	wr32(E1000_EICS,
 	     (adapter->eims_enable_mask & ~adapter->eims_other));
@@ -3548,16 +3701,17 @@
 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	u32 rx_buffer_len, i;
 
-	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
-	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		dev_err(&pdev->dev, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
 
 	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
+		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
 		return -EINVAL;
 	}
 
@@ -3566,8 +3720,6 @@
 
 	/* igb_down has a dependency on max_frame_size */
 	adapter->max_frame_size = max_frame;
-	if (netif_running(netdev))
-		igb_down(adapter);
 
 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
@@ -3575,35 +3727,23 @@
 	 * i.e. RXBUFFER_2048 --> size-4096 slab
 	 */
 
-	if (max_frame <= IGB_RXBUFFER_256)
-		adapter->rx_buffer_len = IGB_RXBUFFER_256;
-	else if (max_frame <= IGB_RXBUFFER_512)
-		adapter->rx_buffer_len = IGB_RXBUFFER_512;
-	else if (max_frame <= IGB_RXBUFFER_1024)
-		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
-	else if (max_frame <= IGB_RXBUFFER_2048)
-		adapter->rx_buffer_len = IGB_RXBUFFER_2048;
+	if (max_frame <= IGB_RXBUFFER_1024)
+		rx_buffer_len = IGB_RXBUFFER_1024;
+	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
+		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 	else
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-		adapter->rx_buffer_len = IGB_RXBUFFER_16384;
-#else
-		adapter->rx_buffer_len = PAGE_SIZE / 2;
-#endif
+		rx_buffer_len = IGB_RXBUFFER_128;
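With packet split gone, the per-ring buffer length now depends only on the new maximum frame size: standard frames use a fixed buffer, and anything larger falls back to the small header buffer with data landing in half pages. A stand-alone sketch of that selection (constants assumed from the usual Ethernet sizes: 14-byte header, 4-byte FCS, 1522-byte VLAN maximum):

#include <stdio.h>

#define RXBUFFER_128			128	/* header buffer, data goes to pages */
#define RXBUFFER_1024			1024
#define MAX_ETHERNET_VLAN_SIZE		1522

static unsigned int pick_rx_buffer_len(unsigned int mtu)
{
	unsigned int max_frame = mtu + 14 + 4;	/* ETH_HLEN + ETH_FCS_LEN */

	if (max_frame <= RXBUFFER_1024)
		return RXBUFFER_1024;
	else if (max_frame <= MAX_ETHERNET_VLAN_SIZE)
		return MAX_ETHERNET_VLAN_SIZE;
	else
		return RXBUFFER_128;
}

int main(void)
{
	printf("mtu 1500 -> %u\n", pick_rx_buffer_len(1500));	/* 1522 */
	printf("mtu 9000 -> %u\n", pick_rx_buffer_len(9000));	/* 128  */
	return 0;
}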
 
-	/* if sr-iov is enabled we need to force buffer size to 1K or larger */
-	if (adapter->vfs_allocated_count &&
-	    (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
-		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
+	if (netif_running(netdev))
+		igb_down(adapter);
 
-	/* adjust allocation if LPE protects us, and we aren't using SBP */
-	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
-		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-
-	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
+	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
 		 netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;
 
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+
 	if (netif_running(netdev))
 		igb_up(adapter);
 	else
@@ -3624,7 +3764,10 @@
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
+	u32 rnbc;
 	u16 phy_tmp;
+	int i;
+	u64 bytes, packets;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
@@ -3637,6 +3780,29 @@
 	if (pci_channel_offline(pdev))
 		return;
 
+	bytes = 0;
+	packets = 0;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
+		adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
+		netdev->stats.rx_fifo_errors += rqdpc_tmp;
+		bytes += adapter->rx_ring[i].rx_stats.bytes;
+		packets += adapter->rx_ring[i].rx_stats.packets;
+	}
+
+	netdev->stats.rx_bytes = bytes;
+	netdev->stats.rx_packets = packets;
+
+	bytes = 0;
+	packets = 0;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		bytes += adapter->tx_ring[i].tx_stats.bytes;
+		packets += adapter->tx_ring[i].tx_stats.packets;
+	}
+	netdev->stats.tx_bytes = bytes;
+	netdev->stats.tx_packets = packets;
+
+	/* read stats registers */
 	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
 	adapter->stats.gprc += rd32(E1000_GPRC);
 	adapter->stats.gorc += rd32(E1000_GORCL);
@@ -3669,7 +3835,9 @@
 	adapter->stats.gptc += rd32(E1000_GPTC);
 	adapter->stats.gotc += rd32(E1000_GOTCL);
 	rd32(E1000_GOTCH); /* clear GOTCL */
-	adapter->stats.rnbc += rd32(E1000_RNBC);
+	rnbc = rd32(E1000_RNBC);
+	adapter->stats.rnbc += rnbc;
+	netdev->stats.rx_fifo_errors += rnbc;
 	adapter->stats.ruc += rd32(E1000_RUC);
 	adapter->stats.rfc += rd32(E1000_RFC);
 	adapter->stats.rjc += rd32(E1000_RJC);
@@ -3688,7 +3856,6 @@
 	adapter->stats.bptc += rd32(E1000_BPTC);
 
 	/* used for adaptive IFS */
-
 	hw->mac.tx_packet_delta = rd32(E1000_TPT);
 	adapter->stats.tpt += hw->mac.tx_packet_delta;
 	hw->mac.collision_delta = rd32(E1000_COLC);
@@ -3716,33 +3883,6 @@
 
 	/* Rx Errors */
 
-	if (hw->mac.type != e1000_82575) {
-		u32 rqdpc_tmp;
-		u64 rqdpc_total = 0;
-		int i;
-		/* Read out drops stats per RX queue.  Notice RQDPC (Receive
-		 * Queue Drop Packet Count) stats only gets incremented, if
-		 * the DROP_EN but it set (in the SRRCTL register for that
-		 * queue).  If DROP_EN bit is NOT set, then the some what
-		 * equivalent count is stored in RNBC (not per queue basis).
-		 * Also note the drop count is due to lack of available
-		 * descriptors.
-		 */
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
-			adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
-			rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
-		}
-		netdev->stats.rx_fifo_errors = rqdpc_total;
-	}
-
-	/* Note RNBC (Receive No Buffers Count) is an not an exact
-	 * drop count as the hardware FIFO might save the day.  Thats
-	 * one of the reason for saving it in rx_fifo_errors, as its
-	 * potentially not a true drop.
-	 */
-	netdev->stats.rx_fifo_errors += adapter->stats.rnbc;
-
 	/* RLEC on some newer hardware can be incorrect so build
 	 * our own version based on RUC and ROC */
 	netdev->stats.rx_errors = adapter->stats.rxerrc +
@@ -3781,14 +3921,12 @@
 
 static irqreturn_t igb_msix_other(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = data;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = rd32(E1000_ICR);
-
 	/* reading ICR causes bit 31 of EICR to be cleared */
 
-	if(icr & E1000_ICR_DOUTSYNC) {
+	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
 	}
@@ -3804,125 +3942,90 @@
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
+	if (adapter->vfs_allocated_count)
+		wr32(E1000_IMS, E1000_IMS_LSC |
+				E1000_IMS_VMMB |
+				E1000_IMS_DOUTSYNC);
+	else
+		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
 	wr32(E1000_EIMS, adapter->eims_other);
 
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t igb_msix_tx(int irq, void *data)
+static void igb_write_itr(struct igb_q_vector *q_vector)
 {
-	struct igb_ring *tx_ring = data;
-	struct igb_adapter *adapter = tx_ring->adapter;
-	struct e1000_hw *hw = &adapter->hw;
+	u32 itr_val = q_vector->itr_val & 0x7FFC;
 
-#ifdef CONFIG_IGB_DCA
-	if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-		igb_update_tx_dca(tx_ring);
-#endif
+	if (!q_vector->set_itr)
+		return;
 
-	tx_ring->total_bytes = 0;
-	tx_ring->total_packets = 0;
+	if (!itr_val)
+		itr_val = 0x4;
 
-	/* auto mask will automatically reenable the interrupt when we write
-	 * EICS */
-	if (!igb_clean_tx_irq(tx_ring))
-		/* Ring was not completely cleaned, so fire another interrupt */
-		wr32(E1000_EICS, tx_ring->eims_value);
+	if (q_vector->itr_shift)
+		itr_val |= itr_val << q_vector->itr_shift;
 	else
-		wr32(E1000_EIMS, tx_ring->eims_value);
+		itr_val |= 0x8000000;
+
+	writel(itr_val, q_vector->itr_register);
+	q_vector->set_itr = 0;
+}
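igb_write_itr masks the stored value down to its valid bits and, depending on the hardware generation, either mirrors it into the upper half of the register or sets a dedicated bit. A user-space sketch of that encoding (hypothetical shift value; register layouts differ by MAC type):

#include <stdint.h>
#include <stdio.h>

static uint32_t encode_itr(uint32_t itr_val, unsigned int itr_shift)
{
	uint32_t reg = itr_val & 0x7FFCu;	/* keep only the valid interval bits */

	if (!reg)
		reg = 0x4;			/* never write a zero interval */

	if (itr_shift)
		reg |= reg << itr_shift;	/* some parts mirror the value */
	else
		reg |= 0x8000000;		/* others use a dedicated bit */

	return reg;
}

int main(void)
{
	printf("0x%08x\n", encode_itr(976, 16));	/* mirrored encoding */
	printf("0x%08x\n", encode_itr(976, 0));		/* flag-bit encoding */
	return 0;
}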
+
+static irqreturn_t igb_msix_ring(int irq, void *data)
+{
+	struct igb_q_vector *q_vector = data;
+
+	/* Write the ITR value calculated from the previous interrupt. */
+	igb_write_itr(q_vector);
+
+	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
 }
 
-static void igb_write_itr(struct igb_ring *ring)
-{
-	struct e1000_hw *hw = &ring->adapter->hw;
-	if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
-		switch (hw->mac.type) {
-		case e1000_82576:
-			wr32(ring->itr_register, ring->itr_val |
-			     0x80000000);
-			break;
-		default:
-			wr32(ring->itr_register, ring->itr_val |
-			     (ring->itr_val << 16));
-			break;
-		}
-		ring->set_itr = 0;
-	}
-}
-
-static irqreturn_t igb_msix_rx(int irq, void *data)
-{
-	struct igb_ring *rx_ring = data;
-
-	/* Write the ITR value calculated at the end of the
-	 * previous interrupt.
-	 */
-
-	igb_write_itr(rx_ring);
-
-	if (napi_schedule_prep(&rx_ring->napi))
-		__napi_schedule(&rx_ring->napi);
-
 #ifdef CONFIG_IGB_DCA
-	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-		igb_update_rx_dca(rx_ring);
-#endif
-		return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *rx_ring)
+static void igb_update_dca(struct igb_q_vector *q_vector)
 {
-	u32 dca_rxctrl;
-	struct igb_adapter *adapter = rx_ring->adapter;
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	int cpu = get_cpu();
-	int q = rx_ring->reg_idx;
 
-	if (rx_ring->cpu != cpu) {
-		dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
-		if (hw->mac.type == e1000_82576) {
+	if (q_vector->cpu == cpu)
+		goto out_no_update;
+
+	if (q_vector->tx_ring) {
+		int q = q_vector->tx_ring->reg_idx;
+		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
+		if (hw->mac.type == e1000_82575) {
+			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
+			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+		} else {
+			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
+			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+			              E1000_DCA_TXCTRL_CPUID_SHIFT;
+		}
+		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
+		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
+	}
+	if (q_vector->rx_ring) {
+		int q = q_vector->rx_ring->reg_idx;
+		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
+		if (hw->mac.type == e1000_82575) {
+			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
+			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+		} else {
 			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
 			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
 			              E1000_DCA_RXCTRL_CPUID_SHIFT;
-		} else {
-			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
-			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
 		}
 		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
 		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
 		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
 		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
-		rx_ring->cpu = cpu;
 	}
-	put_cpu();
-}
-
-static void igb_update_tx_dca(struct igb_ring *tx_ring)
-{
-	u32 dca_txctrl;
-	struct igb_adapter *adapter = tx_ring->adapter;
-	struct e1000_hw *hw = &adapter->hw;
-	int cpu = get_cpu();
-	int q = tx_ring->reg_idx;
-
-	if (tx_ring->cpu != cpu) {
-		dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
-		if (hw->mac.type == e1000_82576) {
-			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
-			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
-			              E1000_DCA_TXCTRL_CPUID_SHIFT;
-		} else {
-			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
-			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-		}
-		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
-		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
-		tx_ring->cpu = cpu;
-	}
+	q_vector->cpu = cpu;
+out_no_update:
 	put_cpu();
 }
 
@@ -3937,13 +4040,10 @@
 	/* Always use CB2 mode, difference is masked in the CB driver. */
 	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i].cpu = -1;
-		igb_update_tx_dca(&adapter->tx_ring[i]);
-	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].cpu = -1;
-		igb_update_rx_dca(&adapter->rx_ring[i]);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		q_vector->cpu = -1;
+		igb_update_dca(q_vector);
 	}
 }
 
@@ -3951,6 +4051,7 @@
 {
 	struct net_device *netdev = dev_get_drvdata(dev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	unsigned long event = *(unsigned long *)data;
 
@@ -3959,12 +4060,9 @@
 		/* if already enabled, don't do it again */
 		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
 			break;
-		/* Always use CB2 mode, difference is masked
-		 * in the CB driver. */
-		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
 		if (dca_add_requester(dev) == 0) {
 			adapter->flags |= IGB_FLAG_DCA_ENABLED;
-			dev_info(&adapter->pdev->dev, "DCA enabled\n");
+			dev_info(&pdev->dev, "DCA enabled\n");
 			igb_setup_dca(adapter);
 			break;
 		}
@@ -3972,9 +4070,9 @@
 	case DCA_PROVIDER_REMOVE:
 		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
 			/* without this a class_device is left
- 			 * hanging around in the sysfs model */
+			 * hanging around in the sysfs model */
 			dca_remove_requester(dev);
-			dev_info(&adapter->pdev->dev, "DCA disabled\n");
+			dev_info(&pdev->dev, "DCA disabled\n");
 			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
 			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
 		}
@@ -4004,12 +4102,51 @@
 
 	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
 		ping = E1000_PF_CONTROL_MSG;
-		if (adapter->vf_data[i].clear_to_send)
+		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
 			ping |= E1000_VT_MSGTYPE_CTS;
 		igb_write_mbx(hw, &ping, 1, i);
 	}
 }
 
+static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr = rd32(E1000_VMOLR(vf));
+	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+
+	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
+	                    IGB_VF_FLAG_MULTI_PROMISC);
+	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
+		vmolr |= E1000_VMOLR_MPME;
+		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
+	} else {
+		/*
+		 * if we have hashes and we are clearing a multicast promisc
+		 * flag we need to write the hashes to the MTA as this step
+		 * was previously skipped
+		 */
+		if (vf_data->num_vf_mc_hashes > 30) {
+			vmolr |= E1000_VMOLR_MPME;
+		} else if (vf_data->num_vf_mc_hashes) {
+			int j;
+			vmolr |= E1000_VMOLR_ROMPE;
+			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+		}
+	}
+
+	wr32(E1000_VMOLR(vf), vmolr);
+
+	/* if any flags are left unprocessed, they are likely not supported */
+	if (*msgbuf & E1000_VT_MSGINFO_MASK)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
 				  u32 *msgbuf, u32 vf)
 {
@@ -4018,18 +4155,17 @@
 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
 	int i;
 
-	/* only up to 30 hash values supported */
-	if (n > 30)
-		n = 30;
-
-	/* salt away the number of multi cast addresses assigned
+	/* salt away the number of multicast addresses assigned
 	 * to this VF for later use to restore when the PF multicast
 	 * list changes
 	 */
 	vf_data->num_vf_mc_hashes = n;
 
-	/* VFs are limited to using the MTA hash table for their multicast
-	 * addresses */
+	/* only up to 30 hash values supported */
+	if (n > 30)
+		n = 30;
+
+	/* store the hashes for later use */
 	for (i = 0; i < n; i++)
 		vf_data->vf_mc_hashes[i] = hash_list[i];
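The PF remembers how many multicast hashes the VF asked for before clamping the stored list to the 30 entries it can keep; the saved count is what later decides whether the VF must fall back to multicast promiscuous mode. A small sketch of that bookkeeping (illustrative structure and sizes, not the driver's types):

#include <stdio.h>
#include <string.h>

#define MAX_VF_MC_HASHES 30

struct vf_mc_state {
	unsigned int num_hashes;	/* as requested by the VF */
	unsigned short hashes[MAX_VF_MC_HASHES];
};

static void store_vf_hashes(struct vf_mc_state *vf,
			     const unsigned short *list, unsigned int n)
{
	vf->num_hashes = n;		/* remember the real count first */

	if (n > MAX_VF_MC_HASHES)	/* only up to 30 entries are kept */
		n = MAX_VF_MC_HASHES;

	memcpy(vf->hashes, list, n * sizeof(*list));
}

int main(void)
{
	unsigned short list[40] = { 0 };
	struct vf_mc_state vf;

	store_vf_hashes(&vf, list, 40);
	printf("requested %u, kept at most %d\n", vf.num_hashes, MAX_VF_MC_HASHES);
	return 0;
}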
 
@@ -4046,9 +4182,20 @@
 	int i, j;
 
 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
+		u32 vmolr = rd32(E1000_VMOLR(i));
+		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
 		vf_data = &adapter->vf_data[i];
-		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
-			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+
+		if ((vf_data->num_vf_mc_hashes > 30) ||
+		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
+			vmolr |= E1000_VMOLR_MPME;
+		} else if (vf_data->num_vf_mc_hashes) {
+			vmolr |= E1000_VMOLR_ROMPE;
+			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+		}
+		wr32(E1000_VMOLR(i), vmolr);
 	}
 }
 
@@ -4086,7 +4233,11 @@
 	struct e1000_hw *hw = &adapter->hw;
 	u32 reg, i;
 
-	/* It is an error to call this function when VFs are not enabled */
+	/* The vlvf table only exists on 82576 hardware and newer */
+	if (hw->mac.type < e1000_82576)
+		return -1;
+
+	/* we only need to do this if VMDq is enabled */
 	if (!adapter->vfs_allocated_count)
 		return -1;
 
@@ -4116,16 +4267,12 @@
 
 			/* if !enabled we need to set this up in vfta */
 			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
-				/* add VID to filter table, if bit already set
-				 * PF must have added it outside of table */
-				if (igb_vfta_set(hw, vid, true))
-					reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
-						adapter->vfs_allocated_count);
+				/* add VID to filter table */
+				igb_vfta_set(hw, vid, true);
 				reg |= E1000_VLVF_VLANID_ENABLE;
 			}
 			reg &= ~E1000_VLVF_VLANID_MASK;
 			reg |= vid;
-
 			wr32(E1000_VLVF(i), reg);
 
 			/* do not modify RLPML for PF devices */
@@ -4141,8 +4288,8 @@
 				reg |= size;
 				wr32(E1000_VMOLR(vf), reg);
 			}
-			adapter->vf_data[vf].vlans_enabled++;
 
+			adapter->vf_data[vf].vlans_enabled++;
 			return 0;
 		}
 	} else {
@@ -4184,15 +4331,14 @@
 	return igb_vlvf_set(adapter, vid, add, vf);
 }
 
-static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
 {
-	struct e1000_hw *hw = &adapter->hw;
-
-	/* disable mailbox functionality for vf */
-	adapter->vf_data[vf].clear_to_send = false;
+	/* clear all flags */
+	adapter->vf_data[vf].flags = 0;
+	adapter->vf_data[vf].last_nack = jiffies;
 
 	/* reset offloads to defaults */
-	igb_set_vmolr(hw, vf);
+	igb_set_vmolr(adapter, vf);
 
 	/* reset vlans for device */
 	igb_clear_vf_vfta(adapter, vf);
@@ -4204,7 +4350,18 @@
 	igb_set_rx_mode(adapter->netdev);
 }
 
-static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
+static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+{
+	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+
+	/* generate a new mac address as we were hotplug removed/added */
+	random_ether_addr(vf_mac);
+
+	/* process remaining reset events */
+	igb_vf_reset(adapter, vf);
+}
+
+static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
@@ -4213,7 +4370,7 @@
 	u8 *addr = (u8 *)(&msgbuf[1]);
 
 	/* process all the same items cleared in a function level reset */
-	igb_vf_reset_event(adapter, vf);
+	igb_vf_reset(adapter, vf);
 
 	/* set vf mac address */
 	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
@@ -4224,8 +4381,7 @@
 	reg = rd32(E1000_VFRE);
 	wr32(E1000_VFRE, reg | (1 << vf));
 
-	/* enable mailbox functionality for vf */
-	adapter->vf_data[vf].clear_to_send = true;
+	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
 
 	/* reply to reset with ack and vf mac address */
 	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -4235,66 +4391,45 @@
 
 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
 {
-		unsigned char *addr = (char *)&msg[1];
-		int err = -1;
+	unsigned char *addr = (char *)&msg[1];
+	int err = -1;
 
-		if (is_valid_ether_addr(addr))
-			err = igb_set_vf_mac(adapter, vf, addr);
+	if (is_valid_ether_addr(addr))
+		err = igb_set_vf_mac(adapter, vf, addr);
 
-		return err;
-
+	return err;
 }
 
 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
 {
 	struct e1000_hw *hw = &adapter->hw;
+	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
 	u32 msg = E1000_VT_MSGTYPE_NACK;
 
 	/* if the device isn't clear to send, it shouldn't be reading either */
-	if (!adapter->vf_data[vf].clear_to_send)
+	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
+	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
 		igb_write_mbx(hw, &msg, 1, vf);
-}
-
-
-static void igb_msg_task(struct igb_adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	u32 vf;
-
-	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
-		/* process any reset requests */
-		if (!igb_check_for_rst(hw, vf)) {
-			adapter->vf_data[vf].clear_to_send = false;
-			igb_vf_reset_event(adapter, vf);
-		}
-
-		/* process any messages pending */
-		if (!igb_check_for_msg(hw, vf))
-			igb_rcv_msg_from_vf(adapter, vf);
-
-		/* process any acks */
-		if (!igb_check_for_ack(hw, vf))
-			igb_rcv_ack_from_vf(adapter, vf);
-
+		vf_data->last_nack = jiffies;
 	}
 }
 
-static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
+static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 {
-	u32 mbx_size = E1000_VFMAILBOX_SIZE;
-	u32 msgbuf[mbx_size];
+	struct pci_dev *pdev = adapter->pdev;
+	u32 msgbuf[E1000_VFMAILBOX_SIZE];
 	struct e1000_hw *hw = &adapter->hw;
+	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
 	s32 retval;
 
-	retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);
+	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
 
 	if (retval)
-		dev_err(&adapter->pdev->dev,
-		        "Error receiving message from VF\n");
+		dev_err(&pdev->dev, "Error receiving message from VF\n");
 
 	/* this is a message we already processed, do nothing */
 	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
-		return retval;
+		return;
 
 	/*
 	 * until the vf completes a reset it should not be
@@ -4303,20 +4438,25 @@
 
 	if (msgbuf[0] == E1000_VF_RESET) {
 		igb_vf_reset_msg(adapter, vf);
-
-		return retval;
+		return;
 	}
 
-	if (!adapter->vf_data[vf].clear_to_send) {
-		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
-		igb_write_mbx(hw, msgbuf, 1, vf);
-		return retval;
+	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
+		msgbuf[0] = E1000_VT_MSGTYPE_NACK;
+		if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
+			igb_write_mbx(hw, msgbuf, 1, vf);
+			vf_data->last_nack = jiffies;
+		}
+		return;
 	}
 
 	switch ((msgbuf[0] & 0xFFFF)) {
 	case E1000_VF_SET_MAC_ADDR:
 		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
 		break;
+	case E1000_VF_SET_PROMISC:
+		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
+		break;
 	case E1000_VF_SET_MULTICAST:
 		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
 		break;
@@ -4327,7 +4467,7 @@
 		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
 		break;
 	default:
-		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
+		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
 		retval = -1;
 		break;
 	}
@@ -4341,8 +4481,26 @@
 	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
 
 	igb_write_mbx(hw, msgbuf, 1, vf);
+}
 
-	return retval;
+static void igb_msg_task(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vf;
+
+	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+		/* process any reset requests */
+		if (!igb_check_for_rst(hw, vf))
+			igb_vf_reset_event(adapter, vf);
+
+		/* process any messages pending */
+		if (!igb_check_for_msg(hw, vf))
+			igb_rcv_msg_from_vf(adapter, vf);
+
+		/* process any acks */
+		if (!igb_check_for_ack(hw, vf))
+			igb_rcv_ack_from_vf(adapter, vf);
+	}
 }
 
 /**
@@ -4379,15 +4537,15 @@
  **/
 static irqreturn_t igb_intr_msi(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = data;
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
 	struct e1000_hw *hw = &adapter->hw;
 	/* read ICR disables interrupts using IAM */
 	u32 icr = rd32(E1000_ICR);
 
-	igb_write_itr(adapter->rx_ring);
+	igb_write_itr(q_vector);
 
-	if(icr & E1000_ICR_DOUTSYNC) {
+	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
 	}
@@ -4398,7 +4556,7 @@
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	napi_schedule(&adapter->rx_ring[0].napi);
+	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
 }
@@ -4410,8 +4568,8 @@
  **/
 static irqreturn_t igb_intr(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = data;
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
 	struct e1000_hw *hw = &adapter->hw;
 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
 	 * need for the IMC write */
@@ -4419,14 +4577,14 @@
 	if (!icr)
 		return IRQ_NONE;  /* Not our interrupt */
 
-	igb_write_itr(adapter->rx_ring);
+	igb_write_itr(q_vector);
 
 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
 	 * not set, then the adapter didn't send an interrupt */
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
-	if(icr & E1000_ICR_DOUTSYNC) {
+	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
 	}
@@ -4438,26 +4596,27 @@
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	napi_schedule(&adapter->rx_ring[0].napi);
+	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
 }
 
-static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
+static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 
-	if (adapter->itr_setting & 3) {
-		if (adapter->num_rx_queues == 1)
+	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
+	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
+		if (!adapter->msix_entries)
 			igb_set_itr(adapter);
 		else
-			igb_update_ring_itr(rx_ring);
+			igb_update_ring_itr(q_vector);
 	}
 
 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
 		if (adapter->msix_entries)
-			wr32(E1000_EIMS, rx_ring->eims_value);
+			wr32(E1000_EIMS, q_vector->eims_value);
 		else
 			igb_irq_enable(adapter);
 	}
@@ -4470,76 +4629,94 @@
  **/
 static int igb_poll(struct napi_struct *napi, int budget)
 {
-	struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
-	int work_done = 0;
+	struct igb_q_vector *q_vector = container_of(napi,
+	                                             struct igb_q_vector,
+	                                             napi);
+	int tx_clean_complete = 1, work_done = 0;
 
 #ifdef CONFIG_IGB_DCA
-	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-		igb_update_rx_dca(rx_ring);
+	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
+		igb_update_dca(q_vector);
 #endif
-	igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
+	if (q_vector->tx_ring)
+		tx_clean_complete = igb_clean_tx_irq(q_vector);
 
-	if (rx_ring->buddy) {
-#ifdef CONFIG_IGB_DCA
-		if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-			igb_update_tx_dca(rx_ring->buddy);
-#endif
-		if (!igb_clean_tx_irq(rx_ring->buddy))
-			work_done = budget;
-	}
+	if (q_vector->rx_ring)
+		igb_clean_rx_irq_adv(q_vector, &work_done, budget);
+
+	if (!tx_clean_complete)
+		work_done = budget;
 
 	/* If not enough Rx work done, exit the polling mode */
 	if (work_done < budget) {
 		napi_complete(napi);
-		igb_rx_irq_enable(rx_ring);
+		igb_ring_irq_enable(q_vector);
 	}
 
 	return work_done;
 }
 
 /**
- * igb_hwtstamp - utility function which checks for TX time stamp
+ * igb_systim_to_hwtstamp - convert system time value to hw timestamp
  * @adapter: board private structure
+ * @shhwtstamps: timestamp structure to update
+ * @regval: unsigned 64-bit system time value
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions
+ */
+static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+                                   struct skb_shared_hwtstamps *shhwtstamps,
+                                   u64 regval)
+{
+	u64 ns;
+
+	ns = timecounter_cyc2time(&adapter->clock, regval);
+	timecompare_update(&adapter->compare, ns);
+	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+	shhwtstamps->hwtstamp = ns_to_ktime(ns);
+	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
+}
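Both the TX and RX paths read the latched hardware timestamp as two 32-bit registers and splice them into one 64-bit cycle count before handing it to igb_systim_to_hwtstamp. A tiny sketch of that splice (hypothetical register values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stmp_lo = 0x89ABCDEFu;	/* e.g. the low timestamp register */
	uint32_t stmp_hi = 0x01234567u;	/* e.g. the high timestamp register */
	uint64_t regval;

	regval = stmp_lo;
	regval |= (uint64_t)stmp_hi << 32;

	printf("systim = 0x%016llx\n", (unsigned long long)regval);
	return 0;
}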
+
+/**
+ * igb_tx_hwtstamp - utility function which checks for TX time stamp
+ * @q_vector: pointer to q_vector containing needed info
  * @skb: packet that was just sent
  *
  * If we were asked to do hardware stamping and such a time stamp is
  * available, then it must have been for this skb here because we only
  * allow only one such packet into the queue.
  */
-static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
 {
+	struct igb_adapter *adapter = q_vector->adapter;
 	union skb_shared_tx *shtx = skb_tx(skb);
 	struct e1000_hw *hw = &adapter->hw;
+	struct skb_shared_hwtstamps shhwtstamps;
+	u64 regval;
 
-	if (unlikely(shtx->hardware)) {
-		u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
-		if (valid) {
-			u64 regval = rd32(E1000_TXSTMPL);
-			u64 ns;
-			struct skb_shared_hwtstamps shhwtstamps;
+	/* exit if no hw timestamp was requested or the TX stamp is not valid */
+	if (likely(!shtx->hardware) ||
+	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
+		return;
 
-			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-			regval |= (u64)rd32(E1000_TXSTMPH) << 32;
-			ns = timecounter_cyc2time(&adapter->clock,
-						  regval);
-			timecompare_update(&adapter->compare, ns);
-			shhwtstamps.hwtstamp = ns_to_ktime(ns);
-			shhwtstamps.syststamp =
-				timecompare_transform(&adapter->compare, ns);
-			skb_tstamp_tx(skb, &shhwtstamps);
-		}
-	}
+	regval = rd32(E1000_TXSTMPL);
+	regval |= (u64)rd32(E1000_TXSTMPH) << 32;
+
+	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+	skb_tstamp_tx(skb, &shhwtstamps);
 }
 
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: pointer to q_vector containing needed info
  * returns true if ring is completely cleaned
  **/
-static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
-	struct igb_adapter *adapter = tx_ring->adapter;
-	struct net_device *netdev = adapter->netdev;
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *tx_ring = q_vector->tx_ring;
+	struct net_device *netdev = tx_ring->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
@@ -4570,10 +4747,10 @@
 				total_packets += segs;
 				total_bytes += bytecount;
 
-				igb_tx_hwtstamp(adapter, skb);
+				igb_tx_hwtstamp(q_vector, skb);
 			}
 
-			igb_unmap_and_free_tx_resource(adapter, buffer_info);
+			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 			tx_desc->wb.status = 0;
 
 			i++;
@@ -4596,7 +4773,7 @@
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			++adapter->restart_queue;
+			tx_ring->tx_stats.restart_queue++;
 		}
 	}
 
@@ -4611,7 +4788,7 @@
 			 E1000_STATUS_TXOFF)) {
 
 			/* detected Tx unit hang */
-			dev_err(&adapter->pdev->dev,
+			dev_err(&tx_ring->pdev->dev,
 				"Detected Tx Unit Hang\n"
 				"  Tx Queue             <%d>\n"
 				"  TDH                  <%x>\n"
@@ -4624,11 +4801,11 @@
 				"  jiffies              <%lx>\n"
 				"  desc.status          <%x>\n",
 				tx_ring->queue_index,
-				readl(adapter->hw.hw_addr + tx_ring->head),
-				readl(adapter->hw.hw_addr + tx_ring->tail),
+				readl(tx_ring->head),
+				readl(tx_ring->tail),
 				tx_ring->next_to_use,
 				tx_ring->next_to_clean,
-				tx_ring->buffer_info[i].time_stamp,
+				tx_ring->buffer_info[eop].time_stamp,
 				eop,
 				jiffies,
 				eop_desc->wb.status);
@@ -4639,43 +4816,38 @@
 	tx_ring->total_packets += total_packets;
 	tx_ring->tx_stats.bytes += total_bytes;
 	tx_ring->tx_stats.packets += total_packets;
-	netdev->stats.tx_bytes += total_bytes;
-	netdev->stats.tx_packets += total_packets;
 	return (count < tx_ring->count);
 }
 
 /**
  * igb_receive_skb - helper function to handle rx indications
- * @ring: pointer to receive ring receving this packet
- * @status: descriptor status field as written by hardware
- * @rx_desc: receive descriptor containing vlan and type information.
- * @skb: pointer to sk_buff to be indicated to stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
  **/
-static void igb_receive_skb(struct igb_ring *ring, u8 status,
-                            union e1000_adv_rx_desc * rx_desc,
-                            struct sk_buff *skb)
+static void igb_receive_skb(struct igb_q_vector *q_vector,
+                            struct sk_buff *skb,
+                            u16 vlan_tag)
 {
-	struct igb_adapter * adapter = ring->adapter;
-	bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
+	struct igb_adapter *adapter = q_vector->adapter;
 
-	skb_record_rx_queue(skb, ring->queue_index);
-	if (vlan_extracted)
-		vlan_gro_receive(&ring->napi, adapter->vlgrp,
-		                 le16_to_cpu(rx_desc->wb.upper.vlan),
-		                 skb);
+	if (vlan_tag)
+		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
+		                 vlan_tag, skb);
 	else
-		napi_gro_receive(&ring->napi, skb);
+		napi_gro_receive(&q_vector->napi, skb);
 }
 
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 				       u32 status_err, struct sk_buff *skb)
 {
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
-	if ((status_err & E1000_RXD_STAT_IXSM) ||
-	    (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
+	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
+	     (status_err & E1000_RXD_STAT_IXSM))
 		return;
+
 	/* TCP/UDP checksum error bit is set */
 	if (status_err &
 	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4684,9 +4856,10 @@
 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
 		 * packets, (aka let the stack check the crc32c)
 		 */
-		if (!((adapter->hw.mac.type == e1000_82576) &&
-		      (skb->len == 60)))
-			adapter->hw_csum_err++;
+		if (!((skb->len == 60) &&
+		      (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)))
+			ring->rx_stats.csum_err++;
+
 		/* let the stack verify checksum errors */
 		return;
 	}
@@ -4694,11 +4867,38 @@
 	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
-	adapter->hw_csum_good++;
+	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
 }
 
-static inline u16 igb_get_hlen(struct igb_adapter *adapter,
+static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+                                   struct sk_buff *skb)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct e1000_hw *hw = &adapter->hw;
+	u64 regval;
+
+	/*
+	 * If this bit is set, then the RX registers contain the time stamp. No
+	 * other packet will be time stamped until we read these registers, so
+	 * read the registers to make them available again. Because only one
+	 * packet can be time stamped at a time, we know that the register
+	 * values must belong to this one here and therefore we don't need to
+	 * compare any of the additional attributes stored for it.
+	 *
+	 * If nothing went wrong, then it should have a skb_shared_tx that we
+	 * can turn into a skb_shared_hwtstamps.
+	 */
+	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
+		return;
+	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+		return;
+
+	regval = rd32(E1000_RXSTMPL);
+	regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+
+	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                                union e1000_adv_rx_desc *rx_desc)
 {
 	/* HW will not DMA in data larger than the given buffer, even if it
@@ -4707,18 +4907,17 @@
 	 */
 	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
 	           E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-	if (hlen > adapter->rx_ps_hdr_size)
-		hlen = adapter->rx_ps_hdr_size;
+	if (hlen > rx_ring->rx_buffer_len)
+		hlen = rx_ring->rx_buffer_len;
 	return hlen;
 }
 
-static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
-				 int *work_done, int budget)
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
+                                 int *work_done, int budget)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
-	struct net_device *netdev = adapter->netdev;
-	struct e1000_hw *hw = &adapter->hw;
-	struct pci_dev *pdev = adapter->pdev;
+	struct igb_ring *rx_ring = q_vector->rx_ring;
+	struct net_device *netdev = rx_ring->netdev;
+	struct pci_dev *pdev = rx_ring->pdev;
 	union e1000_adv_rx_desc *rx_desc, *next_rxd;
 	struct igb_buffer *buffer_info, *next_buffer;
 	struct sk_buff *skb;
@@ -4728,6 +4927,7 @@
 	unsigned int i;
 	u32 staterr;
 	u16 length;
+	u16 vlan_tag;
 
 	i = rx_ring->next_to_clean;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -4746,6 +4946,7 @@
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
+
 		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
 		prefetch(next_rxd);
 		next_buffer = &rx_ring->buffer_info[i];
@@ -4754,23 +4955,16 @@
 		cleaned = true;
 		cleaned_count++;
 
-		/* this is the fast path for the non-packet split case */
-		if (!adapter->rx_ps_hdr_size) {
-			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
-			buffer_info->dma = 0;
-			skb_put(skb, length);
-			goto send_up;
-		}
-
 		if (buffer_info->dma) {
-			u16 hlen = igb_get_hlen(adapter, rx_desc);
 			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_ps_hdr_size,
+					 rx_ring->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
-			skb_put(skb, hlen);
+			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
+				skb_put(skb, length);
+				goto send_up;
+			}
+			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
 		}
 
 		if (length) {
@@ -4783,15 +4977,13 @@
 						buffer_info->page_offset,
 						length);
 
-			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
-			    (page_count(buffer_info->page) != 1))
+			if (page_count(buffer_info->page) != 1)
 				buffer_info->page = NULL;
 			else
 				get_page(buffer_info->page);
 
 			skb->len += length;
 			skb->data_len += length;
-
 			skb->truesize += length;
 		}
 
@@ -4803,60 +4995,24 @@
 			goto next_desc;
 		}
 send_up:
-		/*
-		 * If this bit is set, then the RX registers contain
-		 * the time stamp. No other packet will be time
-		 * stamped until we read these registers, so read the
-		 * registers to make them available again. Because
-		 * only one packet can be time stamped at a time, we
-		 * know that the register values must belong to this
-		 * one here and therefore we don't need to compare
-		 * any of the additional attributes stored for it.
-		 *
-		 * If nothing went wrong, then it should have a
-		 * skb_shared_tx that we can turn into a
-		 * skb_shared_hwtstamps.
-		 *
-		 * TODO: can time stamping be triggered (thus locking
-		 * the registers) without the packet reaching this point
-		 * here? In that case RX time stamping would get stuck.
-		 *
-		 * TODO: in "time stamp all packets" mode this bit is
-		 * not set. Need a global flag for this mode and then
-		 * always read the registers. Cannot be done without
-		 * a race condition.
-		 */
-		if (unlikely(staterr & E1000_RXD_STAT_TS)) {
-			u64 regval;
-			u64 ns;
-			struct skb_shared_hwtstamps *shhwtstamps =
-				skb_hwtstamps(skb);
-
-			WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
-			     "igb: no RX time stamp available for time stamped packet");
-			regval = rd32(E1000_RXSTMPL);
-			regval |= (u64)rd32(E1000_RXSTMPH) << 32;
-			ns = timecounter_cyc2time(&adapter->clock, regval);
-			timecompare_update(&adapter->compare, ns);
-			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-			shhwtstamps->hwtstamp = ns_to_ktime(ns);
-			shhwtstamps->syststamp =
-				timecompare_transform(&adapter->compare, ns);
-		}
-
 		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
 
+		igb_rx_hwtstamp(q_vector, staterr, skb);
 		total_bytes += skb->len;
 		total_packets++;
 
-		igb_rx_checksum_adv(adapter, staterr, skb);
+		igb_rx_checksum_adv(rx_ring, staterr, skb);
 
 		skb->protocol = eth_type_trans(skb, netdev);
+		skb_record_rx_queue(skb, rx_ring->queue_index);
 
-		igb_receive_skb(rx_ring, staterr, rx_desc, skb);
+		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
+		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
+
+		igb_receive_skb(q_vector, skb, vlan_tag);
 
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
@@ -4883,8 +5039,6 @@
 	rx_ring->total_bytes += total_bytes;
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
-	netdev->stats.rx_bytes += total_bytes;
-	netdev->stats.rx_packets += total_packets;
 	return cleaned;
 }
 
@@ -4892,12 +5046,9 @@
  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
-				     int cleaned_count)
+void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
+	struct net_device *netdev = rx_ring->netdev;
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
@@ -4907,19 +5058,16 @@
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
 
-	if (adapter->rx_ps_hdr_size)
-		bufsz = adapter->rx_ps_hdr_size;
-	else
-		bufsz = adapter->rx_buffer_len;
+	bufsz = rx_ring->rx_buffer_len;
 
 	while (cleaned_count--) {
 		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 
-		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
+		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
 			if (!buffer_info->page) {
-				buffer_info->page = alloc_page(GFP_ATOMIC);
+				buffer_info->page = netdev_alloc_page(netdev);
 				if (!buffer_info->page) {
-					adapter->alloc_rx_buff_failed++;
+					rx_ring->rx_stats.alloc_failed++;
 					goto no_buffers;
 				}
 				buffer_info->page_offset = 0;
@@ -4927,33 +5075,48 @@
 				buffer_info->page_offset ^= PAGE_SIZE / 2;
 			}
 			buffer_info->page_dma =
-				pci_map_page(pdev, buffer_info->page,
+				pci_map_page(rx_ring->pdev, buffer_info->page,
 					     buffer_info->page_offset,
 					     PAGE_SIZE / 2,
 					     PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(rx_ring->pdev,
+			                          buffer_info->page_dma)) {
+				buffer_info->page_dma = 0;
+				rx_ring->rx_stats.alloc_failed++;
+				goto no_buffers;
+			}
 		}
 
-		if (!buffer_info->skb) {
+		skb = buffer_info->skb;
+		if (!skb) {
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 			if (!skb) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_failed++;
 				goto no_buffers;
 			}
 
 			buffer_info->skb = skb;
-			buffer_info->dma = pci_map_single(pdev, skb->data,
+		}
+		if (!buffer_info->dma) {
+			buffer_info->dma = pci_map_single(rx_ring->pdev,
+			                                  skb->data,
 							  bufsz,
 							  PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(rx_ring->pdev,
+			                          buffer_info->dma)) {
+				buffer_info->dma = 0;
+				rx_ring->rx_stats.alloc_failed++;
+				goto no_buffers;
+			}
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
-		if (adapter->rx_ps_hdr_size) {
+		if (bufsz < IGB_RXBUFFER_1024) {
 			rx_desc->read.pkt_addr =
 			     cpu_to_le64(buffer_info->page_dma);
 			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
 		} else {
-			rx_desc->read.pkt_addr =
-			     cpu_to_le64(buffer_info->dma);
+			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
 			rx_desc->read.hdr_addr = 0;
 		}
 
@@ -4976,7 +5139,7 @@
 		 * applicable for weak-ordered memory model archs,
 		 * such as IA-64). */
 		wmb();
-		writel(i, adapter->hw.hw_addr + rx_ring->tail);
+		writel(i, rx_ring->tail);
 	}
 }
 
@@ -5035,13 +5198,11 @@
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct hwtstamp_config config;
-	u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
-	u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
-	u32 tsync_rx_ctl_type = 0;
+	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
 	u32 tsync_rx_cfg = 0;
-	int is_l4 = 0;
-	int is_l2 = 0;
-	short port = 319; /* PTP */
+	bool is_l4 = false;
+	bool is_l2 = false;
 	u32 regval;
 
 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -5053,10 +5214,8 @@
 
 	switch (config.tx_type) {
 	case HWTSTAMP_TX_OFF:
-		tsync_tx_ctl_bit = 0;
-		break;
+		tsync_tx_ctl = 0;
 	case HWTSTAMP_TX_ON:
-		tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
 		break;
 	default:
 		return -ERANGE;
@@ -5064,7 +5223,7 @@
 
 	switch (config.rx_filter) {
 	case HWTSTAMP_FILTER_NONE:
-		tsync_rx_ctl_bit = 0;
+		tsync_rx_ctl = 0;
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -5075,86 +5234,97 @@
 		 * possible to time stamp both Sync and Delay_Req messages
 		 * => fall back to time stamping all packets
 		 */
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
 		config.rx_filter = HWTSTAMP_FILTER_ALL;
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
-		is_l4 = 1;
+		is_l4 = true;
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
 		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
-		is_l4 = 1;
+		is_l4 = true;
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
 		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
-		is_l2 = 1;
-		is_l4 = 1;
+		is_l2 = true;
+		is_l4 = true;
 		config.rx_filter = HWTSTAMP_FILTER_SOME;
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
 		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
-		is_l2 = 1;
-		is_l4 = 1;
+		is_l2 = true;
+		is_l4 = true;
 		config.rx_filter = HWTSTAMP_FILTER_SOME;
 		break;
 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-		is_l2 = 1;
+		is_l2 = true;
 		break;
 	default:
 		return -ERANGE;
 	}
 
+	if (hw->mac.type == e1000_82575) {
+		if (tsync_rx_ctl | tsync_tx_ctl)
+			return -EINVAL;
+		return 0;
+	}
+
 	/* enable/disable TX */
 	regval = rd32(E1000_TSYNCTXCTL);
-	regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
+	regval &= ~E1000_TSYNCTXCTL_ENABLED;
+	regval |= tsync_tx_ctl;
 	wr32(E1000_TSYNCTXCTL, regval);
 
-	/* enable/disable RX, define which PTP packets are time stamped */
+	/* enable/disable RX */
 	regval = rd32(E1000_TSYNCRXCTL);
-	regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
-	regval = (regval & ~0xE) | tsync_rx_ctl_type;
+	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+	regval |= tsync_rx_ctl;
 	wr32(E1000_TSYNCRXCTL, regval);
+
+	/* define which PTP packets are time stamped */
 	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
 
-	/*
-	 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
-	 *                                          (Ethertype to filter on)
-	 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
-	 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
-	 */
-	wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
+	/* define ethertype filter for timestamped packets */
+	if (is_l2)
+		wr32(E1000_ETQF(3),
+		                (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+		                 E1000_ETQF_1588 | /* enable timestamping */
+		                 ETH_P_1588));     /* 1588 eth protocol type */
+	else
+		wr32(E1000_ETQF(3), 0);
 
-	/* L4 Queue Filter[0]: only filter by source and destination port */
-	wr32(E1000_SPQF0, htons(port));
-	wr32(E1000_IMIREXT(0), is_l4 ?
-	     ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
-	wr32(E1000_IMIR(0), is_l4 ?
-	     (htons(port)
-	      | (0<<16) /* immediate interrupt disabled */
-	      | 0 /* (1<<17) bit cleared: do not bypass
-		     destination port check */)
-		: 0);
-	wr32(E1000_FTQF0, is_l4 ?
-	     (0x11 /* UDP */
-	      | (1<<15) /* VF not compared */
-	      | (1<<27) /* Enable Timestamping */
-	      | (7<<28) /* only source port filter enabled,
-			   source/target address and protocol
-			   masked */)
-	     : ((1<<15) | (15<<28) /* all mask bits set = filter not
-				      enabled */));
+#define PTP_PORT 319
+	/* L4 Queue Filter[3]: filter by destination port and protocol */
+	if (is_l4) {
+		u32 ftqf = (IPPROTO_UDP /* UDP */
+			| E1000_FTQF_VF_BP /* VF not compared */
+			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+			| E1000_FTQF_MASK); /* mask all inputs */
+		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
 
+		wr32(E1000_IMIR(3), htons(PTP_PORT));
+		wr32(E1000_IMIREXT(3),
+		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+		if (hw->mac.type == e1000_82576) {
+			/* enable source port check */
+			wr32(E1000_SPQF(3), htons(PTP_PORT));
+			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+		}
+		wr32(E1000_FTQF(3), ftqf);
+	} else {
+		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
+	}
 	wrfl();
 
 	adapter->hwtstamp_config = config;
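This handler is reached through the SIOCSHWTSTAMP ioctl. A minimal userspace sketch of the request that arrives in 'config' above (interface name, socket setup and filter choice are illustrative):

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	/* Ask the NIC to timestamp transmitted packets and PTPv2 event messages. */
	static int request_hw_timestamps(const char *ifname)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;
		int sock, ret;

		sock = socket(AF_INET, SOCK_DGRAM, 0);
		if (sock < 0)
			return -1;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		/* the driver may adjust cfg.rx_filter, e.g. to HWTSTAMP_FILTER_ALL */
		ret = ioctl(sock, SIOCSHWTSTAMP, &ifr);
		close(sock);
		return ret;
	}

Note that with the new 82575 check above, a request that tries to enable timestamping on that MAC now fails with -EINVAL, whereas the earlier code programmed the TSYNC registers regardless of MAC type.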
@@ -5231,21 +5401,15 @@
 		ctrl |= E1000_CTRL_VME;
 		wr32(E1000_CTRL, ctrl);
 
-		/* enable VLAN receive filtering */
+		/* Disable CFI check */
 		rctl = rd32(E1000_RCTL);
 		rctl &= ~E1000_RCTL_CFIEN;
 		wr32(E1000_RCTL, rctl);
-		igb_update_mng_vlan(adapter);
 	} else {
 		/* disable VLAN tag insert/strip */
 		ctrl = rd32(E1000_CTRL);
 		ctrl &= ~E1000_CTRL_VME;
 		wr32(E1000_CTRL, ctrl);
-
-		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
-			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
-		}
 	}
 
 	igb_rlpml_set(adapter);
@@ -5260,16 +5424,11 @@
 	struct e1000_hw *hw = &adapter->hw;
 	int pf_id = adapter->vfs_allocated_count;
 
-	if ((hw->mng_cookie.status &
-	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-	    (vid == adapter->mng_vlan_id))
-		return;
+	/* attempt to add filter to vlvf array */
+	igb_vlvf_set(adapter, vid, true, pf_id);
 
-	/* add vid to vlvf if sr-iov is enabled,
-	 * if that fails add directly to filter table */
-	if (igb_vlvf_set(adapter, vid, true, pf_id))
-		igb_vfta_set(hw, vid, true);
-
+	/* add the filter since PF can receive vlans w/o entry in vlvf */
+	igb_vfta_set(hw, vid, true);
 }
 
 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -5277,6 +5436,7 @@
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	int pf_id = adapter->vfs_allocated_count;
+	s32 err;
 
 	igb_irq_disable(adapter);
 	vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -5284,17 +5444,11 @@
 	if (!test_bit(__IGB_DOWN, &adapter->state))
 		igb_irq_enable(adapter);
 
-	if ((adapter->hw.mng_cookie.status &
-	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-	    (vid == adapter->mng_vlan_id)) {
-		/* release control to f/w */
-		igb_release_hw_control(adapter);
-		return;
-	}
+	/* remove vlan from VLVF table array */
+	err = igb_vlvf_set(adapter, vid, false, pf_id);
 
-	/* remove vid from vlvf if sr-iov is enabled,
-	 * if not in vlvf remove from vfta */
-	if (igb_vlvf_set(adapter, vid, false, pf_id))
+	/* if vid was not present in VLVF just remove it from table */
+	if (err)
 		igb_vfta_set(hw, vid, false);
 }
 
@@ -5314,6 +5468,7 @@
 
 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 
 	mac->autoneg = 0;
@@ -5337,8 +5492,7 @@
 		break;
 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
 	default:
-		dev_err(&adapter->pdev->dev,
-			"Unsupported Speed/Duplex configuration\n");
+		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -5360,9 +5514,7 @@
 	if (netif_running(netdev))
 		igb_close(netdev);
 
-	igb_reset_interrupt_capability(adapter);
-
-	igb_free_queues(adapter);
+	igb_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PM
 	retval = pci_save_state(pdev);
@@ -5394,7 +5546,7 @@
 		wr32(E1000_CTRL, ctrl);
 
 		/* Allow time for pending master requests to run */
-		igb_disable_pcie_master(&adapter->hw);
+		igb_disable_pcie_master(hw);
 
 		wr32(E1000_WUC, E1000_WUC_PME_EN);
 		wr32(E1000_WUFC, wufc);
@@ -5457,9 +5609,7 @@
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	igb_set_interrupt_capability(adapter);
-
-	if (igb_alloc_queues(adapter)) {
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
@@ -5511,22 +5661,16 @@
 	int i;
 
 	if (!adapter->msix_entries) {
+		struct igb_q_vector *q_vector = adapter->q_vector[0];
 		igb_irq_disable(adapter);
-		napi_schedule(&adapter->rx_ring[0].napi);
+		napi_schedule(&q_vector->napi);
 		return;
 	}
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *tx_ring = &adapter->tx_ring[i];
-		wr32(E1000_EIMC, tx_ring->eims_value);
-		igb_clean_tx_irq(tx_ring);
-		wr32(E1000_EIMS, tx_ring->eims_value);
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *rx_ring = &adapter->rx_ring[i];
-		wr32(E1000_EIMC, rx_ring->eims_value);
-		napi_schedule(&rx_ring->napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		wr32(E1000_EIMC, q_vector->eims_value);
+		napi_schedule(&q_vector->napi);
 	}
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -5671,19 +5815,29 @@
 static void igb_vmm_control(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 reg_data;
+	u32 reg;
 
-	if (!adapter->vfs_allocated_count)
+	/* replication is not supported for 82575 */
+	if (hw->mac.type == e1000_82575)
 		return;
 
-	/* VF's need PF reset indication before they
-	 * can send/receive mail */
-	reg_data = rd32(E1000_CTRL_EXT);
-	reg_data |= E1000_CTRL_EXT_PFRSTD;
-	wr32(E1000_CTRL_EXT, reg_data);
+	/* enable replication vlan tag stripping */
+	reg = rd32(E1000_RPLOLR);
+	reg |= E1000_RPLOLR_STRVLAN;
+	wr32(E1000_RPLOLR, reg);
 
-	igb_vmdq_set_loopback_pf(hw, true);
-	igb_vmdq_set_replication_pf(hw, true);
+	/* notify HW that the MAC is adding vlan tags */
+	reg = rd32(E1000_DTXCTL);
+	reg |= E1000_DTXCTL_VLAN_ADDED;
+	wr32(E1000_DTXCTL, reg);
+
+	if (adapter->vfs_allocated_count) {
+		igb_vmdq_set_loopback_pf(hw, true);
+		igb_vmdq_set_replication_pf(hw, true);
+	} else {
+		igb_vmdq_set_loopback_pf(hw, false);
+		igb_vmdq_set_replication_pf(hw, false);
+	}
 }
 
 /* igb_main.c */
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index bc606f8..8afff07 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -279,7 +279,7 @@
 {
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
 	struct igbvf_ring *temp_ring;
-	int err;
+	int err = 0;
 	u32 new_rx_count, new_tx_count;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -299,15 +299,22 @@
 		return 0;
 	}
 
-	temp_ring = vmalloc(sizeof(struct igbvf_ring));
-	if (!temp_ring)
-		return -ENOMEM;
-
 	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
 		msleep(1);
 
-	if (netif_running(adapter->netdev))
-		igbvf_down(adapter);
+	if (!netif_running(adapter->netdev)) {
+		adapter->tx_ring->count = new_tx_count;
+		adapter->rx_ring->count = new_rx_count;
+		goto clear_reset;
+	}
+
+	temp_ring = vmalloc(sizeof(struct igbvf_ring));
+	if (!temp_ring) {
+		err = -ENOMEM;
+		goto clear_reset;
+	}
+
+	igbvf_down(adapter);
 
 	/*
 	 * We can't just free everything and then setup again,
@@ -339,14 +346,11 @@
 
 		memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
 	}
-
-	err = 0;
 err_setup:
-	if (netif_running(adapter->netdev))
-		igbvf_up(adapter);
-
-	clear_bit(__IGBVF_RESETTING, &adapter->state);
+	igbvf_up(adapter);
 	vfree(temp_ring);
+clear_reset:
+	clear_bit(__IGBVF_RESETTING, &adapter->state);
 	return err;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 2b85416..7eb08a6 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -457,6 +457,7 @@
 extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
 extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
 #endif /* CONFIG_IXGBE_DCB */
+extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
 #endif /* IXGBE_FCOE */
 
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index ae27c41..7210689 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1000,6 +1000,10 @@
 		hw->mac.num_rar_entries--;
 	}
 
+	/* Store the alternative WWNN/WWPN prefix */
+	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+	                               &hw->mac.wwpn_prefix);
+
 reset_hw_out:
 	return status;
 }
@@ -2536,6 +2540,51 @@
 	return status;
 }
 
+/**
+ *  ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
+ *  the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function reads the EEPROM's alternative SAN MAC address block to
+ *  check whether the alternative WWNN/WWPN prefixes are supported.
+ **/
+static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                                      u16 *wwpn_prefix)
+{
+	u16 offset, caps;
+	u16 alt_san_mac_blk_offset;
+
+	/* clear output first */
+	*wwnn_prefix = 0xFFFF;
+	*wwpn_prefix = 0xFFFF;
+
+	/* check if alternative SAN MAC is supported */
+	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+	                    &alt_san_mac_blk_offset);
+
+	if ((alt_san_mac_blk_offset == 0) ||
+	    (alt_san_mac_blk_offset == 0xFFFF))
+		goto wwn_prefix_out;
+
+	/* check capability in alternative san mac address block */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+	hw->eeprom.ops.read(hw, offset, &caps);
+	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+		goto wwn_prefix_out;
+
+	/* get the corresponding prefix for WWNN/WWPN */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+	hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+	return 0;
+}
+
 static struct ixgbe_mac_operations mac_ops_82599 = {
 	.init_hw                = &ixgbe_init_hw_generic,
 	.reset_hw               = &ixgbe_reset_hw_82599,
@@ -2547,6 +2596,7 @@
 	.get_mac_addr           = &ixgbe_get_mac_addr_generic,
 	.get_san_mac_addr       = &ixgbe_get_san_mac_addr_82599,
 	.get_device_caps        = &ixgbe_get_device_caps_82599,
+	.get_wwn_prefix         = &ixgbe_get_wwn_prefix_82599,
 	.stop_adapter           = &ixgbe_stop_adapter_generic,
 	.get_bus_info           = &ixgbe_get_bus_info_generic,
 	.set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 08eccf4..9d2cc83 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -806,7 +806,7 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
-	int i, err;
+	int i, err = 0;
 	u32 new_rx_count, new_tx_count;
 	bool need_update = false;
 
@@ -830,6 +830,16 @@
 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 		msleep(1);
 
+	if (!netif_running(adapter->netdev)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].count = new_tx_count;
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].count = new_rx_count;
+		adapter->tx_ring_count = new_tx_count;
+		adapter->rx_ring_count = new_rx_count;
+		goto err_setup;
+	}
+
 	temp_tx_ring = kcalloc(adapter->num_tx_queues,
 	                       sizeof(struct ixgbe_ring), GFP_KERNEL);
 	if (!temp_tx_ring) {
@@ -887,8 +897,7 @@
 
 	/* if rings need to be updated, here's the place to do it in one shot */
 	if (need_update) {
-		if (netif_running(netdev))
-			ixgbe_down(adapter);
+		ixgbe_down(adapter);
 
 		/* tx */
 		if (new_tx_count != adapter->tx_ring_count) {
@@ -905,13 +914,8 @@
 			temp_rx_ring = NULL;
 			adapter->rx_ring_count = new_rx_count;
 		}
-	}
-
-	/* success! */
-	err = 0;
-	if (netif_running(netdev))
 		ixgbe_up(adapter);
-
+	}
 err_setup:
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 	return err;
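Both this ixgbe rework and the igbvf one above are exercised through the ETHTOOL_SRINGPARAM ioctl, i.e. what "ethtool -G" issues. A small userspace sketch of that path (sock is any AF_INET datagram socket; interface name and ring sizes are illustrative):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/ethtool.h>

	/* Equivalent to "ethtool -G <ifname> rx 512 tx 512". */
	static int set_ring_sizes(int sock, const char *ifname)
	{
		struct ethtool_ringparam ering;
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&ering;

		memset(&ering, 0, sizeof(ering));
		ering.cmd = ETHTOOL_GRINGPARAM;	/* read current and max sizes first */
		if (ioctl(sock, SIOCETHTOOL, &ifr))
			return -1;

		ering.cmd = ETHTOOL_SRINGPARAM;	/* lands in the *_set_ringparam() above */
		ering.rx_pending = 512;
		ering.tx_pending = 512;
		return ioctl(sock, SIOCETHTOOL, &ifr);
	}

The point of both restructurings is that when the interface is down the new sizes are simply recorded without a down/up cycle, and the RESETTING bit is cleared on every exit path.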
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index a3c9f99..edecdc8 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -718,3 +718,49 @@
 	return 1;
 }
 #endif /* CONFIG_IXGBE_DCB */
+
+/**
+ * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
+ * @netdev: ixgbe adapter
+ * @wwn: the world wide name
+ * @type: the type of world wide name
+ *
+ * If both the prefix and the SAN MAC address are valid, the node or port
+ * world wide name is formed as an NAA-2 IEEE Extended name identifier
+ * (ref. T10 FC-LS Spec., Sec. 15.3).
+ *
+ * Returns: 0 on success
+ */
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+	int rc = -EINVAL;
+	u16 prefix = 0xffff;
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+	switch (type) {
+	case NETDEV_FCOE_WWNN:
+		prefix = mac->wwnn_prefix;
+		break;
+	case NETDEV_FCOE_WWPN:
+		prefix = mac->wwpn_prefix;
+		break;
+	default:
+		break;
+	}
+
+	if ((prefix != 0xffff) &&
+	    is_valid_ether_addr(mac->san_addr)) {
+		*wwn = ((u64) prefix << 48) |
+		       ((u64) mac->san_addr[0] << 40) |
+		       ((u64) mac->san_addr[1] << 32) |
+		       ((u64) mac->san_addr[2] << 24) |
+		       ((u64) mac->san_addr[3] << 16) |
+		       ((u64) mac->san_addr[4] << 8)  |
+		       ((u64) mac->san_addr[5]);
+		rc = 0;
+	}
+	return rc;
+}
+
+
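A worked example of the shift-and-or above (the values are illustrative, not taken from real hardware): a WWPN prefix of 0x2000 combined with a SAN MAC of 00:1b:21:3a:4b:5c yields 0x2000001b213a4b5c. A standalone check:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t prefix = 0x2000;	/* illustrative prefix value */
		uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3a, 0x4b, 0x5c };
		uint64_t wwn = (uint64_t)prefix << 48;
		int i;

		/* same ordering as ixgbe_fcoe_get_wwn(): mac[0] is the MSB */
		for (i = 0; i < 6; i++)
			wwn |= (uint64_t)mac[i] << (8 * (5 - i));

		printf("wwn = 0x%016llx\n", (unsigned long long)wwn);
		/* prints: wwn = 0x2000001b213a4b5c */
		return 0;
	}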
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 4c8a449..45c5faf 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5449,6 +5449,7 @@
 	.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
 	.ndo_fcoe_enable = ixgbe_fcoe_enable,
 	.ndo_fcoe_disable = ixgbe_fcoe_disable,
+	.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
 #endif /* IXGBE_FCOE */
 };
 
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 1cab53e..21b6633da 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1539,6 +1539,16 @@
 #define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR   0x4
 #define IXGBE_FW_PATCH_VERSION_4   0x7
 
+/* Alternative SAN MAC Address Block */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR      0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET  0x0 /* Alt. SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET  0x7 /* Alt. WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET  0x8 /* Alt. WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC  0x0 /* Alt. SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN  0x1 /* Alt. WWN base exists */
+
 /* PCI Bus Info */
 #define IXGBE_PCI_LINK_STATUS     0xB2
 #define IXGBE_PCI_DEVICE_CONTROL2 0xC8
@@ -2345,6 +2355,7 @@
 	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
 	s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
 	s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+	s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
 	s32 (*stop_adapter)(struct ixgbe_hw *);
 	s32 (*get_bus_info)(struct ixgbe_hw *);
 	void (*set_lan_id)(struct ixgbe_hw *);
@@ -2416,6 +2427,10 @@
 	u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
 	u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
 	u8                              san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	/* prefix for World Wide Node Name (WWNN) */
+	u16                             wwnn_prefix;
+	/* prefix for World Wide Port Name (WWPN) */
+	u16                             wwpn_prefix;
 	s32                             mc_filter_type;
 	u32                             mcft_size;
 	u32                             vft_size;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 2378358..a23f739 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -171,6 +171,36 @@
 }
 
 /**
+ * ks8851_wrreg8 - write 8bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
+{
+	struct spi_transfer *xfer = &ks->spi_xfer1;
+	struct spi_message *msg = &ks->spi_msg1;
+	__le16 txb[2];
+	int ret;
+	int bit;
+
+	bit = 1 << (reg & 3);
+
+	txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
+	txb[1] = val;
+
+	xfer->tx_buf = txb;
+	xfer->rx_buf = NULL;
+	xfer->len = 3;
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "spi_sync() failed\n");
+}
+
+/**
  * ks8851_rx_1msg - select whether to use one or two messages for spi read
  * @ks: The device structure
  *
@@ -322,13 +352,12 @@
 static int ks8851_write_mac_addr(struct net_device *dev)
 {
 	struct ks8851_net *ks = netdev_priv(dev);
-	u16 *mcp = (u16 *)dev->dev_addr;
+	int i;
 
 	mutex_lock(&ks->lock);
 
-	ks8851_wrreg16(ks, KS_MARL, mcp[0]);
-	ks8851_wrreg16(ks, KS_MARM, mcp[1]);
-	ks8851_wrreg16(ks, KS_MARH, mcp[2]);
+	for (i = 0; i < ETH_ALEN; i++)
+		ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
 
 	mutex_unlock(&ks->lock);
 
@@ -951,7 +980,7 @@
 			mcptr = mcptr->next;
 		}
 
-		rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA;
+		rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
 	} else {
 		/* just accept broadcast / unicast */
 		rxctrl.rxcr1 = RXCR1_RXPAFMA;
@@ -1239,6 +1268,9 @@
 	ndev->netdev_ops = &ks8851_netdev_ops;
 	ndev->irq = spi->irq;
 
+	/* issue a global soft reset to reset the device. */
+	ks8851_soft_reset(ks, GRR_GSR);
+
 	/* simple check for a valid chip being connected to the bus */
 
 	if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index 85abe14..f52c312 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -16,6 +16,7 @@
 #define CCR_32PIN				(1 << 0)
 
 /* MAC address registers */
+#define KS_MAR(_m)				0x15 - (_m)
 #define KS_MARL					0x10
 #define KS_MARM					0x12
 #define KS_MARH					0x14
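KS_MAR() maps a MAC byte index onto a descending register address, so the per-byte writes in ks8851_write_mac_addr() above cover the same MARL/MARM/MARH range as before, with the byte placement spelled out instead of depending on a u16 cast of dev_addr. (Note the macro body is unparenthesised, so it is only safe as a bare argument, which is how it is used.) Spelled out:

	/*
	 * KS_MAR(_m) = 0x15 - (_m), so the write loop places:
	 *
	 *   dev_addr[0] -> 0x15
	 *   dev_addr[1] -> 0x14   (KS_MARH spans 0x14-0x15)
	 *   dev_addr[2] -> 0x13
	 *   dev_addr[3] -> 0x12   (KS_MARM spans 0x12-0x13)
	 *   dev_addr[4] -> 0x11
	 *   dev_addr[5] -> 0x10   (KS_MARL spans 0x10-0x11)
	 *
	 * i.e. the most significant byte of the station address lands at the
	 * highest register address.
	 */
	for (i = 0; i < ETH_ALEN; i++)
		ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);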
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 3aabfd9..20b7707 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -555,13 +555,13 @@
 	return 0;
 }
 
-static void macvlan_dellink(struct net_device *dev)
+static void macvlan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct macvlan_port *port = vlan->port;
 
 	list_del(&vlan->list);
-	unregister_netdevice(dev);
+	unregister_netdevice_queue(dev, head);
 
 	if (list_empty(&port->vlans))
 		macvlan_port_destroy(port->dev);
@@ -601,7 +601,7 @@
 		break;
 	case NETDEV_UNREGISTER:
 		list_for_each_entry_safe(vlan, next, &port->vlans, list)
-			macvlan_dellink(vlan->dev);
+			macvlan_dellink(vlan->dev, NULL);
 		break;
 	}
 	return NOTIFY_DONE;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 29c9fe2..5319db9 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
 #include "myri10ge_mcp.h"
 #include "myri10ge_mcp_gen_header.h"
 
-#define MYRI10GE_VERSION_STR "1.5.0-1.432"
+#define MYRI10GE_VERSION_STR "1.5.1-1.451"
 
 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
 MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -1623,10 +1623,21 @@
 			return 0;
 		}
 	}
-	if (*ptr == 'R' || *ptr == 'Q') {
-		/* We've found either an XFP or quad ribbon fiber */
+	if (*ptr == '2')
+		ptr++;
+	if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
+		/* We've found either an XFP, quad ribbon fiber, or SFP+ */
 		cmd->port = PORT_FIBRE;
+		cmd->supported |= SUPPORTED_FIBRE;
+		cmd->advertising |= ADVERTISED_FIBRE;
+	} else {
+		cmd->port = PORT_OTHER;
 	}
+	if (*ptr == 'R' || *ptr == 'S')
+		cmd->transceiver = XCVR_EXTERNAL;
+	else
+		cmd->transceiver = XCVR_INTERNAL;
+
 	return 0;
 }
 
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index e98cfa6..645450d 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 62
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.62"
+#define _NETXEN_NIC_LINUX_SUBVERSION 65
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.65"
 
 #define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
 #define _major(v)	(((v) >> 24) & 0xff)
@@ -74,8 +74,6 @@
 #define NETXEN_FLASH_TOTAL_SIZE  (NETXEN_NUM_FLASH_SECTORS \
 					* NETXEN_FLASH_SECTOR_SIZE)
 
-#define PHAN_VENDOR_ID 0x4040
-
 #define RCV_DESC_RINGSIZE(rds_ring)	\
 	(sizeof(struct rcv_desc) * (rds_ring)->num_desc)
 #define RCV_BUFF_RINGSIZE(rds_ring)	\
@@ -421,6 +419,34 @@
 	__le64 status_desc_data[2];
 } __attribute__ ((aligned(16)));
 
+/* UNIFIED ROMIMAGE *************************/
+#define NX_UNI_FW_MIN_SIZE		0x3eb000
+#define NX_UNI_DIR_SECT_PRODUCT_TBL	0x0
+#define NX_UNI_DIR_SECT_BOOTLD		0x6
+#define NX_UNI_DIR_SECT_FW		0x7
+
+/*Offsets */
+#define NX_UNI_CHIP_REV_OFF		10
+#define NX_UNI_FLAGS_OFF		11
+#define NX_UNI_BIOS_VERSION_OFF 	12
+#define NX_UNI_BOOTLD_IDX_OFF		27
+#define NX_UNI_FIRMWARE_IDX_OFF 	29
+
+struct uni_table_desc{
+	uint32_t	findex;
+	uint32_t	num_entries;
+	uint32_t	entry_size;
+	uint32_t	reserved[5];
+};
+
+struct uni_data_desc{
+	uint32_t	findex;
+	uint32_t	size;
+	uint32_t	reserved[5];
+};
+
+/* UNIFIED ROMIMAGE *************************/
+
 /* The version of the main data structure */
 #define	NETXEN_BDINFO_VERSION 1
 
@@ -487,7 +513,15 @@
 #define NX_P2_MN_ROMIMAGE	0
 #define NX_P3_CT_ROMIMAGE	1
 #define NX_P3_MN_ROMIMAGE	2
-#define NX_FLASH_ROMIMAGE	3
+#define NX_UNIFIED_ROMIMAGE	3
+#define NX_FLASH_ROMIMAGE	4
+#define NX_UNKNOWN_ROMIMAGE	0xff
+
+#define NX_P2_MN_ROMIMAGE_NAME		"nxromimg.bin"
+#define NX_P3_CT_ROMIMAGE_NAME		"nx3fwct.bin"
+#define NX_P3_MN_ROMIMAGE_NAME		"nx3fwmn.bin"
+#define NX_UNIFIED_ROMIMAGE_NAME	"phanfw.bin"
+#define NX_FLASH_ROMIMAGE_NAME		"flash"
 
 extern char netxen_nic_driver_name[];
 
@@ -1210,7 +1244,7 @@
 	nx_nic_intr_coalesce_t coal;
 
 	unsigned long state;
-	u32 resv5;
+	__le32 file_prd_off;	/*File fw product offset*/
 	u32 fw_version;
 	const struct firmware *fw;
 };
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index a3b18e0..c86095e 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -688,8 +688,8 @@
 	u32 data_read, data_written;
 
 	data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
-	if ((data_read & 0xffff) != PHAN_VENDOR_ID)
-	return 1;
+	if ((data_read & 0xffff) != adapter->pdev->vendor)
+		return 1;
 
 	data_written = (u32)0xa5a5a5a5;
 
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 7386a7cc..a39155d 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -419,6 +419,7 @@
 #define NETXEN_CRB_ROMUSB	\
 	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
 #define NETXEN_CRB_I2Q		NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
+#define NETXEN_CRB_I2C0		NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
 #define NETXEN_CRB_SMB		NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
 #define NETXEN_CRB_MAX		NETXEN_PCI_CRB_WINDOW(64)
 
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index e43cbbd..b3054c6 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -1090,39 +1090,33 @@
  * In: 'off' is offset from base in 128M pci map
  */
 static int
-netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong *off)
+netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
+		ulong off, void __iomem **addr)
 {
 	crb_128M_2M_sub_block_map_t *m;
 
 
-	if (*off >= NETXEN_CRB_MAX)
+	if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE))
 		return -EINVAL;
 
-	if (*off >= NETXEN_PCI_CAMQM && (*off < NETXEN_PCI_CAMQM_2M_END)) {
-		*off = (*off - NETXEN_PCI_CAMQM) + NETXEN_PCI_CAMQM_2M_BASE +
-			(ulong)adapter->ahw.pci_base0;
-		return 0;
-	}
-
-	if (*off < NETXEN_PCI_CRBSPACE)
-		return -EINVAL;
-
-	*off -= NETXEN_PCI_CRBSPACE;
+	off -= NETXEN_PCI_CRBSPACE;
 
 	/*
 	 * Try direct map
 	 */
-	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
 
-	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
-		*off = *off + m->start_2M - m->start_128M +
-			(ulong)adapter->ahw.pci_base0;
+	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+		*addr = adapter->ahw.pci_base0 + m->start_2M +
+			(off - m->start_128M);
 		return 0;
 	}
 
 	/*
 	 * Not in direct map, use crb window
 	 */
+	*addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
+		(off & MASK(16));
 	return 1;
 }
 
@@ -1132,28 +1126,26 @@
  * side effect: lock crb window
  */
 static void
-netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong *off)
+netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
 {
 	u32 window;
 	void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
 
-	window = CRB_HI(*off);
+	off -= NETXEN_PCI_CRBSPACE;
+
+	window = CRB_HI(off);
 
 	if (adapter->ahw.crb_win == window)
-		goto done;
+		return;
 
 	writel(window, addr);
 	if (readl(addr) != window) {
 		if (printk_ratelimit())
 			dev_warn(&adapter->pdev->dev,
 				"failed to set CRB window to %d off 0x%lx\n",
-				window, *off);
+				window, off);
 	}
 	adapter->ahw.crb_win = window;
-
-done:
-	*off = (*off & MASK(16)) + CRB_INDIRECT_2M +
-		(ulong)adapter->ahw.pci_base0;
 }
 
 static int
@@ -1217,11 +1209,12 @@
 {
 	unsigned long flags;
 	int rv;
+	void __iomem *addr = NULL;
 
-	rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off);
+	rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
 
 	if (rv == 0) {
-		writel(data, (void __iomem *)off);
+		writel(data, addr);
 		return 0;
 	}
 
@@ -1229,8 +1222,8 @@
 		/* indirect access */
 		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
 		crb_win_lock(adapter);
-		netxen_nic_pci_set_crbwindow_2M(adapter, &off);
-		writel(data, (void __iomem *)off);
+		netxen_nic_pci_set_crbwindow_2M(adapter, off);
+		writel(data, addr);
 		crb_win_unlock(adapter);
 		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
 		return 0;
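The contract of the reworked lookup, as used by the write path above and the read path below, is that the helper fills in the target address and only tells the caller whether CRB window management is needed. Condensed:

	/*
	 * netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr) returns:
	 *   0   - addr is directly mapped, access it straight away
	 *   > 0 - addr points into the indirect CRB window; take crb_lock,
	 *         program the window for this offset, then access it
	 *   < 0 - off is outside CRB space
	 */
	rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
	if (rv == 0) {
		writel(data, addr);
	} else if (rv > 0) {
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		crb_win_lock(adapter);
		netxen_nic_pci_set_crbwindow_2M(adapter, off);
		writel(data, addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
	}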
@@ -1248,18 +1241,19 @@
 	unsigned long flags;
 	int rv;
 	u32 data;
+	void __iomem *addr = NULL;
 
-	rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off);
+	rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
 
 	if (rv == 0)
-		return readl((void __iomem *)off);
+		return readl(addr);
 
 	if (rv > 0) {
 		/* indirect access */
 		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
 		crb_win_lock(adapter);
-		netxen_nic_pci_set_crbwindow_2M(adapter, &off);
-		data = readl((void __iomem *)off);
+		netxen_nic_pci_set_crbwindow_2M(adapter, off);
+		data = readl(addr);
 		crb_win_unlock(adapter);
 		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
 		return data;
@@ -1307,17 +1301,20 @@
 void __iomem *
 netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset)
 {
-	ulong off = offset;
+	void __iomem *addr = NULL;
 
 	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
-		if (offset < NETXEN_CRB_PCIX_HOST2 &&
-				offset > NETXEN_CRB_PCIX_HOST)
-			return PCI_OFFSET_SECOND_RANGE(adapter, offset);
-		return NETXEN_CRB_NORMALIZE(adapter, offset);
+		if ((offset < NETXEN_CRB_PCIX_HOST2) &&
+				(offset > NETXEN_CRB_PCIX_HOST))
+			addr = PCI_OFFSET_SECOND_RANGE(adapter, offset);
+		else
+			addr = NETXEN_CRB_NORMALIZE(adapter, offset);
+	} else {
+		WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter,
+					offset, &addr));
 	}
 
-	BUG_ON(netxen_nic_pci_get_crb_addr_2M(adapter, &off));
-	return (void __iomem *)off;
+	return addr;
 }
 
 static int
@@ -1778,22 +1775,16 @@
 
 int netxen_nic_get_board_info(struct netxen_adapter *adapter)
 {
-	int offset, board_type, magic, header_version;
+	int offset, board_type, magic;
 	struct pci_dev *pdev = adapter->pdev;
 
 	offset = NX_FW_MAGIC_OFFSET;
 	if (netxen_rom_fast_read(adapter, offset, &magic))
 		return -EIO;
 
-	offset = NX_HDR_VERSION_OFFSET;
-	if (netxen_rom_fast_read(adapter, offset, &header_version))
-		return -EIO;
-
-	if (magic != NETXEN_BDINFO_MAGIC ||
-			header_version != NETXEN_BDINFO_VERSION) {
-		dev_err(&pdev->dev,
-			"invalid board config, magic=%08x, version=%08x\n",
-			magic, header_version);
+	if (magic != NETXEN_BDINFO_MAGIC) {
+		dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+			magic);
 		return -EIO;
 	}
 
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index d8c4b70..6ee27a6 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -46,6 +46,7 @@
 static void
 netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
 		struct nx_host_rds_ring *rds_ring);
+static int netxen_p3_has_mn(struct netxen_adapter *adapter);
 
 static void crb_addr_transform_setup(void)
 {
@@ -514,6 +515,8 @@
 			continue;
 
 		if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+			if (off == (NETXEN_CRB_I2C0 + 0x1c))
+				continue;
 			/* do not reset PCI */
 			if (off == (ROMUSB_GLB + 0xbc))
 				continue;
@@ -537,12 +540,6 @@
 				continue;
 		}
 
-		if (off == NETXEN_ADDR_ERROR) {
-			printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
-					netxen_nic_driver_name, buf[i].addr);
-			continue;
-		}
-
 		init_delay = 1;
 		/* After writing this register, HW needs time for CRB */
 		/* to quiet down (else crb_window returns 0xffffffff) */
@@ -593,6 +590,172 @@
 	return 0;
 }
 
+static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
+{
+	uint32_t i;
+	struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+	__le32 entries = cpu_to_le32(directory->num_entries);
+
+	for (i = 0; i < entries; i++) {
+
+		__le32 offs = cpu_to_le32(directory->findex) +
+				(i * cpu_to_le32(directory->entry_size));
+		__le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+		if (tab_type == section)
+			return (struct uni_table_desc *) &unirom[offs];
+	}
+
+	return NULL;
+}
+
+static int
+nx_set_product_offs(struct netxen_adapter *adapter)
+{
+	struct uni_table_desc *ptab_descr;
+	const u8 *unirom = adapter->fw->data;
+	uint32_t i;
+	__le32 entries;
+
+	ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
+	if (ptab_descr == NULL)
+		return -1;
+
+	entries = cpu_to_le32(ptab_descr->num_entries);
+
+	for (i = 0; i < entries; i++) {
+
+		__le32 flags, file_chiprev, offs;
+		u8 chiprev = adapter->ahw.revision_id;
+		int mn_present = netxen_p3_has_mn(adapter);
+		uint32_t flagbit;
+
+		offs = cpu_to_le32(ptab_descr->findex) +
+				(i * cpu_to_le32(ptab_descr->entry_size));
+		flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
+		file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+							NX_UNI_CHIP_REV_OFF));
+
+		flagbit = mn_present ? 1 : 2;
+
+		if ((chiprev == file_chiprev) &&
+					((1ULL << flagbit) & flags)) {
+			adapter->file_prd_off = offs;
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+
+static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
+			u32 section, u32 idx_offset)
+{
+	const u8 *unirom = adapter->fw->data;
+	int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+								idx_offset));
+	struct uni_table_desc *tab_desc;
+	__le32 offs;
+
+	tab_desc = nx_get_table_desc(unirom, section);
+
+	if (tab_desc == NULL)
+		return NULL;
+
+	offs = cpu_to_le32(tab_desc->findex) +
+			(cpu_to_le32(tab_desc->entry_size) * idx);
+
+	return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+nx_get_bootld_offs(struct netxen_adapter *adapter)
+{
+	u32 offs = NETXEN_BOOTLD_START;
+
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+		offs = cpu_to_le32((nx_get_data_desc(adapter,
+					NX_UNI_DIR_SECT_BOOTLD,
+					NX_UNI_BOOTLD_IDX_OFF))->findex);
+
+	return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+nx_get_fw_offs(struct netxen_adapter *adapter)
+{
+	u32 offs = NETXEN_IMAGE_START;
+
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+		offs = cpu_to_le32((nx_get_data_desc(adapter,
+					NX_UNI_DIR_SECT_FW,
+					NX_UNI_FIRMWARE_IDX_OFF))->findex);
+
+	return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+nx_get_fw_size(struct netxen_adapter *adapter)
+{
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+		return cpu_to_le32((nx_get_data_desc(adapter,
+					NX_UNI_DIR_SECT_FW,
+					NX_UNI_FIRMWARE_IDX_OFF))->size);
+	else
+		return cpu_to_le32(
+				*(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
+}
+
+static __le32
+nx_get_fw_version(struct netxen_adapter *adapter)
+{
+	struct uni_data_desc *fw_data_desc;
+	const struct firmware *fw = adapter->fw;
+	__le32 major, minor, sub;
+	const u8 *ver_str;
+	int i, ret = 0;
+
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+
+		fw_data_desc = nx_get_data_desc(adapter,
+				NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
+		ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+				cpu_to_le32(fw_data_desc->size) - 17;
+
+		for (i = 0; i < 12; i++) {
+			if (!strncmp(&ver_str[i], "REV=", 4)) {
+				ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+							&major, &minor, &sub);
+				break;
+			}
+		}
+
+		if (ret != 3)
+			return 0;
+
+		return major + (minor << 8) + (sub << 16);
+
+	} else
+		return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+}
+
+static __le32
+nx_get_bios_version(struct netxen_adapter *adapter)
+{
+	const struct firmware *fw = adapter->fw;
+	__le32 bios_ver, prd_off = adapter->file_prd_off;
+
+	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+						+ NX_UNI_BIOS_VERSION_OFF));
+		return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
+							(bios_ver >> 24);
+	} else
+		return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
+
+}
+
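Putting the helpers together, the unified image ("phanfw.bin") is interpreted roughly as sketched below; all fields are little-endian in the file and the word offsets are the NX_UNI_*_OFF constants defined earlier in netxen_nic.h. The lookup function is a host-side sketch (assumes <stdint.h> and <endian.h>), not driver code:

	/*
	 * offset 0: directory - a uni_table_desc whose entries carry a section
	 *           id at 32-bit word index 8 (0x0 product table, 0x6 bootld
	 *           table, 0x7 firmware table)
	 * product table: entries matched by chip revision (word 10) and an
	 *           MN-present flag bit (word 11); word 12 = BIOS version,
	 *           words 27/29 = row indices into the bootld/firmware tables
	 * bootld/firmware tables: rows of uni_data_desc { findex, size }
	 *           pointing at the actual payloads inside the file
	 */
	static const struct uni_table_desc *find_section(const uint8_t *blob,
							 uint32_t id)
	{
		const struct uni_table_desc *dir = (const void *)blob;
		uint32_t i, n = le32toh(dir->num_entries);

		for (i = 0; i < n; i++) {
			uint32_t off = le32toh(dir->findex) +
				       i * le32toh(dir->entry_size);
			const uint32_t *entry = (const uint32_t *)(blob + off);

			if (le32toh(entry[8]) == id)
				return (const struct uni_table_desc *)(blob + off);
		}
		return NULL;
	}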
 int
 netxen_need_fw_reset(struct netxen_adapter *adapter)
 {
@@ -632,9 +795,8 @@
 	/* check if we have got newer or different file firmware */
 	if (adapter->fw) {
 
-		const struct firmware *fw = adapter->fw;
+		val = nx_get_fw_version(adapter);
 
-		val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
 		version = NETXEN_DECODE_VERSION(val);
 
 		major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
@@ -644,7 +806,8 @@
 		if (version > NETXEN_VERSION_CODE(major, minor, build))
 			return 1;
 
-		if (version == NETXEN_VERSION_CODE(major, minor, build)) {
+		if (version == NETXEN_VERSION_CODE(major, minor, build) &&
+			adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
 
 			val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
 			fw_type = (val & 0x4) ?
@@ -659,7 +822,11 @@
 }
 
 static char *fw_name[] = {
-	"nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash",
+	NX_P2_MN_ROMIMAGE_NAME,
+	NX_P3_CT_ROMIMAGE_NAME,
+	NX_P3_MN_ROMIMAGE_NAME,
+	NX_UNIFIED_ROMIMAGE_NAME,
+	NX_FLASH_ROMIMAGE_NAME,
 };
 
 int
@@ -681,22 +848,21 @@
 
 		size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
 
-		ptr64 = (u64 *)&fw->data[NETXEN_BOOTLD_START];
+		ptr64 = (u64 *)nx_get_bootld_offs(adapter);
 		flashaddr = NETXEN_BOOTLD_START;
 
 		for (i = 0; i < size; i++) {
 			data = cpu_to_le64(ptr64[i]);
-			if (adapter->pci_mem_write(adapter,
-						flashaddr, data))
+
+			if (adapter->pci_mem_write(adapter, flashaddr, data))
 				return -EIO;
 
 			flashaddr += 8;
 		}
 
-		size = *(u32 *)&fw->data[NX_FW_SIZE_OFFSET];
-		size = (__force u32)cpu_to_le32(size) / 8;
+		size = (__force u32)nx_get_fw_size(adapter) / 8;
 
-		ptr64 = (u64 *)&fw->data[NETXEN_IMAGE_START];
+		ptr64 = (u64 *)nx_get_fw_offs(adapter);
 		flashaddr = NETXEN_IMAGE_START;
 
 		for (i = 0; i < size; i++) {
@@ -749,21 +915,31 @@
 }
 
 static int
-netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
+netxen_validate_firmware(struct netxen_adapter *adapter)
 {
 	__le32 val;
-	u32 ver, min_ver, bios;
+	u32 ver, min_ver, bios, min_size;
 	struct pci_dev *pdev = adapter->pdev;
 	const struct firmware *fw = adapter->fw;
+	u8 fw_type = adapter->fw_type;
 
-	if (fw->size < NX_FW_MIN_SIZE)
+	if (fw_type == NX_UNIFIED_ROMIMAGE) {
+		if (nx_set_product_offs(adapter))
+			return -EINVAL;
+
+		min_size = NX_UNI_FW_MIN_SIZE;
+	} else {
+		val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
+		if ((__force u32)val != NETXEN_BDINFO_MAGIC)
+			return -EINVAL;
+
+		min_size = NX_FW_MIN_SIZE;
+	}
+
+	if (fw->size < min_size)
 		return -EINVAL;
 
-	val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
-	if ((__force u32)val != NETXEN_BDINFO_MAGIC)
-		return -EINVAL;
-
-	val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+	val = nx_get_fw_version(adapter);
 
 	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
 		min_ver = NETXEN_VERSION_CODE(4, 0, 216);
@@ -775,15 +951,15 @@
 	if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
 		dev_err(&pdev->dev,
 				"%s: firmware version %d.%d.%d unsupported\n",
-				fwname, _major(ver), _minor(ver), _build(ver));
+		fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
 		return -EINVAL;
 	}
 
-	val = cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
+	val = nx_get_bios_version(adapter);
 	netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
 	if ((__force u32)val != bios) {
 		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
-				fwname);
+				fw_name[fw_type]);
 		return -EINVAL;
 	}
 
@@ -794,7 +970,7 @@
 	val = NETXEN_DECODE_VERSION(val);
 	if (val > ver) {
 		dev_info(&pdev->dev, "%s: firmware is older than flash\n",
-				fwname);
+				fw_name[fw_type]);
 		return -EINVAL;
 	}
 
@@ -802,6 +978,41 @@
 	return 0;
 }
 
+static void
+nx_get_next_fwtype(struct netxen_adapter *adapter)
+{
+	u8 fw_type;
+
+	switch (adapter->fw_type) {
+	case NX_UNKNOWN_ROMIMAGE:
+		fw_type = NX_UNIFIED_ROMIMAGE;
+		break;
+
+	case NX_UNIFIED_ROMIMAGE:
+		if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+			fw_type = NX_FLASH_ROMIMAGE;
+		else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+			fw_type = NX_P2_MN_ROMIMAGE;
+		else if (netxen_p3_has_mn(adapter))
+			fw_type = NX_P3_MN_ROMIMAGE;
+		else
+			fw_type = NX_P3_CT_ROMIMAGE;
+		break;
+
+	case NX_P3_MN_ROMIMAGE:
+		fw_type = NX_P3_CT_ROMIMAGE;
+		break;
+
+	case NX_P2_MN_ROMIMAGE:
+	case NX_P3_CT_ROMIMAGE:
+	default:
+		fw_type = NX_FLASH_ROMIMAGE;
+		break;
+	}
+
+	adapter->fw_type = fw_type;
+}
+
 static int
 netxen_p3_has_mn(struct netxen_adapter *adapter)
 {
@@ -823,55 +1034,29 @@
 
 void netxen_request_firmware(struct netxen_adapter *adapter)
 {
-	u8 fw_type;
 	struct pci_dev *pdev = adapter->pdev;
 	int rc = 0;
 
-	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
-		fw_type = NX_P2_MN_ROMIMAGE;
-		goto request_fw;
-	}
+	adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
 
-	if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
-		/* No file firmware for the time being */
-		fw_type = NX_FLASH_ROMIMAGE;
-		goto done;
-	}
+next:
+	nx_get_next_fwtype(adapter);
 
-	fw_type = netxen_p3_has_mn(adapter) ?
-		NX_P3_MN_ROMIMAGE : NX_P3_CT_ROMIMAGE;
-
-request_fw:
-	rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev);
-	if (rc != 0) {
-		if (fw_type == NX_P3_MN_ROMIMAGE) {
-			msleep(1);
-			fw_type = NX_P3_CT_ROMIMAGE;
-			goto request_fw;
-		}
-
-		fw_type = NX_FLASH_ROMIMAGE;
+	if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
 		adapter->fw = NULL;
-		goto done;
-	}
+	} else {
+		rc = request_firmware(&adapter->fw,
+				fw_name[adapter->fw_type], &pdev->dev);
+		if (rc != 0)
+			goto next;
 
-	rc = netxen_validate_firmware(adapter, fw_name[fw_type]);
-	if (rc != 0) {
-		release_firmware(adapter->fw);
-
-		if (fw_type == NX_P3_MN_ROMIMAGE) {
+		rc = netxen_validate_firmware(adapter);
+		if (rc != 0) {
+			release_firmware(adapter->fw);
 			msleep(1);
-			fw_type = NX_P3_CT_ROMIMAGE;
-			goto request_fw;
+			goto next;
 		}
-
-		fw_type = NX_FLASH_ROMIMAGE;
-		adapter->fw = NULL;
-		goto done;
 	}
-
-done:
-	adapter->fw_type = fw_type;
 }
 
 
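Taken together, nx_get_next_fwtype() and the request/validate loop above implement the following probe order (file names from the fw_name[] table; "flash" means fall back to whatever firmware is already programmed in the adapter):

	/*
	 *   start:          NX_UNKNOWN_ROMIMAGE
	 *     -> NX_UNIFIED_ROMIMAGE              ("phanfw.bin")
	 *   if missing or validation fails:
	 *     P3P chips     -> NX_FLASH_ROMIMAGE  (no file firmware)
	 *     P2 chips      -> NX_P2_MN_ROMIMAGE  ("nxromimg.bin") -> flash
	 *     P3 with MN    -> NX_P3_MN_ROMIMAGE  ("nx3fwmn.bin")
	 *                      -> NX_P3_CT_ROMIMAGE ("nx3fwct.bin") -> flash
	 *     P3 without MN -> NX_P3_CT_ROMIMAGE  ("nx3fwct.bin") -> flash
	 */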
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 1071f09..12d1037 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -34,13 +34,18 @@
 #include <net/ip.h>
 #include <linux/ipv6.h>
 #include <linux/inetdevice.h>
+#include <linux/sysfs.h>
 
-MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
+MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
+MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
+MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
+MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
 
 char netxen_nic_driver_name[] = "netxen_nic";
-static char netxen_nic_driver_string[] = "NetXen Network Driver version "
+static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
     NETXEN_NIC_LINUX_VERSIONID;
 
 static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
@@ -54,7 +59,6 @@
 
 static unsigned long auto_fw_reset = AUTO_FW_RESET_ENABLED;
 
-/* Local functions to NetXen NIC driver */
 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
 		const struct pci_device_id *ent);
 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
@@ -91,6 +95,11 @@
 #define ENTRY(device) \
 	{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
 	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+#define ENTRY2(device) \
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
+	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+#define PCI_DEVICE_ID_QLOGIC_QLE824X	0x8020
 
 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
 	ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
@@ -101,6 +110,7 @@
 	ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
 	ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
 	ENTRY(PCI_DEVICE_ID_NX3031),
+	ENTRY2(PCI_DEVICE_ID_QLOGIC_QLE824X),
 	{0,}
 };
 
@@ -724,7 +734,8 @@
 	if (adapter->portnum == 0) {
 		get_brd_name_by_type(adapter->ahw.board_type, brd_name);
 
-		printk(KERN_INFO "NetXen %s Board S/N %s  Chip rev 0x%x\n",
+		pr_info("%s: %s Board S/N %s  Chip rev 0x%x\n",
+				module_name(THIS_MODULE),
 				brd_name, serial_num, adapter->ahw.revision_id);
 	}
 
@@ -1206,16 +1217,10 @@
 	int pci_func_id = PCI_FUNC(pdev->devfn);
 	uint8_t revision_id;
 
-	if (pdev->class != 0x020000) {
-		printk(KERN_DEBUG "NetXen function %d, class %x will not "
-				"be enabled.\n",pci_func_id, pdev->class);
-		return -ENODEV;
-	}
-
 	if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
-		printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x"
+		pr_warning("%s: chip revisions between 0x%x-0x%x "
 				"will not be enabled.\n",
-				NX_P3_A0, NX_P3_B1);
+				module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
 		return -ENODEV;
 	}
 
@@ -1925,6 +1930,7 @@
 
 request_reset:
 	adapter->need_fw_reset = 1;
+	clear_bit(__NX_RESETTING, &adapter->state);
 }
 
 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
@@ -2499,6 +2505,7 @@
 	.write = netxen_sysfs_write_mem,
 };
 
+#ifdef CONFIG_MODULES
 static ssize_t
 netxen_store_auto_fw_reset(struct module_attribute *mattr,
 		struct module *mod, const char *buf, size_t count)
@@ -2533,6 +2540,7 @@
 	.show = netxen_show_auto_fw_reset,
 	.store = netxen_store_auto_fw_reset,
 };
+#endif
 
 static void
 netxen_create_sysfs_entries(struct netxen_adapter *adapter)
@@ -2738,7 +2746,9 @@
 
 static int __init netxen_init_module(void)
 {
+#ifdef CONFIG_MODULES
 	struct module *mod = THIS_MODULE;
+#endif
 
 	printk(KERN_INFO "%s\n", netxen_nic_driver_string);
 
@@ -2747,9 +2757,11 @@
 	register_inetaddr_notifier(&netxen_inetaddr_cb);
 #endif
 
+#ifdef CONFIG_MODULES
 	if (sysfs_create_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr))
 		printk(KERN_ERR "%s: Failed to create auto_fw_reset "
 				"sysfs entry.", netxen_nic_driver_name);
+#endif
 
 	return pci_register_driver(&netxen_driver);
 }
@@ -2758,9 +2770,11 @@
 
 static void __exit netxen_exit_module(void)
 {
+#ifdef CONFIG_MODULES
 	struct module *mod = THIS_MODULE;
 
 	sysfs_remove_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr);
+#endif
 
 	pci_unregister_driver(&netxen_driver);
 
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 1d1e657..5506f87 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3545,7 +3545,7 @@
 	rp->rcr_index = index;
 
 	skb_reserve(skb, NET_IP_ALIGN);
-	__pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
+	__pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
 
 	rp->rx_packets++;
 	rp->rx_bytes += skb->len;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index bd3447f..94c9ad2 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1760,7 +1760,7 @@
 	PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
-	PCMCIA_DEVICE_CIS_PROD_ID12("PMX   ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
+	PCMCIA_DEVICE_CIS_PROD_ID12("PMX   ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
 	PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b),
 	PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f81e532..f63c96a 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -16,6 +16,7 @@
 
 #include <linux/module.h>
 #include <linux/phy.h>
+#include <linux/brcmphy.h>
 
 #define PHY_ID_BCM50610		0x0143bd60
 #define PHY_ID_BCM50610M	0x0143bd70
@@ -24,6 +25,9 @@
 #define BRCM_PHY_MODEL(phydev) \
 	((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
 
+#define BRCM_PHY_REV(phydev) \
+	((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
+
 
 #define MII_BCM54XX_ECR		0x10	/* BCM54xx extended control register */
 #define MII_BCM54XX_ECR_IM	0x1000	/* Interrupt mask */
@@ -94,22 +98,35 @@
 #define BCM_LED_SRC_OFF		0xe	/* Tied high */
 #define BCM_LED_SRC_ON		0xf	/* Tied low */
 
+
 /*
  * BCM5482: Shadow registers
  * Shadow values go into bits [14:10] of register 0x1c to select a shadow
  * register to access.
  */
+/* 00101: Spare Control Register 3 */
+#define BCM54XX_SHD_SCR3		0x05
+#define  BCM54XX_SHD_SCR3_DEF_CLK125	0x0001
+#define  BCM54XX_SHD_SCR3_DLLAPD_DIS	0x0002
+#define  BCM54XX_SHD_SCR3_TRDDAPD	0x0004
+
+/* 01010: Auto Power-Down */
+#define BCM54XX_SHD_APD			0x0a
+#define  BCM54XX_SHD_APD_EN		0x0020
+
 #define BCM5482_SHD_LEDS1	0x0d	/* 01101: LED Selector 1 */
 					/* LED3 / ~LINKSPD[2] selector */
 #define BCM5482_SHD_LEDS1_LED3(src)	((src & 0xf) << 4)
 					/* LED1 / ~LINKSPD[1] selector */
 #define BCM5482_SHD_LEDS1_LED1(src)	((src & 0xf) << 0)
+#define BCM54XX_SHD_RGMII_MODE	0x0b	/* 01011: RGMII Mode Selector */
 #define BCM5482_SHD_SSD		0x14	/* 10100: Secondary SerDes control */
 #define BCM5482_SHD_SSD_LEDM	0x0008	/* SSD LED Mode enable */
 #define BCM5482_SHD_SSD_EN	0x0001	/* SSD enable */
 #define BCM5482_SHD_MODE	0x1f	/* 11111: Mode Control Register */
 #define BCM5482_SHD_MODE_1000BX	0x0001	/* Enable 1000BASE-X registers */
 
+
 /*
  * EXPANSION SHADOW ACCESS REGISTERS.  (PHY REG 0x15, 0x16, and 0x17)
  */
@@ -138,16 +155,6 @@
 #define BCM5482_SSD_SGMII_SLAVE_EN	0x0002	/* Slave mode enable */
 #define BCM5482_SSD_SGMII_SLAVE_AD	0x0001	/* Slave auto-detection */
 
-/*
- * Device flags for PHYs that can be configured for different operating
- * modes.
- */
-#define PHY_BCM_FLAGS_VALID		0x80000000
-#define PHY_BCM_FLAGS_INTF_XAUI		0x00000020
-#define PHY_BCM_FLAGS_INTF_SGMII	0x00000010
-#define PHY_BCM_FLAGS_MODE_1000BX	0x00000002
-#define PHY_BCM_FLAGS_MODE_COPPER	0x00000001
-
 
 /*****************************************************************************/
 /* Fast Ethernet Transceiver definitions. */
@@ -237,10 +244,43 @@
 	return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
 }
 
+/* Needs SMDSP clock enabled via bcm54xx_phydsp_config() */
 static int bcm50610_a0_workaround(struct phy_device *phydev)
 {
 	int err;
 
+	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0,
+				MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN |
+				MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF);
+	if (err < 0)
+		return err;
+
+	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3,
+					MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
+	if (err < 0)
+		return err;
+
+	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75,
+				MII_BCM54XX_EXP_EXP75_VDACCTRL);
+	if (err < 0)
+		return err;
+
+	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96,
+				MII_BCM54XX_EXP_EXP96_MYST);
+	if (err < 0)
+		return err;
+
+	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97,
+				MII_BCM54XX_EXP_EXP97_MYST);
+
+	return err;
+}
+
+static int bcm54xx_phydsp_config(struct phy_device *phydev)
+{
+	int err, err2;
+
+	/* Enable the SMDSP clock */
 	err = bcm54xx_auxctl_write(phydev,
 				   MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
 				   MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
@@ -248,42 +288,101 @@
 	if (err < 0)
 		return err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
-				MII_BCM54XX_EXP_EXP08_RJCT_2MHZ	|
-				MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE);
-	if (err < 0)
-		goto error;
+	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
+	    BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) {
+		/* Clear bit 9 to fix a phy interop issue. */
+		err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
+					MII_BCM54XX_EXP_EXP08_RJCT_2MHZ);
+		if (err < 0)
+			goto error;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0,
-				MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN |
-				MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF);
-	if (err < 0)
-		goto error;
+		if (phydev->drv->phy_id == PHY_ID_BCM50610) {
+			err = bcm50610_a0_workaround(phydev);
+			if (err < 0)
+				goto error;
+		}
+	}
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3,
-					MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
-	if (err < 0)
-		goto error;
+	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
+		int val;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75,
-				MII_BCM54XX_EXP_EXP75_VDACCTRL);
-	if (err < 0)
-		goto error;
+		val = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
+		if (val < 0)
+			goto error;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96,
-				MII_BCM54XX_EXP_EXP96_MYST);
-	if (err < 0)
-		goto error;
-
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97,
-				MII_BCM54XX_EXP_EXP97_MYST);
+		val |= MII_BCM54XX_EXP_EXP75_CM_OSC;
+		err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, val);
+	}
 
 error:
-	bcm54xx_auxctl_write(phydev,
-			     MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
-			     MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
+	/* Disable the SMDSP clock */
+	err2 = bcm54xx_auxctl_write(phydev,
+				    MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
+				    MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
 
-	return err;
+	/* Return the first error reported. */
+	return err ? err : err2;
+}
+
+static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
+{
+	int val, orig;
+	bool clk125en = true;
+
+	/* Abort if we are using an untested phy. */
+	if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
+	    BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
+	    BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
+		return;
+
+	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
+	if (val < 0)
+		return;
+
+	orig = val;
+
+	if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
+	     BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
+	    BRCM_PHY_REV(phydev) >= 0x3) {
+		/*
+		 * Here, bit 0 _disables_ CLK125 when set.
+		 * This bit is set by default.
+		 */
+		clk125en = false;
+	} else {
+		if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) {
+			/* Here, bit 0 _enables_ CLK125 when set */
+			val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
+			clk125en = false;
+		}
+	}
+
+	if (clk125en == false ||
+	    (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
+		val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS;
+	else
+		val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
+
+	if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY)
+		val |= BCM54XX_SHD_SCR3_TRDDAPD;
+
+	if (orig != val)
+		bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);
+
+	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
+	if (val < 0)
+		return;
+
+	orig = val;
+
+	if (clk125en == false ||
+	    (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
+		val |= BCM54XX_SHD_APD_EN;
+	else
+		val &= ~BCM54XX_SHD_APD_EN;
+
+	if (orig != val)
+		bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val);
 }
 
 static int bcm54xx_config_init(struct phy_device *phydev)
@@ -308,38 +407,17 @@
 	if (err < 0)
 		return err;
 
-	if (phydev->drv->phy_id == PHY_ID_BCM50610) {
-		err = bcm50610_a0_workaround(phydev);
-		if (err < 0)
-			return err;
-	}
+	if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
+	     BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
+	    (phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
+		bcm54xx_shadow_write(phydev, BCM54XX_SHD_RGMII_MODE, 0);
 
-	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
-		int err2;
+	if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) ||
+	    (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) ||
+	    (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
+		bcm54xx_adjust_rxrefclk(phydev);
 
-		err = bcm54xx_auxctl_write(phydev,
-					   MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
-					   MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
-					   MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
-		if (err < 0)
-			return err;
-
-		reg = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
-		if (reg < 0)
-			goto error;
-
-		reg |= MII_BCM54XX_EXP_EXP75_CM_OSC;
-		err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, reg);
-
-error:
-		err2 = bcm54xx_auxctl_write(phydev,
-					    MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
-					    MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
-		if (err)
-			return err;
-		if (err2)
-			return err2;
-	}
+	bcm54xx_phydsp_config(phydev);
 
 	return 0;
 }
@@ -564,9 +642,11 @@
 	if (err < 0)
 		goto done;
 
-	/* Enable auto power down */
-	err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
-				       MII_BRCM_FET_SHDW_AS2_APDE);
+	if (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE) {
+		/* Enable auto power down */
+		err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
+					       MII_BRCM_FET_SHDW_AS2_APDE);
+	}
 
 done:
 	/* Disable shadow register access */
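The SCR3 and APD updates in bcm54xx_adjust_rxrefclk() above follow a read-modify-write pattern against the BCM54xx shadow registers, writing back only when a bit actually changed. A hedged sketch of that pattern, assuming it sits in the same file as the driver's static bcm54xx_shadow_read()/bcm54xx_shadow_write() helpers; example_update_scr3() and the enable_apd flag are illustrative only.

/* Sketch of the read-modify-write-if-changed access used for shadow
 * register 00101 (Spare Control 3). */
static int example_update_scr3(struct phy_device *phydev, bool enable_apd)
{
	int val, orig;

	/* Read the current shadow register contents. */
	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
	if (val < 0)
		return val;

	orig = val;

	/* Modify only the bits of interest. */
	if (enable_apd)
		val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS;
	else
		val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;

	/* Write back only if something actually changed, to avoid
	 * redundant MDIO traffic. */
	if (val != orig)
		return bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);

	return 0;
}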
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 250e10f..8659d34 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -238,6 +238,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
 
 static struct of_platform_driver mdio_ofgpio_driver = {
 	.name = "mdio-gpio",
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 7cbf6f9..60c8d23 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -111,9 +111,6 @@
 	rwlock_t hash_lock;
 };
 
-/* to eliminate a race btw pppoe_flush_dev and pppoe_release */
-static DEFINE_SPINLOCK(flush_lock);
-
 /*
  * PPPoE could be in the following stages:
  * 1) Discovery stage (to obtain remote MAC and Session ID)
@@ -253,20 +250,19 @@
 {
 	struct net_device *dev;
 	struct pppoe_net *pn;
-	struct pppox_sock *pppox_sock;
+	struct pppox_sock *pppox_sock = NULL;
 
 	int ifindex;
 
-	dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
-	if (!dev)
-		return NULL;
-
-	ifindex = dev->ifindex;
-	pn = net_generic(net, pppoe_net_id);
-	pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
+	rcu_read_lock();
+	dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
+	if (dev) {
+		ifindex = dev->ifindex;
+		pn = net_generic(net, pppoe_net_id);
+		pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
 				sp->sa_addr.pppoe.remote, ifindex);
-	dev_put(dev);
-
+	}
+	rcu_read_unlock();
 	return pppox_sock;
 }
 
@@ -303,45 +299,48 @@
 	write_lock_bh(&pn->hash_lock);
 	for (i = 0; i < PPPOE_HASH_SIZE; i++) {
 		struct pppox_sock *po = pn->hash_table[i];
+		struct sock *sk;
 
-		while (po != NULL) {
-			struct sock *sk;
-			if (po->pppoe_dev != dev) {
+		while (po) {
+			while (po && po->pppoe_dev != dev) {
 				po = po->next;
-				continue;
 			}
+
+			if (!po)
+				break;
+
 			sk = sk_pppox(po);
-			spin_lock(&flush_lock);
-			po->pppoe_dev = NULL;
-			spin_unlock(&flush_lock);
-			dev_put(dev);
 
 			/* We always grab the socket lock, followed by the
-			 * hash_lock, in that order.  Since we should
-			 * hold the sock lock while doing any unbinding,
-			 * we need to release the lock we're holding.
-			 * Hold a reference to the sock so it doesn't disappear
-			 * as we're jumping between locks.
+			 * hash_lock, in that order.  Since we should hold the
+			 * sock lock while doing any unbinding, we need to
+			 * release the lock we're holding.  Hold a reference to
+			 * the sock so it doesn't disappear as we're jumping
+			 * between locks.
 			 */
 
 			sock_hold(sk);
-
 			write_unlock_bh(&pn->hash_lock);
 			lock_sock(sk);
 
-			if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+			if (po->pppoe_dev == dev
+			    && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
 				pppox_unbind_sock(sk);
 				sk->sk_state = PPPOX_ZOMBIE;
 				sk->sk_state_change(sk);
+				po->pppoe_dev = NULL;
+				dev_put(dev);
 			}
 
 			release_sock(sk);
 			sock_put(sk);
 
-			/* Restart scan at the beginning of this hash chain.
-			 * While the lock was dropped the chain contents may
-			 * have changed.
+			/* Restart the process from the start of the current
+			 * hash chain. We dropped locks so the world may have
+			 * changed underneath us.
 			 */
+
+			BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
 			write_lock_bh(&pn->hash_lock);
 			po = pn->hash_table[i];
 		}
@@ -388,11 +387,16 @@
 	struct pppox_sock *po = pppox_sk(sk);
 	struct pppox_sock *relay_po;
 
+	/* Backlog receive. Semantics of backlog rcv preclude any code from
+	 * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state
+	 * can't change.
+	 */
+
 	if (sk->sk_state & PPPOX_BOUND) {
 		ppp_input(&po->chan, skb);
 	} else if (sk->sk_state & PPPOX_RELAY) {
-		relay_po = get_item_by_addr(dev_net(po->pppoe_dev),
-						&po->pppoe_relay);
+		relay_po = get_item_by_addr(sock_net(sk),
+					    &po->pppoe_relay);
 		if (relay_po == NULL)
 			goto abort_kfree;
 
@@ -447,6 +451,10 @@
 		goto drop;
 
 	pn = pppoe_pernet(dev_net(dev));
+
+	/* Note that get_item does a sock_hold(), so sk_pppox(po)
+	 * is known to be safe.
+	 */
 	po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
 	if (!po)
 		goto drop;
@@ -561,6 +569,7 @@
 	struct sock *sk = sock->sk;
 	struct pppox_sock *po;
 	struct pppoe_net *pn;
+	struct net *net = NULL;
 
 	if (!sk)
 		return 0;
@@ -571,44 +580,28 @@
 		return -EBADF;
 	}
 
+	po = pppox_sk(sk);
+
+	if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+		dev_put(po->pppoe_dev);
+		po->pppoe_dev = NULL;
+	}
+
 	pppox_unbind_sock(sk);
 
 	/* Signal the death of the socket. */
 	sk->sk_state = PPPOX_DEAD;
 
-	/*
-	 * pppoe_flush_dev could lead to a race with
-	 * this routine so we use flush_lock to eliminate
-	 * such a case (we only need per-net specific data)
-	 */
-	spin_lock(&flush_lock);
-	po = pppox_sk(sk);
-	if (!po->pppoe_dev) {
-		spin_unlock(&flush_lock);
-		goto out;
-	}
-	pn = pppoe_pernet(dev_net(po->pppoe_dev));
-	spin_unlock(&flush_lock);
+	net = sock_net(sk);
+	pn = pppoe_pernet(net);
 
 	/*
 	 * protect "po" from concurrent updates
 	 * on pppoe_flush_dev
 	 */
-	write_lock_bh(&pn->hash_lock);
+	delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
+		    po->pppoe_ifindex);
 
-	po = pppox_sk(sk);
-	if (stage_session(po->pppoe_pa.sid))
-		__delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
-				po->pppoe_ifindex);
-
-	if (po->pppoe_dev) {
-		dev_put(po->pppoe_dev);
-		po->pppoe_dev = NULL;
-	}
-
-	write_unlock_bh(&pn->hash_lock);
-
-out:
 	sock_orphan(sk);
 	sock->sk = NULL;
 
@@ -625,8 +618,9 @@
 	struct sock *sk = sock->sk;
 	struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
 	struct pppox_sock *po = pppox_sk(sk);
-	struct net_device *dev;
+	struct net_device *dev = NULL;
 	struct pppoe_net *pn;
+	struct net *net = NULL;
 	int error;
 
 	lock_sock(sk);
@@ -652,12 +646,14 @@
 	/* Delete the old binding */
 	if (stage_session(po->pppoe_pa.sid)) {
 		pppox_unbind_sock(sk);
+		pn = pppoe_pernet(sock_net(sk));
+		delete_item(pn, po->pppoe_pa.sid,
+			    po->pppoe_pa.remote, po->pppoe_ifindex);
 		if (po->pppoe_dev) {
-			pn = pppoe_pernet(dev_net(po->pppoe_dev));
-			delete_item(pn, po->pppoe_pa.sid,
-				po->pppoe_pa.remote, po->pppoe_ifindex);
 			dev_put(po->pppoe_dev);
+			po->pppoe_dev = NULL;
 		}
+
 		memset(sk_pppox(po) + 1, 0,
 		       sizeof(struct pppox_sock) - sizeof(struct sock));
 		sk->sk_state = PPPOX_NONE;
@@ -666,16 +662,15 @@
 	/* Re-bind in session stage only */
 	if (stage_session(sp->sa_addr.pppoe.sid)) {
 		error = -ENODEV;
-		dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev);
+		net = sock_net(sk);
+		dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
 		if (!dev)
-			goto end;
+			goto err_put;
 
 		po->pppoe_dev = dev;
 		po->pppoe_ifindex = dev->ifindex;
-		pn = pppoe_pernet(dev_net(dev));
-		write_lock_bh(&pn->hash_lock);
+		pn = pppoe_pernet(net);
 		if (!(dev->flags & IFF_UP)) {
-			write_unlock_bh(&pn->hash_lock);
 			goto err_put;
 		}
 
@@ -683,6 +678,7 @@
 		       &sp->sa_addr.pppoe,
 		       sizeof(struct pppoe_addr));
 
+		write_lock_bh(&pn->hash_lock);
 		error = __set_item(pn, po);
 		write_unlock_bh(&pn->hash_lock);
 		if (error < 0)
@@ -696,8 +692,11 @@
 		po->chan.ops = &pppoe_chan_ops;
 
 		error = ppp_register_net_channel(dev_net(dev), &po->chan);
-		if (error)
+		if (error) {
+			delete_item(pn, po->pppoe_pa.sid,
+				    po->pppoe_pa.remote, po->pppoe_ifindex);
 			goto err_put;
+		}
 
 		sk->sk_state = PPPOX_CONNECTED;
 	}
@@ -915,6 +914,14 @@
 	struct pppoe_hdr *ph;
 	int data_len = skb->len;
 
+	/* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP
+	 * xmit operations conclude prior to an unregistration call.  Thus
+	 * sk->sk_state cannot change, so we don't need to do lock_sock().
+	 * But, we also can't do a lock_sock since that introduces a potential
+	 * deadlock as we'd reverse the lock ordering used when calling
+	 * ppp_unregister_channel().
+	 */
+
 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
 		goto abort;
 
@@ -944,7 +951,6 @@
 			po->pppoe_pa.remote, NULL, data_len);
 
 	dev_queue_xmit(skb);
-
 	return 1;
 
 abort:
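The flush loop above encodes the lock-ordering rule spelled out in its comment: take the sock lock before the hash lock, keep a reference while the hash lock is dropped, and restart the chain scan once the hash lock is re-taken. The following is a simplified, hypothetical sketch of that pattern; struct example_entry, example_chain and the teardown step are stand-ins, not the pppoe hash table.

/* Hypothetical sketch of "drop the hash lock, keep a socket reference,
 * re-take the lock and restart the chain" as used above. */
#include <net/sock.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct example_entry {
	struct sock *sk;
	struct example_entry *next;
};

static DEFINE_RWLOCK(example_hash_lock);
static struct example_entry *example_chain;

static void example_flush_chain(void)
{
	struct example_entry *e;

	write_lock_bh(&example_hash_lock);
	while ((e = example_chain)) {
		struct sock *sk = e->sk;

		/* The sock lock must be taken before the hash lock, so drop
		 * the hash lock; hold a reference so the sock stays valid. */
		sock_hold(sk);
		/* Unlink first so restarting from the head terminates. */
		example_chain = e->next;
		write_unlock_bh(&example_hash_lock);

		lock_sock(sk);
		/* ... tear down state that requires the sock lock ... */
		release_sock(sk);
		sock_put(sk);
		kfree(e);

		/* Re-take the hash lock and look at the (possibly changed)
		 * head of the chain again. */
		write_lock_bh(&example_hash_lock);
	}
	write_unlock_bh(&example_hash_lock);
}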
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index c14ee24..ac806b2 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -104,7 +104,8 @@
 
 EXPORT_SYMBOL(pppox_ioctl);
 
-static int pppox_create(struct net *net, struct socket *sock, int protocol)
+static int pppox_create(struct net *net, struct socket *sock, int protocol,
+			int kern)
 {
 	int rc = -EPROTOTYPE;
 
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 4214717..1f59f05 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -56,7 +56,8 @@
 		MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
 #define SMALL_BUFFER_SIZE 512
 #define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
-#define LARGE_BUFFER_SIZE	PAGE_SIZE
+#define LARGE_BUFFER_MAX_SIZE 8192
+#define LARGE_BUFFER_MIN_SIZE 2048
 #define MAX_SPLIT_SIZE 1023
 #define QLGE_SB_PAD 32
 
@@ -96,6 +97,7 @@
 
 	/* Misc. stuff */
 	MAILBOX_COUNT = 16,
+	MAILBOX_TIMEOUT = 5,
 
 	PROC_ADDR_RDY = (1 << 31),
 	PROC_ADDR_R = (1 << 30),
@@ -795,6 +797,7 @@
 	MB_WOL_BCAST = (1 << 5),
 	MB_WOL_LINK_UP = (1 << 6),
 	MB_WOL_LINK_DOWN = (1 << 7),
+	MB_WOL_MODE_ON = (1 << 16),		/* Wake on Lan Mode on */
 	MB_CMD_SET_WOL_FLTR = 0x00000111,	/* Wake On Lan Filter */
 	MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
 	MB_CMD_SET_WOL_MAGIC = 0x00000113,	/* Wake On Lan Magic Packet */
@@ -804,12 +807,27 @@
 	MB_CMD_SET_PORT_CFG = 0x00000122,
 	MB_CMD_GET_PORT_CFG = 0x00000123,
 	MB_CMD_GET_LINK_STS = 0x00000124,
+	MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
+		QL_LED_BLINK = 0x03e803e8,
+	MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
 	MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
 	MB_SET_MPI_TFK_STOP = (1 << 0),
 	MB_SET_MPI_TFK_RESUME = (1 << 1),
 	MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
 	MB_GET_MPI_TFK_STOPPED = (1 << 0),
 	MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
+	/* Sub-commands for IDC request.
+	 * This describes the reason for the
+	 * IDC request.
+	 */
+	MB_CMD_IOP_NONE = 0x0000,
+	MB_CMD_IOP_PREP_UPDATE_MPI	= 0x0001,
+	MB_CMD_IOP_COMP_UPDATE_MPI	= 0x0002,
+	MB_CMD_IOP_PREP_LINK_DOWN	= 0x0010,
+	MB_CMD_IOP_DVR_START	 = 0x0100,
+	MB_CMD_IOP_FLASH_ACC	 = 0x0101,
+	MB_CMD_IOP_RESTART_MPI	= 0x0102,
+	MB_CMD_IOP_CORE_DUMP_MPI	= 0x0103,
 
 	/* Mailbox Command Status. */
 	MB_CMD_STS_GOOD = 0x00004000,	/* Success. */
@@ -1201,9 +1219,17 @@
 	struct tx_ring_desc *next;
 };
 
+struct page_chunk {
+	struct page *page;	/* master page */
+	char *va;		/* virt addr for this chunk */
+	u64 map;		/* mapping for master */
+	unsigned int offset;	/* offset for this chunk */
+	unsigned int last_flag; /* flag set for last chunk in page */
+};
+
 struct bq_desc {
 	union {
-		struct page *lbq_page;
+		struct page_chunk pg_chunk;
 		struct sk_buff *skb;
 	} p;
 	__le64 *addr;
@@ -1237,6 +1263,9 @@
 	atomic_t queue_stopped;	/* Turns queue off when full. */
 	struct delayed_work tx_work;
 	struct ql_adapter *qdev;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 tx_errors;
 };
 
 /*
@@ -1272,6 +1301,7 @@
 	dma_addr_t lbq_base_dma;
 	void *lbq_base_indirect;
 	dma_addr_t lbq_base_indirect_dma;
+	struct page_chunk pg_chunk; /* current page for chunks */
 	struct bq_desc *lbq;	/* array of control blocks */
 	void __iomem *lbq_prod_idx_db_reg;	/* PCI doorbell mem area + 0x18 */
 	u32 lbq_prod_idx;	/* current sw prod idx */
@@ -1302,6 +1332,11 @@
 	struct napi_struct napi;
 	u8 reserved;
 	struct ql_adapter *qdev;
+	u64 rx_packets;
+	u64 rx_multicast;
+	u64 rx_bytes;
+	u64 rx_dropped;
+	u64 rx_errors;
 };
 
 /*
@@ -1386,6 +1421,153 @@
 	u64 rx_nic_fifo_drop;
 };
 
+/* Address/Length pairs for the coredump. */
+enum {
+	MPI_CORE_REGS_ADDR = 0x00030000,
+	MPI_CORE_REGS_CNT = 127,
+	MPI_CORE_SH_REGS_CNT = 16,
+	TEST_REGS_ADDR = 0x00001000,
+	TEST_REGS_CNT = 23,
+	RMII_REGS_ADDR = 0x00001040,
+	RMII_REGS_CNT = 64,
+	FCMAC1_REGS_ADDR = 0x00001080,
+	FCMAC2_REGS_ADDR = 0x000010c0,
+	FCMAC_REGS_CNT = 64,
+	FC1_MBX_REGS_ADDR = 0x00001100,
+	FC2_MBX_REGS_ADDR = 0x00001240,
+	FC_MBX_REGS_CNT = 64,
+	IDE_REGS_ADDR = 0x00001140,
+	IDE_REGS_CNT = 64,
+	NIC1_MBX_REGS_ADDR = 0x00001180,
+	NIC2_MBX_REGS_ADDR = 0x00001280,
+	NIC_MBX_REGS_CNT = 64,
+	SMBUS_REGS_ADDR = 0x00001200,
+	SMBUS_REGS_CNT = 64,
+	I2C_REGS_ADDR = 0x00001fc0,
+	I2C_REGS_CNT = 64,
+	MEMC_REGS_ADDR = 0x00003000,
+	MEMC_REGS_CNT = 256,
+	PBUS_REGS_ADDR = 0x00007c00,
+	PBUS_REGS_CNT = 256,
+	MDE_REGS_ADDR = 0x00010000,
+	MDE_REGS_CNT = 6,
+	CODE_RAM_ADDR = 0x00020000,
+	CODE_RAM_CNT = 0x2000,
+	MEMC_RAM_ADDR = 0x00100000,
+	MEMC_RAM_CNT = 0x2000,
+};
+
+#define MPI_COREDUMP_COOKIE 0x5555aaaa
+struct mpi_coredump_global_header {
+	u32	cookie;
+	u8	idString[16];
+	u32	timeLo;
+	u32	timeHi;
+	u32	imageSize;
+	u32	headerSize;
+	u8	info[220];
+};
+
+struct mpi_coredump_segment_header {
+	u32	cookie;
+	u32	segNum;
+	u32	segSize;
+	u32	extra;
+	u8	description[16];
+};
+
+/* Reg dump segment numbers. */
+enum {
+	CORE_SEG_NUM = 1,
+	TEST_LOGIC_SEG_NUM = 2,
+	RMII_SEG_NUM = 3,
+	FCMAC1_SEG_NUM = 4,
+	FCMAC2_SEG_NUM = 5,
+	FC1_MBOX_SEG_NUM = 6,
+	IDE_SEG_NUM = 7,
+	NIC1_MBOX_SEG_NUM = 8,
+	SMBUS_SEG_NUM = 9,
+	FC2_MBOX_SEG_NUM = 10,
+	NIC2_MBOX_SEG_NUM = 11,
+	I2C_SEG_NUM = 12,
+	MEMC_SEG_NUM = 13,
+	PBUS_SEG_NUM = 14,
+	MDE_SEG_NUM = 15,
+	NIC1_CONTROL_SEG_NUM = 16,
+	NIC2_CONTROL_SEG_NUM = 17,
+	NIC1_XGMAC_SEG_NUM = 18,
+	NIC2_XGMAC_SEG_NUM = 19,
+	WCS_RAM_SEG_NUM = 20,
+	MEMC_RAM_SEG_NUM = 21,
+	XAUI_AN_SEG_NUM = 22,
+	XAUI_HSS_PCS_SEG_NUM = 23,
+	XFI_AN_SEG_NUM = 24,
+	XFI_TRAIN_SEG_NUM = 25,
+	XFI_HSS_PCS_SEG_NUM = 26,
+	XFI_HSS_TX_SEG_NUM = 27,
+	XFI_HSS_RX_SEG_NUM = 28,
+	XFI_HSS_PLL_SEG_NUM = 29,
+	MISC_NIC_INFO_SEG_NUM = 30,
+	INTR_STATES_SEG_NUM = 31,
+	CAM_ENTRIES_SEG_NUM = 32,
+	ROUTING_WORDS_SEG_NUM = 33,
+	ETS_SEG_NUM = 34,
+	PROBE_DUMP_SEG_NUM = 35,
+	ROUTING_INDEX_SEG_NUM = 36,
+	MAC_PROTOCOL_SEG_NUM = 37,
+	XAUI2_AN_SEG_NUM = 38,
+	XAUI2_HSS_PCS_SEG_NUM = 39,
+	XFI2_AN_SEG_NUM = 40,
+	XFI2_TRAIN_SEG_NUM = 41,
+	XFI2_HSS_PCS_SEG_NUM = 42,
+	XFI2_HSS_TX_SEG_NUM = 43,
+	XFI2_HSS_RX_SEG_NUM = 44,
+	XFI2_HSS_PLL_SEG_NUM = 45,
+	SEM_REGS_SEG_NUM = 50
+
+};
+
+struct ql_nic_misc {
+	u32 rx_ring_count;
+	u32 tx_ring_count;
+	u32 intr_count;
+	u32 function;
+};
+
+struct ql_reg_dump {
+
+	/* segment 0 */
+	struct mpi_coredump_global_header mpi_global_header;
+
+	/* segment 16 */
+	struct mpi_coredump_segment_header nic_regs_seg_hdr;
+	u32 nic_regs[64];
+
+	/* segment 30 */
+	struct mpi_coredump_segment_header misc_nic_seg_hdr;
+	struct ql_nic_misc misc_nic_info;
+
+	/* segment 31 */
+	/* one interrupt state for each CQ */
+	struct mpi_coredump_segment_header intr_states_seg_hdr;
+	u32 intr_states[MAX_CPUS];
+
+	/* segment 32 */
+	/* 3 cam words each for 16 unicast,
+	 * 2 cam words for each of 32 multicast.
+	 */
+	struct mpi_coredump_segment_header cam_entries_seg_hdr;
+	u32 cam_entries[(16 * 3) + (32 * 3)];
+
+	/* segment 33 */
+	struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+	u32 nic_routing_words[16];
+
+	/* segment 34 */
+	struct mpi_coredump_segment_header ets_seg_hdr;
+	u32 ets[8+2];
+};
+
 /*
  * intr_context structure is used during initialization
  * to hook the interrupts.  It is also used in a single
@@ -1419,6 +1601,8 @@
 	QL_ALLMULTI = 6,
 	QL_PORT_CFG = 7,
 	QL_CAM_RT_SET = 8,
+	QL_SELFTEST = 9,
+	QL_LB_LINK_UP = 10,
 };
 
 /* link_status bit definitions */
@@ -1526,6 +1710,7 @@
 
 	struct rx_ring rx_ring[MAX_RX_RINGS];
 	struct tx_ring tx_ring[MAX_TX_RINGS];
+	unsigned int lbq_buf_order;
 
 	int rx_csum;
 	u32 default_rx_queue;
@@ -1540,6 +1725,7 @@
 	u32 port_init;
 	u32 link_status;
 	u32 link_config;
+	u32 led_config;
 	u32 max_frame_size;
 
 	union flash_params flash;
@@ -1553,6 +1739,7 @@
 	struct completion ide_completion;
 	struct nic_operations *nic_ops;
 	u16 device_id;
+	atomic_t lb_count;
 };
 
 /*
@@ -1631,10 +1818,22 @@
 int ql_cam_route_initialize(struct ql_adapter *qdev);
 int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
 int ql_mb_about_fw(struct ql_adapter *qdev);
+int ql_wol(struct ql_adapter *qdev);
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
+int ql_mb_get_led_cfg(struct ql_adapter *qdev);
 void ql_link_on(struct ql_adapter *qdev);
 void ql_link_off(struct ql_adapter *qdev);
 int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
+int ql_mb_get_port_cfg(struct ql_adapter *qdev);
+int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_gen_reg_dump(struct ql_adapter *qdev,
+			struct ql_reg_dump *mpi_coredump);
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
+void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
 
 #if 1
 #define QL_ALL_DUMP
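The page_chunk fields added above carve one higher-order "master" page into several receive buffers; each descriptor records its offset, and last_flag marks the chunk whose completion unmaps the whole block. A small standalone model of that bookkeeping follows; the page size, order and buffer size are example values only, not the driver's runtime configuration.

/* Standalone model of the lbq page-chunk bookkeeping. */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_BUF_ORDER	1u	/* 8 KB master block, as for jumbo MTU */
#define EXAMPLE_BUF_SIZE	2048u	/* LARGE_BUFFER_MIN_SIZE */

int main(void)
{
	unsigned int block = EXAMPLE_PAGE_SIZE << EXAMPLE_BUF_ORDER;
	unsigned int offset;

	for (offset = 0; offset < block; offset += EXAMPLE_BUF_SIZE) {
		int last = (offset + EXAMPLE_BUF_SIZE == block);

		/* Each chunk becomes one lbq descriptor; only the chunk that
		 * ends the master block carries last_flag and triggers the
		 * unmap of the whole page. */
		printf("chunk at offset %5u, last_flag=%d\n", offset, last);
	}
	return 0;
}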
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index aa88cb3..9f58c47 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,185 @@
 #include "qlge.h"
 
+
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
+{
+	int status = 0;
+	int i;
+
+	for (i = 0; i < 8; i++, buf++) {
+		ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
+		*buf = ql_read32(qdev, NIC_ETS);
+	}
+
+	for (i = 0; i < 2; i++, buf++) {
+		ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
+		*buf = ql_read32(qdev, CNA_ETS);
+	}
+
+	return status;
+}
+
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
+{
+	int i;
+
+	for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
+		ql_write32(qdev, INTR_EN,
+				qdev->intr_context[i].intr_read_mask);
+		*buf = ql_read32(qdev, INTR_EN);
+	}
+}
+
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
+{
+	int i, status;
+	u32 value[3];
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+
+	for (i = 0; i < 16; i++) {
+		status = ql_get_mac_addr_reg(qdev,
+					MAC_ADDR_TYPE_CAM_MAC, i, value);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed read of mac index register.\n");
+			goto err;
+		}
+		*buf++ = value[0];	/* lower MAC address */
+		*buf++ = value[1];	/* upper MAC address */
+		*buf++ = value[2];	/* output */
+	}
+	for (i = 0; i < 32; i++) {
+		status = ql_get_mac_addr_reg(qdev,
+					MAC_ADDR_TYPE_MULTI_MAC, i, value);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed read of mac index register.\n");
+			goto err;
+		}
+		*buf++ = value[0];	/* lower Mcast address */
+		*buf++ = value[1];	/* upper Mcast address */
+	}
+err:
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	return status;
+}
+
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
+{
+	int status;
+	u32 value, i;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		return status;
+
+	for (i = 0; i < 16; i++) {
+		status = ql_get_routing_reg(qdev, i, &value);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed read of routing index register.\n");
+			goto err;
+		} else {
+			*buf++ = value;
+		}
+	}
+err:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+	return status;
+}
+
+/* Create a coredump segment header */
+static void ql_build_coredump_seg_header(
+		struct mpi_coredump_segment_header *seg_hdr,
+		u32 seg_number, u32 seg_size, u8 *desc)
+{
+	memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
+	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+	seg_hdr->segNum = seg_number;
+	seg_hdr->segSize = seg_size;
+	memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+}
+
+void ql_gen_reg_dump(struct ql_adapter *qdev,
+			struct ql_reg_dump *mpi_coredump)
+{
+	int i, status;
+
+
+	memset(&(mpi_coredump->mpi_global_header), 0,
+		sizeof(struct mpi_coredump_global_header));
+	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+	mpi_coredump->mpi_global_header.headerSize =
+		sizeof(struct mpi_coredump_global_header);
+	mpi_coredump->mpi_global_header.imageSize =
+		sizeof(struct ql_reg_dump);
+	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+		sizeof(mpi_coredump->mpi_global_header.idString));
+
+
+	/* segment 16 */
+	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+				MISC_NIC_INFO_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->misc_nic_info),
+				"MISC NIC INFO");
+	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+	mpi_coredump->misc_nic_info.function = qdev->func;
+
+	/* Segment 16, Rev C. Step 18 */
+	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+				NIC1_CONTROL_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->nic_regs),
+				"NIC Registers");
+	/* Get generic reg dump */
+	for (i = 0; i < 64; i++)
+		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+
+	/* Segment 31 */
+	/* Get indexed register values. */
+	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+				INTR_STATES_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->intr_states),
+				"INTR States");
+	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+				CAM_ENTRIES_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->cam_entries),
+				"CAM Entries");
+	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+	if (status)
+		return;
+
+	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+				ROUTING_WORDS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->nic_routing_words),
+				"Routing Words");
+	status = ql_get_routing_entries(qdev,
+			 &mpi_coredump->nic_routing_words[0]);
+	if (status)
+		return;
+
+	/* Segment 34 (Rev C. step 23) */
+	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+				ETS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->ets),
+				"ETS Registers");
+	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+	if (status)
+		return;
+}
+
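Every segment written by ql_gen_reg_dump() above starts with the same cookie/segment-number/size header. A standalone sketch of how one such header is filled in follows; the struct is re-declared here with stdint types and strncpy is used to stay within the source string, so it is illustrative rather than the driver's exact code.

/* Standalone illustration of filling a coredump segment header, mirroring
 * ql_build_coredump_seg_header() above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXAMPLE_COREDUMP_COOKIE 0x5555aaaa

struct example_seg_header {
	uint32_t cookie;
	uint32_t seg_num;
	uint32_t seg_size;
	uint32_t extra;
	uint8_t  description[16];
};

static void example_build_seg_header(struct example_seg_header *hdr,
				     uint32_t num, uint32_t size,
				     const char *desc)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->cookie = EXAMPLE_COREDUMP_COOKIE;
	hdr->seg_num = num;
	hdr->seg_size = size;
	/* Leave the last byte as NUL, as the driver does. */
	strncpy((char *)hdr->description, desc, sizeof(hdr->description) - 1);
}

int main(void)
{
	struct example_seg_header hdr;

	/* Segment size = header + payload, e.g. 16 routing words. */
	example_build_seg_header(&hdr, 33,
				 sizeof(hdr) + 16 * sizeof(uint32_t),
				 "Routing Words");
	printf("seg %u: %u bytes (%s)\n", hdr.seg_num, hdr.seg_size,
	       (char *)hdr.description);
	return 0;
}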
 #ifdef QL_REG_DUMP
 static void ql_dump_intr_states(struct ql_adapter *qdev)
 {
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index aac6c6f..058fa0a 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -36,6 +36,11 @@
 
 #include "qlge.h"
 
+static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Loopback test  (offline)"
+};
+#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
+
 static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 {
 	int i, status = 0;
@@ -251,6 +256,8 @@
 static int ql_get_sset_count(struct net_device *dev, int sset)
 {
 	switch (sset) {
+	case ETH_SS_TEST:
+		return QLGE_TEST_LEN;
 	case ETH_SS_STATS:
 		return ARRAY_SIZE(ql_stats_str_arr);
 	default:
@@ -371,6 +378,181 @@
 	drvinfo->eedump_len = 0;
 }
 
+static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	/* What we support. */
+	wol->supported = WAKE_MAGIC;
+	/* What we've currently got set. */
+	wol->wolopts = qdev->wol;
+}
+
+static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int status;
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+	qdev->wol = wol->wolopts;
+
+	QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
+			 qdev->wol, ndev->name);
+	if (!qdev->wol) {
+		u32 wol = 0;
+		status = ql_mb_wol_mode(qdev, wol);
+		QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
+			(status == 0) ? "cleared successfully" : "clear failed",
+			wol, qdev->ndev->name);
+	}
+
+	return 0;
+}
+
+static int ql_phys_id(struct net_device *ndev, u32 data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	u32 led_reg, i;
+	int status;
+
+	/* Save the current LED settings */
+	status = ql_mb_get_led_cfg(qdev);
+	if (status)
+		return status;
+	led_reg = qdev->led_config;
+
+	/* Start blinking the led */
+	if (!data || data > 300)
+		data = 300;
+
+	for (i = 0; i < (data * 10); i++)
+		ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
+
+	/* Restore LED settings */
+	status = ql_mb_set_led_cfg(qdev, led_reg);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+static int ql_start_loopback(struct ql_adapter *qdev)
+{
+	if (netif_carrier_ok(qdev->ndev)) {
+		set_bit(QL_LB_LINK_UP, &qdev->flags);
+		netif_carrier_off(qdev->ndev);
+	} else
+		clear_bit(QL_LB_LINK_UP, &qdev->flags);
+	qdev->link_config |= CFG_LOOPBACK_PCS;
+	return ql_mb_set_port_cfg(qdev);
+}
+
+static void ql_stop_loopback(struct ql_adapter *qdev)
+{
+	qdev->link_config &= ~CFG_LOOPBACK_PCS;
+	ql_mb_set_port_cfg(qdev);
+	if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
+		netif_carrier_on(qdev->ndev);
+		clear_bit(QL_LB_LINK_UP, &qdev->flags);
+	}
+}
+
+static void ql_create_lb_frame(struct sk_buff *skb,
+					unsigned int frame_size)
+{
+	memset(skb->data, 0xFF, frame_size);
+	frame_size &= ~1;
+	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+void ql_check_lb_frame(struct ql_adapter *qdev,
+					struct sk_buff *skb)
+{
+	unsigned int frame_size = skb->len;
+
+	if ((*(skb->data + 3) == 0xFF) &&
+		(*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+		(*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+			atomic_dec(&qdev->lb_count);
+			return;
+	}
+}
+
+static int ql_run_loopback_test(struct ql_adapter *qdev)
+{
+	int i;
+	netdev_tx_t rc;
+	struct sk_buff *skb;
+	unsigned int size = SMALL_BUF_MAP_SIZE;
+
+	for (i = 0; i < 64; i++) {
+		skb = netdev_alloc_skb(qdev->ndev, size);
+		if (!skb)
+			return -ENOMEM;
+
+		skb->queue_mapping = 0;
+		skb_put(skb, size);
+		ql_create_lb_frame(skb, size);
+		rc = ql_lb_send(skb, qdev->ndev);
+		if (rc != NETDEV_TX_OK)
+			return -EPIPE;
+		atomic_inc(&qdev->lb_count);
+	}
+
+	ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
+	return atomic_read(&qdev->lb_count) ? -EIO : 0;
+}
+
+static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
+{
+	*data = ql_start_loopback(qdev);
+	if (*data)
+		goto out;
+	*data = ql_run_loopback_test(qdev);
+out:
+	ql_stop_loopback(qdev);
+	return *data;
+}
+
+static void ql_self_test(struct net_device *ndev,
+				struct ethtool_test *eth_test, u64 *data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		set_bit(QL_SELFTEST, &qdev->flags);
+		if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+			/* Offline tests */
+			if (ql_loopback_test(qdev, &data[0]))
+				eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		} else {
+			/* Online tests */
+			data[0] = 0;
+		}
+		clear_bit(QL_SELFTEST, &qdev->flags);
+	} else {
+		QPRINTK(qdev, DRV, ERR,
+			"%s is down, loopback test will fail.\n", ndev->name);
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+	}
+}
+
+static int ql_get_regs_len(struct net_device *ndev)
+{
+	return sizeof(struct ql_reg_dump);
+}
+
+static void ql_get_regs(struct net_device *ndev,
+			struct ethtool_regs *regs, void *p)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	ql_gen_reg_dump(qdev, p);
+}
+
 static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 {
 	struct ql_adapter *qdev = netdev_priv(dev);
@@ -424,6 +606,37 @@
 	return ql_update_ring_coalescing(qdev);
 }
 
+static void ql_get_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *pause)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+
+	ql_mb_get_port_cfg(qdev);
+	if (qdev->link_config & CFG_PAUSE_STD) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+static int ql_set_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *pause)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+	int status = 0;
+
+	if ((pause->rx_pause) && (pause->tx_pause))
+		qdev->link_config |= CFG_PAUSE_STD;
+	else if (!pause->rx_pause && !pause->tx_pause)
+		qdev->link_config &= ~CFG_PAUSE_STD;
+	else
+		return -EINVAL;
+
+	status = ql_mb_set_port_cfg(qdev);
+	if (status)
+		return status;
+	return status;
+}
+
 static u32 ql_get_rx_csum(struct net_device *netdev)
 {
 	struct ql_adapter *qdev = netdev_priv(netdev);
@@ -465,9 +678,17 @@
 const struct ethtool_ops qlge_ethtool_ops = {
 	.get_settings = ql_get_settings,
 	.get_drvinfo = ql_get_drvinfo,
+	.get_wol = ql_get_wol,
+	.set_wol = ql_set_wol,
+	.get_regs_len	= ql_get_regs_len,
+	.get_regs = ql_get_regs,
 	.get_msglevel = ql_get_msglevel,
 	.set_msglevel = ql_set_msglevel,
 	.get_link = ethtool_op_get_link,
+	.phys_id		 = ql_phys_id,
+	.self_test		 = ql_self_test,
+	.get_pauseparam		 = ql_get_pauseparam,
+	.set_pauseparam		 = ql_set_pauseparam,
 	.get_rx_csum = ql_get_rx_csum,
 	.set_rx_csum = ql_set_rx_csum,
 	.get_tx_csum = ethtool_op_get_tx_csum,
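ql_create_lb_frame() and ql_check_lb_frame() above recognize returned loopback frames by a fixed fill pattern plus two marker bytes. Below is a standalone model of the same pattern check; the 256-byte frame size is an example, where the driver uses SMALL_BUF_MAP_SIZE.

/* Standalone model of the loopback self-test frame pattern: fill with 0xFF,
 * overwrite the second half with 0xAA, then drop two marker bytes that the
 * receive path looks for. */
#include <stdio.h>
#include <string.h>

#define LB_FRAME_SIZE 256	/* example size only */

static void create_lb_frame(unsigned char *data, unsigned int frame_size)
{
	memset(data, 0xFF, frame_size);
	frame_size &= ~1;
	memset(&data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	data[frame_size / 2 + 10] = 0xBE;
	data[frame_size / 2 + 12] = 0xAF;
}

static int check_lb_frame(const unsigned char *data, unsigned int frame_size)
{
	return data[3] == 0xFF &&
	       data[frame_size / 2 + 10] == 0xBE &&
	       data[frame_size / 2 + 12] == 0xAF;
}

int main(void)
{
	unsigned char frame[LB_FRAME_SIZE];

	create_lb_frame(frame, sizeof(frame));
	printf("loopback frame %s\n",
	       check_lb_frame(frame, sizeof(frame)) ? "matches" : "corrupted");
	return 0;
}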
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 8176139..0de596a 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1025,6 +1025,11 @@
 	return status;
 }
 
+static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+{
+	return PAGE_SIZE << qdev->lbq_buf_order;
+}
+
 /* Get the next large buffer. */
 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 {
@@ -1036,6 +1041,28 @@
 	return lbq_desc;
 }
 
+static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+		struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+
+	pci_dma_sync_single_for_cpu(qdev->pdev,
+					pci_unmap_addr(lbq_desc, mapaddr),
+				    rx_ring->lbq_buf_size,
+					PCI_DMA_FROMDEVICE);
+
+	/* If it's the last chunk of our master page then
+	 * we unmap it.
+	 */
+	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
+					== ql_lbq_block_size(qdev))
+		pci_unmap_page(qdev->pdev,
+				lbq_desc->p.pg_chunk.map,
+				ql_lbq_block_size(qdev),
+				PCI_DMA_FROMDEVICE);
+	return lbq_desc;
+}
+
 /* Get the next small buffer. */
 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
 {
@@ -1063,6 +1090,53 @@
 	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
 }
 
+static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
+						struct bq_desc *lbq_desc)
+{
+	if (!rx_ring->pg_chunk.page) {
+		u64 map;
+		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
+						GFP_ATOMIC,
+						qdev->lbq_buf_order);
+		if (unlikely(!rx_ring->pg_chunk.page)) {
+			QPRINTK(qdev, DRV, ERR,
+				"page allocation failed.\n");
+			return -ENOMEM;
+		}
+		rx_ring->pg_chunk.offset = 0;
+		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
+					0, ql_lbq_block_size(qdev),
+					PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(qdev->pdev, map)) {
+			__free_pages(rx_ring->pg_chunk.page,
+					qdev->lbq_buf_order);
+			QPRINTK(qdev, DRV, ERR,
+				"PCI mapping failed.\n");
+			return -ENOMEM;
+		}
+		rx_ring->pg_chunk.map = map;
+		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+	}
+
+	/* Copy the current master pg_chunk info
+	 * to the current descriptor.
+	 */
+	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+
+	/* Adjust the master page chunk for next
+	 * buffer get.
+	 */
+	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
+	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
+		rx_ring->pg_chunk.page = NULL;
+		lbq_desc->p.pg_chunk.last_flag = 1;
+	} else {
+		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
+		get_page(rx_ring->pg_chunk.page);
+		lbq_desc->p.pg_chunk.last_flag = 0;
+	}
+	return 0;
+}
 /* Process (refill) a large buffer queue. */
 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
@@ -1072,39 +1146,28 @@
 	u64 map;
 	int i;
 
-	while (rx_ring->lbq_free_cnt > 16) {
+	while (rx_ring->lbq_free_cnt > 32) {
 		for (i = 0; i < 16; i++) {
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"lbq: try cleaning clean_idx = %d.\n",
 				clean_idx);
 			lbq_desc = &rx_ring->lbq[clean_idx];
-			if (lbq_desc->p.lbq_page == NULL) {
-				QPRINTK(qdev, RX_STATUS, DEBUG,
-					"lbq: getting new page for index %d.\n",
-					lbq_desc->index);
-				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
-				if (lbq_desc->p.lbq_page == NULL) {
-					rx_ring->lbq_clean_idx = clean_idx;
-					QPRINTK(qdev, RX_STATUS, ERR,
-						"Couldn't get a page.\n");
+			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Could not get a page chunk.\n");
 					return;
 				}
-				map = pci_map_page(qdev->pdev,
-						   lbq_desc->p.lbq_page,
-						   0, PAGE_SIZE,
-						   PCI_DMA_FROMDEVICE);
-				if (pci_dma_mapping_error(qdev->pdev, map)) {
-					rx_ring->lbq_clean_idx = clean_idx;
-					put_page(lbq_desc->p.lbq_page);
-					lbq_desc->p.lbq_page = NULL;
-					QPRINTK(qdev, RX_STATUS, ERR,
-						"PCI mapping failed.\n");
-					return;
-				}
+
+			map = lbq_desc->p.pg_chunk.map +
+				lbq_desc->p.pg_chunk.offset;
 				pci_unmap_addr_set(lbq_desc, mapaddr, map);
-				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
+			pci_unmap_len_set(lbq_desc, maplen,
+					rx_ring->lbq_buf_size);
 				*lbq_desc->addr = cpu_to_le64(map);
-			}
+
+			pci_dma_sync_single_for_device(qdev->pdev, map,
+						rx_ring->lbq_buf_size,
+						PCI_DMA_FROMDEVICE);
 			clean_idx++;
 			if (clean_idx == rx_ring->lbq_len)
 				clean_idx = 0;
@@ -1480,27 +1543,24 @@
 			 * chain it to the header buffer's skb and let
 			 * it rip.
 			 */
-			lbq_desc = ql_get_curr_lbuf(rx_ring);
-			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(lbq_desc,
-						      mapaddr),
-				       pci_unmap_len(lbq_desc, maplen),
-				       PCI_DMA_FROMDEVICE);
+			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 			QPRINTK(qdev, RX_STATUS, DEBUG,
-				"Chaining page to skb.\n");
-			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
-					   0, length);
+				"Chaining page at offset = %d, "
+				"for %d bytes to skb.\n",
+				lbq_desc->p.pg_chunk.offset, length);
+			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+						lbq_desc->p.pg_chunk.offset,
+						length);
 			skb->len += length;
 			skb->data_len += length;
 			skb->truesize += length;
-			lbq_desc->p.lbq_page = NULL;
 		} else {
 			/*
 			 * The headers and data are in a single large buffer. We
 			 * copy it to a new skb and let it go. This can happen with
 			 * jumbo mtu on a non-TCP/UDP frame.
 			 */
-			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 			skb = netdev_alloc_skb(qdev->ndev, length);
 			if (skb == NULL) {
 				QPRINTK(qdev, PROBE, DEBUG,
@@ -1515,13 +1575,14 @@
 			skb_reserve(skb, NET_IP_ALIGN);
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
-			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
-					   0, length);
+			skb_fill_page_desc(skb, 0,
+						lbq_desc->p.pg_chunk.page,
+						lbq_desc->p.pg_chunk.offset,
+						length);
 			skb->len += length;
 			skb->data_len += length;
 			skb->truesize += length;
 			length -= length;
-			lbq_desc->p.lbq_page = NULL;
 			__pskb_pull_tail(skb,
 				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
 				VLAN_ETH_HLEN : ETH_HLEN);
@@ -1538,8 +1599,7 @@
 		 *         frames.  If the MTU goes up we could
 		 *          eventually be in trouble.
 		 */
-		int size, offset, i = 0;
-		__le64 *bq, bq_array[8];
+		int size, i = 0;
 		sbq_desc = ql_get_curr_sbuf(rx_ring);
 		pci_unmap_single(qdev->pdev,
 				 pci_unmap_addr(sbq_desc, mapaddr),
@@ -1558,37 +1618,25 @@
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"%d bytes of headers & data in chain of large.\n", length);
 			skb = sbq_desc->p.skb;
-			bq = &bq_array[0];
-			memcpy(bq, skb->data, sizeof(bq_array));
 			sbq_desc->p.skb = NULL;
 			skb_reserve(skb, NET_IP_ALIGN);
-		} else {
-			QPRINTK(qdev, RX_STATUS, DEBUG,
-				"Headers in small, %d bytes of data in chain of large.\n", length);
-			bq = (__le64 *)sbq_desc->p.skb->data;
 		}
 		while (length > 0) {
-			lbq_desc = ql_get_curr_lbuf(rx_ring);
-			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(lbq_desc,
-						      mapaddr),
-				       pci_unmap_len(lbq_desc,
-						     maplen),
-				       PCI_DMA_FROMDEVICE);
-			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
-			offset = 0;
+			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+			size = (length < rx_ring->lbq_buf_size) ? length :
+				rx_ring->lbq_buf_size;
 
 			QPRINTK(qdev, RX_STATUS, DEBUG,
 				"Adding page %d to skb for %d bytes.\n",
 				i, size);
-			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
-					   offset, size);
+			skb_fill_page_desc(skb, i,
+						lbq_desc->p.pg_chunk.page,
+						lbq_desc->p.pg_chunk.offset,
+						size);
 			skb->len += size;
 			skb->data_len += size;
 			skb->truesize += size;
 			length -= size;
-			lbq_desc->p.lbq_page = NULL;
-			bq++;
 			i++;
 		}
 		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
@@ -1613,6 +1661,7 @@
 	if (unlikely(!skb)) {
 		QPRINTK(qdev, RX_STATUS, DEBUG,
 			"No skb available, drop packet.\n");
+		rx_ring->rx_dropped++;
 		return;
 	}
 
@@ -1621,6 +1670,7 @@
 		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
 					ib_mac_rsp->flags2);
 		dev_kfree_skb_any(skb);
+		rx_ring->rx_errors++;
 		return;
 	}
 
@@ -1629,6 +1679,14 @@
 	 */
 	if (skb->len > ndev->mtu + ETH_HLEN) {
 		dev_kfree_skb_any(skb);
+		rx_ring->rx_dropped++;
+		return;
+	}
+
+	/* loopback self test for ethtool */
+	if (test_bit(QL_SELFTEST, &qdev->flags)) {
+		ql_check_lb_frame(qdev, skb);
+		dev_kfree_skb_any(skb);
 		return;
 	}
 
@@ -1642,6 +1700,7 @@
 			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
 			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
 			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+		rx_ring->rx_multicast++;
 	}
 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
 		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
@@ -1673,8 +1732,8 @@
 		}
 	}
 
-	ndev->stats.rx_packets++;
-	ndev->stats.rx_bytes += skb->len;
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += skb->len;
 	skb_record_rx_queue(skb, rx_ring->cq_id);
 	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 		if (qdev->vlgrp &&
@@ -1698,7 +1757,6 @@
 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 				   struct ob_mac_iocb_rsp *mac_rsp)
 {
-	struct net_device *ndev = qdev->ndev;
 	struct tx_ring *tx_ring;
 	struct tx_ring_desc *tx_ring_desc;
 
@@ -1706,8 +1764,8 @@
 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
-	ndev->stats.tx_bytes += (tx_ring_desc->skb)->len;
-	ndev->stats.tx_packets++;
+	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
+	tx_ring->tx_packets++;
 	dev_kfree_skb(tx_ring_desc->skb);
 	tx_ring_desc->skb = NULL;
 
@@ -1930,7 +1988,7 @@
 	return work_done;
 }
 
-static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
+static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 
@@ -1946,7 +2004,7 @@
 	}
 }
 
-static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 	u32 enable_bit = MAC_ADDR_E;
@@ -1962,7 +2020,7 @@
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 }
 
-static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 	u32 enable_bit = 0;
@@ -2047,12 +2105,12 @@
 	 */
 	var = ql_read32(qdev, ISR1);
 	if (var & intr_context->irq_mask) {
-				QPRINTK(qdev, INTR, INFO,
+		QPRINTK(qdev, INTR, INFO,
 			"Waking handler for rx_ring[0].\n");
 		ql_disable_completion_interrupt(qdev, intr_context->intr);
-					napi_schedule(&rx_ring->napi);
-				work_done++;
-			}
+		napi_schedule(&rx_ring->napi);
+		work_done++;
+	}
 	ql_enable_completion_interrupt(qdev, intr_context->intr);
 	return work_done ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -2150,6 +2208,7 @@
 			__func__, tx_ring_idx);
 		netif_stop_subqueue(ndev, tx_ring->wq_id);
 		atomic_inc(&tx_ring->queue_stopped);
+		tx_ring->tx_errors++;
 		return NETDEV_TX_BUSY;
 	}
 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
@@ -2184,6 +2243,7 @@
 			NETDEV_TX_OK) {
 		QPRINTK(qdev, TX_QUEUED, ERR,
 				"Could not map the segments.\n");
+		tx_ring->tx_errors++;
 		return NETDEV_TX_BUSY;
 	}
 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
@@ -2200,6 +2260,7 @@
 	return NETDEV_TX_OK;
 }
 
+
 static void ql_free_shadow_space(struct ql_adapter *qdev)
 {
 	if (qdev->rx_ring_shadow_reg_area) {
@@ -2305,20 +2366,29 @@
 
 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
-	int i;
 	struct bq_desc *lbq_desc;
 
-	for (i = 0; i < rx_ring->lbq_len; i++) {
-		lbq_desc = &rx_ring->lbq[i];
-		if (lbq_desc->p.lbq_page) {
-			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(lbq_desc, mapaddr),
-				       pci_unmap_len(lbq_desc, maplen),
-				       PCI_DMA_FROMDEVICE);
+	uint32_t  curr_idx, clean_idx;
 
-			put_page(lbq_desc->p.lbq_page);
-			lbq_desc->p.lbq_page = NULL;
+	curr_idx = rx_ring->lbq_curr_idx;
+	clean_idx = rx_ring->lbq_clean_idx;
+	while (curr_idx != clean_idx) {
+		lbq_desc = &rx_ring->lbq[curr_idx];
+
+		if (lbq_desc->p.pg_chunk.last_flag) {
+			pci_unmap_page(qdev->pdev,
+				lbq_desc->p.pg_chunk.map,
+				ql_lbq_block_size(qdev),
+				       PCI_DMA_FROMDEVICE);
+			lbq_desc->p.pg_chunk.last_flag = 0;
 		}
+
+		put_page(lbq_desc->p.pg_chunk.page);
+		lbq_desc->p.pg_chunk.page = NULL;
+
+		if (++curr_idx == rx_ring->lbq_len)
+			curr_idx = 0;
+
 	}
 }
 
@@ -2616,6 +2686,7 @@
 	/* Set up the shadow registers for this ring. */
 	rx_ring->prod_idx_sh_reg = shadow_reg;
 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+	*rx_ring->prod_idx_sh_reg = 0;
 	shadow_reg += sizeof(u64);
 	shadow_reg_dma += sizeof(u64);
 	rx_ring->lbq_base_indirect = shadow_reg;
@@ -3277,6 +3348,22 @@
 	 * the same MAC address.
 	 */
 	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
+	/* Reroute all packets to our Interface.
+	 * They may have been routed to MPI firmware
+	 * due to WOL.
+	 */
+	value = ql_read32(qdev, MGMT_RCV_CFG);
+	value &= ~MGMT_RCV_CFG_RM;
+	mask = 0xffff0000;
+
+	/* Sticky reg needs clearing due to WOL. */
+	ql_write32(qdev, MGMT_RCV_CFG, mask);
+	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
+
+	/* Default WOL is enabled on Mezz cards */
+	if (qdev->pdev->subsystem_device == 0x0068 ||
+			qdev->pdev->subsystem_device == 0x0180)
+		qdev->wol = WAKE_MAGIC;
 
 	/* Start up the rx queues. */
 	for (i = 0; i < qdev->rx_ring_count; i++) {
@@ -3391,6 +3478,55 @@
 	QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
 }
 
+int ql_wol(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 wol = MB_WOL_DISABLE;
+
+	/* The CAM is still intact after a reset, but if we
+	 * are doing WOL, then we may need to program the
+	 * routing regs. We would also need to issue the mailbox
+	 * commands to instruct the MPI what to do per the ethtool
+	 * settings.
+	 */
+
+	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
+			WAKE_MCAST | WAKE_BCAST)) {
+		QPRINTK(qdev, IFDOWN, ERR,
+			"Unsupported WOL parameter. qdev->wol = 0x%x.\n",
+			qdev->wol);
+		return -EINVAL;
+	}
+
+	if (qdev->wol & WAKE_MAGIC) {
+		status = ql_mb_wol_set_magic(qdev, 1);
+		if (status) {
+			QPRINTK(qdev, IFDOWN, ERR,
+				"Failed to set magic packet on %s.\n",
+				qdev->ndev->name);
+			return status;
+		} else
+			QPRINTK(qdev, DRV, INFO,
+				"Enabled magic packet successfully on %s.\n",
+				qdev->ndev->name);
+
+		wol |= MB_WOL_MAGIC_PKT;
+	}
+
+	if (qdev->wol) {
+		/* Reroute all packets to Management Interface */
+		ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM |
+			(MGMT_RCV_CFG_RM << 16)));
+		wol |= MB_WOL_MODE_ON;
+		status = ql_mb_wol_mode(qdev, wol);
+		QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
+			(status == 0) ? "Successfully set" : "Failed", wol,
+			qdev->ndev->name);
+	}
+
+	return status;
+}
+
 static int ql_adapter_down(struct ql_adapter *qdev)
 {
 	int i, status = 0;
@@ -3496,6 +3632,10 @@
 	struct rx_ring *rx_ring;
 	struct tx_ring *tx_ring;
 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
+	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+
+	qdev->lbq_buf_order = get_order(lbq_buf_len);
 
 	/* In a perfect world we have one RSS ring for each CPU
 	 * and each has it's own vector.  To do that we ask for
@@ -3543,7 +3683,10 @@
 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
 			rx_ring->lbq_size =
 			    rx_ring->lbq_len * sizeof(__le64);
-			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
+			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
+			QPRINTK(qdev, IFUP, DEBUG,
+				"lbq_buf_size %d, order = %d\n",
+				rx_ring->lbq_buf_size, qdev->lbq_buf_order);
 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
 			rx_ring->sbq_size =
 			    rx_ring->sbq_len * sizeof(__le64);
@@ -3593,14 +3736,63 @@
 	return err;
 }
 
+static int ql_change_rx_buffers(struct ql_adapter *qdev)
+{
+	struct rx_ring *rx_ring;
+	int i, status;
+	u32 lbq_buf_len;
+
+	/* Wait for an outstanding reset to complete. */
+	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+		int i = 3;
+		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+			QPRINTK(qdev, IFUP, ERR,
+				 "Waiting for adapter UP...\n");
+			ssleep(1);
+		}
+
+		if (i < 0) {
+			QPRINTK(qdev, IFUP, ERR,
+			 "Timed out waiting for adapter UP\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	status = ql_adapter_down(qdev);
+	if (status)
+		goto error;
+
+	/* Get the new rx buffer size. */
+	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+	qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+	for (i = 0; i < qdev->rss_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		/* Set the new size. */
+		rx_ring->lbq_buf_size = lbq_buf_len;
+	}
+
+	status = ql_adapter_up(qdev);
+	if (status)
+		goto error;
+
+	return status;
+error:
+	QPRINTK(qdev, IFUP, ALERT,
+		"Driver up/down cycle failed, closing device.\n");
+	set_bit(QL_ADAPTER_UP, &qdev->flags);
+	dev_close(qdev->ndev);
+	return status;
+}
+
 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
+	int status;
 
 	if (ndev->mtu == 1500 && new_mtu == 9000) {
 		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
-		queue_delayed_work(qdev->workqueue,
-				&qdev->mpi_port_cfg_work, 0);
 	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
 		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
 	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3608,13 +3800,59 @@
 		return 0;
 	} else
 		return -EINVAL;
+
+	queue_delayed_work(qdev->workqueue,
+			&qdev->mpi_port_cfg_work, 3*HZ);
+
+	if (!netif_running(qdev->ndev)) {
+		ndev->mtu = new_mtu;
+		return 0;
+	}
+
 	ndev->mtu = new_mtu;
-	return 0;
+	status = ql_change_rx_buffers(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Changing MTU failed.\n");
+	}
+
+	return status;
 }
 
 static struct net_device_stats *qlge_get_stats(struct net_device
 					       *ndev)
 {
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	struct rx_ring *rx_ring = &qdev->rx_ring[0];
+	struct tx_ring *tx_ring = &qdev->tx_ring[0];
+	unsigned long pkts, mcast, dropped, errors, bytes;
+	int i;
+
+	/* Get RX stats. */
+	pkts = mcast = dropped = errors = bytes = 0;
+	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
+		pkts += rx_ring->rx_packets;
+		bytes += rx_ring->rx_bytes;
+		dropped += rx_ring->rx_dropped;
+		errors += rx_ring->rx_errors;
+		mcast += rx_ring->rx_multicast;
+	}
+	ndev->stats.rx_packets = pkts;
+	ndev->stats.rx_bytes = bytes;
+	ndev->stats.rx_dropped = dropped;
+	ndev->stats.rx_errors = errors;
+	ndev->stats.multicast = mcast;
+
+	/* Get TX stats. */
+	pkts = errors = bytes = 0;
+	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
+		pkts += tx_ring->tx_packets;
+		bytes += tx_ring->tx_bytes;
+		errors += tx_ring->tx_errors;
+	}
+	ndev->stats.tx_packets = pkts;
+	ndev->stats.tx_bytes = bytes;
+	ndev->stats.tx_errors = errors;
 	return &ndev->stats;
 }
 
@@ -3907,6 +4145,7 @@
 		goto err_out;
 	}
 
+	pci_save_state(pdev);
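+	/* The saved config space is restored during EEH slot reset handling. */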
 	qdev->reg_base =
 	    ioremap_nocache(pci_resource_start(pdev, 1),
 			    pci_resource_len(pdev, 1));
@@ -3979,7 +4218,6 @@
 	return err;
 }
 
-
 static const struct net_device_ops qlge_netdev_ops = {
 	.ndo_open		= qlge_open,
 	.ndo_stop		= qlge_close,
@@ -3990,9 +4228,9 @@
 	.ndo_set_mac_address	= qlge_set_mac_address,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= qlge_tx_timeout,
-	.ndo_vlan_rx_register	= ql_vlan_rx_register,
-	.ndo_vlan_rx_add_vid	= ql_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid	= ql_vlan_rx_kill_vid,
+	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
 };
 
 static int __devinit qlge_probe(struct pci_dev *pdev,
@@ -4048,10 +4286,21 @@
 	}
 	ql_link_off(qdev);
 	ql_display_dev_info(ndev);
+	atomic_set(&qdev->lb_count, 0);
 	cards_found++;
 	return 0;
 }
 
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
+{
+	return qlge_send(skb, ndev);
+}
+
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+	return ql_clean_inbound_rx_ring(rx_ring, budget);
+}
+
 static void __devexit qlge_remove(struct pci_dev *pdev)
 {
 	struct net_device *ndev = pci_get_drvdata(pdev);
@@ -4061,6 +4310,33 @@
 	free_netdev(ndev);
 }
 
+/* Clean up resources without touching hardware. */
+static void ql_eeh_close(struct net_device *ndev)
+{
+	int i;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (netif_carrier_ok(ndev)) {
+		netif_carrier_off(ndev);
+		netif_stop_queue(ndev);
+	}
+
+	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+		cancel_delayed_work_sync(&qdev->asic_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_work);
+	cancel_delayed_work_sync(&qdev->mpi_idc_work);
+	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+
+	for (i = 0; i < qdev->rss_ring_count; i++)
+		netif_napi_del(&qdev->rx_ring[i].napi);
+
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
+	ql_tx_ring_clean(qdev);
+	ql_free_rx_buffers(qdev);
+	ql_release_adapter_resources(qdev);
+}
+
 /*
  * This callback is called by the PCI subsystem whenever
  * a PCI bus error is detected.
@@ -4069,17 +4345,21 @@
 					       enum pci_channel_state state)
 {
 	struct net_device *ndev = pci_get_drvdata(pdev);
-	struct ql_adapter *qdev = netdev_priv(ndev);
 
-	netif_device_detach(ndev);
-
-	if (state == pci_channel_io_perm_failure)
+	switch (state) {
+	case pci_channel_io_normal:
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		netif_device_detach(ndev);
+		if (netif_running(ndev))
+			ql_eeh_close(ndev);
+		pci_disable_device(pdev);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		dev_err(&pdev->dev,
+			"%s: pci_channel_io_perm_failure.\n", __func__);
 		return PCI_ERS_RESULT_DISCONNECT;
-
-	if (netif_running(ndev))
-		ql_adapter_down(qdev);
-
-	pci_disable_device(pdev);
+	}
 
 	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -4096,25 +4376,15 @@
 	struct net_device *ndev = pci_get_drvdata(pdev);
 	struct ql_adapter *qdev = netdev_priv(ndev);
 
+	pdev->error_state = pci_channel_io_normal;
+
+	pci_restore_state(pdev);
 	if (pci_enable_device(pdev)) {
 		QPRINTK(qdev, IFUP, ERR,
 			"Cannot re-enable PCI device after reset.\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
-
 	pci_set_master(pdev);
-
-	netif_carrier_off(ndev);
-	ql_adapter_reset(qdev);
-
-	/* Make sure the EEPROM is good */
-	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
-
-	if (!is_valid_ether_addr(ndev->perm_addr)) {
-		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
 	return PCI_ERS_RESULT_RECOVERED;
 }
 
@@ -4122,17 +4392,21 @@
 {
 	struct net_device *ndev = pci_get_drvdata(pdev);
 	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err = 0;
 
-	pci_set_master(pdev);
-
+	if (ql_adapter_reset(qdev))
+		QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
 	if (netif_running(ndev)) {
-		if (ql_adapter_up(qdev)) {
+		err = qlge_open(ndev);
+		if (err) {
 			QPRINTK(qdev, IFUP, ERR,
 				"Device initialization failed after reset.\n");
 			return;
 		}
+	} else {
+		QPRINTK(qdev, IFUP, ERR,
+			"Device was not running prior to EEH.\n");
 	}
-
 	netif_device_attach(ndev);
 }
 
@@ -4156,6 +4430,7 @@
 			return err;
 	}
 
+	ql_wol(qdev);
 	err = pci_save_state(pdev);
 	if (err)
 		return err;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e497eac..f5619fe 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -454,7 +454,8 @@
  */
 static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
 {
-	int status, count;
+	int status;
+	unsigned long count;
 
 
 	/* Begin polled mode for MPI */
@@ -475,9 +476,9 @@
 	/* Wait for the command to complete. We loop
 	 * here because some AEN might arrive while
 	 * we're waiting for the mailbox command to
-	 * complete. If more than 5 arrive then we can
+	 * complete. If more than 5 seconds expire we can
 	 * assume something is wrong. */
-	count = 5;
+	count = jiffies + HZ * MAILBOX_TIMEOUT;
 	do {
 		/* Wait for the interrupt to come in. */
 		status = ql_wait_mbx_cmd_cmplt(qdev);
@@ -501,15 +502,15 @@
 					MB_CMD_STS_GOOD) ||
 			((mbcp->mbox_out[0] & 0x0000f000) ==
 					MB_CMD_STS_INTRMDT))
-			break;
-	} while (--count);
+			goto done;
+	} while (time_before(jiffies, count));
 
-	if (!count) {
-		QPRINTK(qdev, DRV, ERR,
-			"Timed out waiting for mailbox complete.\n");
-		status = -ETIMEDOUT;
-		goto end;
-	}
+	QPRINTK(qdev, DRV, ERR,
+		"Timed out waiting for mailbox complete.\n");
+	status = -ETIMEDOUT;
+	goto end;
+
+done:
 
 	/* Now we can clear the interrupt condition
 	 * and look at our status.
@@ -637,7 +638,7 @@
  * for the current port.
  * Most likely will block.
  */
-static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
+int ql_mb_set_port_cfg(struct ql_adapter *qdev)
 {
 	struct mbox_params mbc;
 	struct mbox_params *mbcp = &mbc;
@@ -672,7 +673,7 @@
  * for the current port.
  * Most likely will block.
  */
-static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
+int ql_mb_get_port_cfg(struct ql_adapter *qdev)
 {
 	struct mbox_params mbc;
 	struct mbox_params *mbcp = &mbc;
@@ -702,6 +703,76 @@
 	return status;
 }
 
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 2;
+	mbcp->out_count = 1;
+
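+	/* Word 0 carries the mailbox opcode, word 1 the requested WOL mode. */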
+	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
+	mbcp->mbox_in[1] = wol;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to set WOL mode.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+	u8 *addr = qdev->ndev->dev_addr;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 8;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
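+	/* Program the station MAC when enabling, a dummy pattern otherwise. */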
+	if (enable_wol) {
+		mbcp->mbox_in[1] = (u32)addr[0];
+		mbcp->mbox_in[2] = (u32)addr[1];
+		mbcp->mbox_in[3] = (u32)addr[2];
+		mbcp->mbox_in[4] = (u32)addr[3];
+		mbcp->mbox_in[5] = (u32)addr[4];
+		mbcp->mbox_in[6] = (u32)addr[5];
+		mbcp->mbox_in[7] = 0;
+	} else {
+		mbcp->mbox_in[1] = 0;
+		mbcp->mbox_in[2] = 1;
+		mbcp->mbox_in[3] = 1;
+		mbcp->mbox_in[4] = 1;
+		mbcp->mbox_in[5] = 1;
+		mbcp->mbox_in[6] = 1;
+		mbcp->mbox_in[7] = 0;
+	}
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to set WOL magic packet mode.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
 /* IDC - Inter Device Communication...
  * Some firmware commands require consent of adjacent FCOE
  * function.  This function waits for the OK, or a
@@ -751,6 +822,61 @@
 	return status;
 }
 
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 2;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
+	mbcp->mbox_in[1] = led_config;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to set LED Configuration.\n");
+		status = -EIO;
+	}
+
+	return status;
+}
+
+int ql_mb_get_led_cfg(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 2;
+
+	mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to get LED Configuration.\n");
+		status = -EIO;
+	} else
+		qdev->led_config = mbcp->mbox_out[1];
+
+	return status;
+}
+
 int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
 {
 	struct mbox_params mbc;
@@ -912,8 +1038,11 @@
 	int status;
 	struct mbox_params *mbcp = &qdev->idc_mbc;
 	u32 aen;
+	int timeout;
 
+	rtnl_lock();
 	aen = mbcp->mbox_out[1] >> 16;
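+	/* Bits 11:8 hold the ACK timeout; zero means no ACK is required. */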
+	timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
 
 	switch (aen) {
 	default:
@@ -921,22 +1050,61 @@
 			"Bug: Unhandled IDC action.\n");
 		break;
 	case MB_CMD_PORT_RESET:
-	case MB_CMD_SET_PORT_CFG:
 	case MB_CMD_STOP_FW:
 		ql_link_off(qdev);
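+		/* Fall through. */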
+	case MB_CMD_SET_PORT_CFG:
 		/* Signal the resulting link up AEN
 		 * that the frame routing and mac addr
 		 * needs to be set.
 		 * */
 		set_bit(QL_CAM_RT_SET, &qdev->flags);
-		rtnl_lock();
-		status = ql_mb_idc_ack(qdev);
-		rtnl_unlock();
-		if (status) {
-			QPRINTK(qdev, DRV, ERR,
-			"Bug: No pending IDC!\n");
+		/* Do ACK if required */
+		if (timeout) {
+			status = ql_mb_idc_ack(qdev);
+			if (status)
+				QPRINTK(qdev, DRV, ERR,
+					"Bug: No pending IDC!\n");
+		} else {
+			QPRINTK(qdev, DRV, DEBUG,
+				    "IDC ACK not required\n");
+			status = 0; /* success */
 		}
+		break;
+
+	/* These sub-commands issued by another (FCoE)
+	 * function are requesting to do an operation
+	 * on the shared resource (MPI environment).
+	 * We currently don't issue these so we just
+	 * ACK the request.
+	 */
+	case MB_CMD_IOP_RESTART_MPI:
+	case MB_CMD_IOP_PREP_LINK_DOWN:
+		/* Drop the link, reload the routing
+		 * table when link comes up.
+		 */
+		ql_link_off(qdev);
+		set_bit(QL_CAM_RT_SET, &qdev->flags);
+		/* Fall through. */
+	case MB_CMD_IOP_DVR_START:
+	case MB_CMD_IOP_FLASH_ACC:
+	case MB_CMD_IOP_CORE_DUMP_MPI:
+	case MB_CMD_IOP_PREP_UPDATE_MPI:
+	case MB_CMD_IOP_COMP_UPDATE_MPI:
+	case MB_CMD_IOP_NONE:	/*  an IDC without params */
+		/* Do ACK if required */
+		if (timeout) {
+			status = ql_mb_idc_ack(qdev);
+			if (status)
+				QPRINTK(qdev, DRV, ERR,
+				    "Bug: No pending IDC!\n");
+		} else {
+			QPRINTK(qdev, DRV, DEBUG,
+			    "IDC ACK not required\n");
+			status = 0; /* success */
+		}
+		break;
 	}
+	rtnl_unlock();
 }
 
 void ql_mpi_work(struct work_struct *work)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7d9fc06..1b0aa4c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1029,7 +1029,10 @@
 
 	spin_lock_irqsave(&tp->lock, flags);
 	tp->vlgrp = grp;
-	if (tp->vlgrp)
+	/*
+	 * Do not disable RxVlan on 8110SCd.
+	 */
+	if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
 		tp->cp_cmd |= RxVlan;
 	else
 		tp->cp_cmd &= ~RxVlan;
@@ -3197,6 +3200,14 @@
 	}
 
 	rtl8169_init_phy(dev, tp);
+
+	/*
+	 * Pretend we are using VLANs; this bypasses a nasty bug where
+	 * interrupts stop flowing under high load on 8110SCd controllers.
+	 */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
+
 	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
 
 out:
@@ -3368,7 +3379,7 @@
 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
 {
 	/* Low hurts. Let's disable the filtering. */
-	RTL_W16(RxMaxSize, rx_buf_sz);
+	RTL_W16(RxMaxSize, rx_buf_sz + 1);
 }
 
 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index b89f9be..7b52fe1 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,6 +1,6 @@
 sfc-y			+= efx.o falcon.o tx.o rx.o falcon_gmac.o \
-			   falcon_xmac.o selftest.o ethtool.o xfp_phy.o \
-			   mdio_10g.o tenxpress.o boards.o sfe4001.o
+			   falcon_xmac.o selftest.o ethtool.o qt202x_phy.o \
+			   mdio_10g.o tenxpress.o falcon_boards.o
 sfc-$(CONFIG_SFC_MTD)	+= mtd.o
 
 obj-$(CONFIG_SFC)	+= sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index d54d84c..6ad909b 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -520,19 +520,6 @@
 #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
 #endif
 
-#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
-	if (falcon_rev(efx) >= FALCON_REV_B0) {			   \
-		EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
-	} else { \
-		EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
-	} \
-} while (0)
-
-#define EFX_QWORD_FIELD_VER(efx, qword, field)	\
-	(falcon_rev(efx) >= FALCON_REV_B0 ?	\
-	 EFX_QWORD_FIELD((qword), field##_B0) :	\
-	 EFX_QWORD_FIELD((qword), field##_A1))
-
 /* Used to avoid compiler warnings about shift range exceeding width
  * of the data types when dma_addr_t is only 32 bits wide.
  */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
deleted file mode 100644
index 4a4c74c..0000000
--- a/drivers/net/sfc/boards.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include "net_driver.h"
-#include "phy.h"
-#include "boards.h"
-#include "efx.h"
-#include "workarounds.h"
-
-/* Macros for unpacking the board revision */
-/* The revision info is in host byte order. */
-#define BOARD_TYPE(_rev) (_rev >> 8)
-#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
-#define BOARD_MINOR(_rev) (_rev & 0xf)
-
-/* Blink support. If the PHY has no auto-blink mode so we hang it off a timer */
-#define BLINK_INTERVAL (HZ/2)
-
-static void blink_led_timer(unsigned long context)
-{
-	struct efx_nic *efx = (struct efx_nic *)context;
-	struct efx_blinker *bl = &efx->board_info.blinker;
-	efx->board_info.set_id_led(efx, bl->state);
-	bl->state = !bl->state;
-	if (bl->resubmit)
-		mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
-}
-
-static void board_blink(struct efx_nic *efx, bool blink)
-{
-	struct efx_blinker *blinker = &efx->board_info.blinker;
-
-	/* The rtnl mutex serialises all ethtool ioctls, so
-	 * nothing special needs doing here. */
-	if (blink) {
-		blinker->resubmit = true;
-		blinker->state = false;
-		setup_timer(&blinker->timer, blink_led_timer,
-			    (unsigned long)efx);
-		mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
-	} else {
-		blinker->resubmit = false;
-		if (blinker->timer.function)
-			del_timer_sync(&blinker->timer);
-		efx->board_info.init_leds(efx);
-	}
-}
-
-/*****************************************************************************
- * Support for LM87 sensor chip used on several boards
- */
-#define LM87_REG_ALARMS1		0x41
-#define LM87_REG_ALARMS2		0x42
-#define LM87_IN_LIMITS(nr, _min, _max)			\
-	0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
-#define LM87_AIN_LIMITS(nr, _min, _max)			\
-	0x3B + (nr), _max, 0x1A + (nr), _min
-#define LM87_TEMP_INT_LIMITS(_min, _max)		\
-	0x39, _max, 0x3A, _min
-#define LM87_TEMP_EXT1_LIMITS(_min, _max)		\
-	0x37, _max, 0x38, _min
-
-#define LM87_ALARM_TEMP_INT		0x10
-#define LM87_ALARM_TEMP_EXT1		0x20
-
-#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
-
-static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
-			 const u8 *reg_values)
-{
-	struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info);
-	int rc;
-
-	if (!client)
-		return -EIO;
-
-	while (*reg_values) {
-		u8 reg = *reg_values++;
-		u8 value = *reg_values++;
-		rc = i2c_smbus_write_byte_data(client, reg, value);
-		if (rc)
-			goto err;
-	}
-
-	efx->board_info.hwmon_client = client;
-	return 0;
-
-err:
-	i2c_unregister_device(client);
-	return rc;
-}
-
-static void efx_fini_lm87(struct efx_nic *efx)
-{
-	i2c_unregister_device(efx->board_info.hwmon_client);
-}
-
-static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
-{
-	struct i2c_client *client = efx->board_info.hwmon_client;
-	s32 alarms1, alarms2;
-
-	/* If link is up then do not monitor temperature */
-	if (EFX_WORKAROUND_7884(efx) && efx->link_up)
-		return 0;
-
-	alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
-	alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
-	if (alarms1 < 0)
-		return alarms1;
-	if (alarms2 < 0)
-		return alarms2;
-	alarms1 &= mask;
-	alarms2 &= mask >> 8;
-	if (alarms1 || alarms2) {
-		EFX_ERR(efx,
-			"LM87 detected a hardware failure (status %02x:%02x)"
-			"%s%s\n",
-			alarms1, alarms2,
-			(alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
-			(alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
-		return -ERANGE;
-	}
-
-	return 0;
-}
-
-#else /* !CONFIG_SENSORS_LM87 */
-
-static inline int
-efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
-	      const u8 *reg_values)
-{
-	return 0;
-}
-static inline void efx_fini_lm87(struct efx_nic *efx)
-{
-}
-static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
-{
-	return 0;
-}
-
-#endif /* CONFIG_SENSORS_LM87 */
-
-/*****************************************************************************
- * Support for the SFE4002
- *
- */
-static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
-
-static const u8 sfe4002_lm87_regs[] = {
-	LM87_IN_LIMITS(0, 0x83, 0x91),		/* 2.5V:  1.8V +/- 5% */
-	LM87_IN_LIMITS(1, 0x51, 0x5a),		/* Vccp1: 1.2V +/- 5% */
-	LM87_IN_LIMITS(2, 0xb6, 0xca),		/* 3.3V:  3.3V +/- 5% */
-	LM87_IN_LIMITS(3, 0xb0, 0xc9),		/* 5V:    4.6-5.2V */
-	LM87_IN_LIMITS(4, 0xb0, 0xe0),		/* 12V:   11-14V */
-	LM87_IN_LIMITS(5, 0x44, 0x4b),		/* Vccp2: 1.0V +/- 5% */
-	LM87_AIN_LIMITS(0, 0xa0, 0xb2),		/* AIN1:  1.66V +/- 5% */
-	LM87_AIN_LIMITS(1, 0x91, 0xa1),		/* AIN2:  1.5V +/- 5% */
-	LM87_TEMP_INT_LIMITS(10, 60),		/* board */
-	LM87_TEMP_EXT1_LIMITS(10, 70),		/* Falcon */
-	0
-};
-
-static struct i2c_board_info sfe4002_hwmon_info = {
-	I2C_BOARD_INFO("lm87", 0x2e),
-	.platform_data	= &sfe4002_lm87_channel,
-};
-
-/****************************************************************************/
-/* LED allocations. Note that on rev A0 boards the schematic and the reality
- * differ: red and green are swapped. Below is the fixed (A1) layout (there
- * are only 3 A0 boards in existence, so no real reason to make this
- * conditional).
- */
-#define SFE4002_FAULT_LED (2)	/* Red */
-#define SFE4002_RX_LED    (0)	/* Green */
-#define SFE4002_TX_LED    (1)	/* Amber */
-
-static void sfe4002_init_leds(struct efx_nic *efx)
-{
-	/* Set the TX and RX LEDs to reflect status and activity, and the
-	 * fault LED off */
-	xfp_set_led(efx, SFE4002_TX_LED,
-		    QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
-	xfp_set_led(efx, SFE4002_RX_LED,
-		    QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
-	xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
-}
-
-static void sfe4002_set_id_led(struct efx_nic *efx, bool state)
-{
-	xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
-			QUAKE_LED_OFF);
-}
-
-static int sfe4002_check_hw(struct efx_nic *efx)
-{
-	/* A0 board rev. 4002s report a temperature fault the whole time
-	 * (bad sensor) so we mask it out. */
-	unsigned alarm_mask =
-		(efx->board_info.major == 0 && efx->board_info.minor == 0) ?
-		~LM87_ALARM_TEMP_EXT1 : ~0;
-
-	return efx_check_lm87(efx, alarm_mask);
-}
-
-static int sfe4002_init(struct efx_nic *efx)
-{
-	int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
-	if (rc)
-		return rc;
-	efx->board_info.monitor = sfe4002_check_hw;
-	efx->board_info.init_leds = sfe4002_init_leds;
-	efx->board_info.set_id_led = sfe4002_set_id_led;
-	efx->board_info.blink = board_blink;
-	efx->board_info.fini = efx_fini_lm87;
-	return 0;
-}
-
-/*****************************************************************************
- * Support for the SFN4112F
- *
- */
-static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
-
-static const u8 sfn4112f_lm87_regs[] = {
-	LM87_IN_LIMITS(0, 0x83, 0x91),		/* 2.5V:  1.8V +/- 5% */
-	LM87_IN_LIMITS(1, 0x51, 0x5a),		/* Vccp1: 1.2V +/- 5% */
-	LM87_IN_LIMITS(2, 0xb6, 0xca),		/* 3.3V:  3.3V +/- 5% */
-	LM87_IN_LIMITS(4, 0xb0, 0xe0),		/* 12V:   11-14V */
-	LM87_IN_LIMITS(5, 0x44, 0x4b),		/* Vccp2: 1.0V +/- 5% */
-	LM87_AIN_LIMITS(1, 0x91, 0xa1),		/* AIN2:  1.5V +/- 5% */
-	LM87_TEMP_INT_LIMITS(10, 60),		/* board */
-	LM87_TEMP_EXT1_LIMITS(10, 70),		/* Falcon */
-	0
-};
-
-static struct i2c_board_info sfn4112f_hwmon_info = {
-	I2C_BOARD_INFO("lm87", 0x2e),
-	.platform_data	= &sfn4112f_lm87_channel,
-};
-
-#define SFN4112F_ACT_LED	0
-#define SFN4112F_LINK_LED	1
-
-static void sfn4112f_init_leds(struct efx_nic *efx)
-{
-	xfp_set_led(efx, SFN4112F_ACT_LED,
-		    QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
-	xfp_set_led(efx, SFN4112F_LINK_LED,
-		    QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
-}
-
-static void sfn4112f_set_id_led(struct efx_nic *efx, bool state)
-{
-	xfp_set_led(efx, SFN4112F_LINK_LED,
-		    state ? QUAKE_LED_ON : QUAKE_LED_OFF);
-}
-
-static int sfn4112f_check_hw(struct efx_nic *efx)
-{
-	/* Mask out unused sensors */
-	return efx_check_lm87(efx, ~0x48);
-}
-
-static int sfn4112f_init(struct efx_nic *efx)
-{
-	int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
-	if (rc)
-		return rc;
-	efx->board_info.monitor = sfn4112f_check_hw;
-	efx->board_info.init_leds = sfn4112f_init_leds;
-	efx->board_info.set_id_led = sfn4112f_set_id_led;
-	efx->board_info.blink = board_blink;
-	efx->board_info.fini = efx_fini_lm87;
-	return 0;
-}
-
-/* This will get expanded as board-specific details get moved out of the
- * PHY drivers. */
-struct efx_board_data {
-	enum efx_board_type type;
-	const char *ref_model;
-	const char *gen_type;
-	int (*init) (struct efx_nic *nic);
-};
-
-
-static struct efx_board_data board_data[] = {
-	{ EFX_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init },
-	{ EFX_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
-	{ EFX_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
-	  sfn4111t_init },
-	{ EFX_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter",
-	  sfn4112f_init },
-};
-
-void efx_set_board_info(struct efx_nic *efx, u16 revision_info)
-{
-	struct efx_board_data *data = NULL;
-	int i;
-
-	efx->board_info.type = BOARD_TYPE(revision_info);
-	efx->board_info.major = BOARD_MAJOR(revision_info);
-	efx->board_info.minor = BOARD_MINOR(revision_info);
-
-	for (i = 0; i < ARRAY_SIZE(board_data); i++)
-		if (board_data[i].type == efx->board_info.type)
-			data = &board_data[i];
-
-	if (data) {
-		EFX_INFO(efx, "board is %s rev %c%d\n",
-			 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
-			 ? data->ref_model : data->gen_type,
-			 'A' + efx->board_info.major, efx->board_info.minor);
-		efx->board_info.init = data->init;
-	} else {
-		EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
-	}
-}
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
deleted file mode 100644
index 44942de..0000000
--- a/drivers/net/sfc/boards.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_BOARDS_H
-#define EFX_BOARDS_H
-
-/* Board IDs (must fit in 8 bits) */
-enum efx_board_type {
-	EFX_BOARD_SFE4001 = 1,
-	EFX_BOARD_SFE4002 = 2,
-	EFX_BOARD_SFN4111T = 0x51,
-	EFX_BOARD_SFN4112F = 0x52,
-};
-
-extern void efx_set_board_info(struct efx_nic *efx, u16 revision_info);
-
-/* SFE4001 (10GBASE-T) */
-extern int sfe4001_init(struct efx_nic *efx);
-/* SFN4111T (100/1000/10GBASE-T) */
-extern int sfn4111t_init(struct efx_nic *efx);
-
-#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index cc4b2f9..0d0243b 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -228,26 +228,20 @@
 		if (channel->used_flags & EFX_USED_BY_RX &&
 		    efx->irq_rx_adaptive &&
 		    unlikely(++channel->irq_count == 1000)) {
-			unsigned old_irq_moderation = channel->irq_moderation;
-
 			if (unlikely(channel->irq_mod_score <
 				     irq_adapt_low_thresh)) {
-				channel->irq_moderation =
-					max_t(int,
-					      channel->irq_moderation -
-					      FALCON_IRQ_MOD_RESOLUTION,
-					      FALCON_IRQ_MOD_RESOLUTION);
+				if (channel->irq_moderation > 1) {
+					channel->irq_moderation -= 1;
+					falcon_set_int_moderation(channel);
+				}
 			} else if (unlikely(channel->irq_mod_score >
 					    irq_adapt_high_thresh)) {
-				channel->irq_moderation =
-					min(channel->irq_moderation +
-					    FALCON_IRQ_MOD_RESOLUTION,
-					    efx->irq_rx_moderation);
+				if (channel->irq_moderation <
+				    efx->irq_rx_moderation) {
+					channel->irq_moderation += 1;
+					falcon_set_int_moderation(channel);
+				}
 			}
-
-			if (channel->irq_moderation != old_irq_moderation)
-				falcon_set_int_moderation(channel);
-
 			channel->irq_count = 0;
 			channel->irq_mod_score = 0;
 		}
@@ -290,7 +284,7 @@
 	napi_disable(&channel->napi_str);
 
 	/* Poll the channel */
-	efx_process_channel(channel, efx->type->evq_size);
+	efx_process_channel(channel, EFX_EVQ_SIZE);
 
 	/* Ack the eventq. This may cause an interrupt to be generated
 	 * when they are reenabled */
@@ -824,9 +818,8 @@
 		goto fail2;
 	}
 
-	efx->membase_phys = pci_resource_start(efx->pci_dev,
-					       efx->type->mem_bar);
-	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
+	efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
+	rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
 	if (rc) {
 		EFX_ERR(efx, "request for memory BAR failed\n");
 		rc = -EIO;
@@ -835,21 +828,20 @@
 	efx->membase = ioremap_nocache(efx->membase_phys,
 				       efx->type->mem_map_size);
 	if (!efx->membase) {
-		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
-			efx->type->mem_bar,
+		EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
 			(unsigned long long)efx->membase_phys,
 			efx->type->mem_map_size);
 		rc = -ENOMEM;
 		goto fail4;
 	}
-	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
-		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
+	EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
+		(unsigned long long)efx->membase_phys,
 		efx->type->mem_map_size, efx->membase);
 
 	return 0;
 
  fail4:
-	pci_release_region(efx->pci_dev, efx->type->mem_bar);
+	pci_release_region(efx->pci_dev, EFX_MEM_BAR);
  fail3:
 	efx->membase_phys = 0;
  fail2:
@@ -868,7 +860,7 @@
 	}
 
 	if (efx->membase_phys) {
-		pci_release_region(efx->pci_dev, efx->type->mem_bar);
+		pci_release_region(efx->pci_dev, EFX_MEM_BAR);
 		efx->membase_phys = 0;
 	}
 
@@ -1220,22 +1212,33 @@
  *
  **************************************************************************/
 
+static unsigned irq_mod_ticks(int usecs, int resolution)
+{
+	if (usecs <= 0)
+		return 0; /* cannot receive interrupts ahead of time :-) */
+	if (usecs < resolution)
+		return 1; /* never round down to 0 */
+	return usecs / resolution;
+}
+
 /* Set interrupt moderation parameters */
 void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
 			     bool rx_adaptive)
 {
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
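+	/* Convert the requested microseconds into hardware timer ticks. */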
+	unsigned tx_ticks = irq_mod_ticks(tx_usecs, FALCON_IRQ_MOD_RESOLUTION);
+	unsigned rx_ticks = irq_mod_ticks(rx_usecs, FALCON_IRQ_MOD_RESOLUTION);
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
 	efx_for_each_tx_queue(tx_queue, efx)
-		tx_queue->channel->irq_moderation = tx_usecs;
+		tx_queue->channel->irq_moderation = tx_ticks;
 
 	efx->irq_rx_adaptive = rx_adaptive;
-	efx->irq_rx_moderation = rx_usecs;
+	efx->irq_rx_moderation = rx_ticks;
 	efx_for_each_rx_queue(rx_queue, efx)
-		rx_queue->channel->irq_moderation = rx_usecs;
+		rx_queue->channel->irq_moderation = rx_ticks;
 }
 
 /**************************************************************************
@@ -1981,17 +1984,9 @@
 
 	efx->type = type;
 
-	/* Sanity-check NIC type */
-	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
-			    (efx->type->txd_ring_mask + 1));
-	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
-			    (efx->type->rxd_ring_mask + 1));
-	EFX_BUG_ON_PARANOID(efx->type->evq_size &
-			    (efx->type->evq_size - 1));
 	/* As close as we can get to guaranteeing that we don't overflow */
-	EFX_BUG_ON_PARANOID(efx->type->evq_size <
-			    (efx->type->txd_ring_mask + 1 +
-			     efx->type->rxd_ring_mask + 1));
+	BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
+
 	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
 	/* Higher numbered interrupt modes are less capable! */
@@ -2027,18 +2022,12 @@
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
-	EFX_ASSERT_RESET_SERIALISED(efx);
-
-	/* Skip everything if we never obtained a valid membase */
-	if (!efx->membase)
-		return;
-
+	falcon_fini_interrupt(efx);
 	efx_fini_channels(efx);
 	efx_fini_port(efx);
 
 	/* Shutdown the board, then the NIC and board state */
 	efx->board_info.fini(efx);
-	falcon_fini_interrupt(efx);
 
 	efx_fini_napi(efx);
 	efx_remove_all(efx);
@@ -2063,9 +2052,6 @@
 	/* Allow any queued efx_resets() to complete */
 	rtnl_unlock();
 
-	if (efx->membase == NULL)
-		goto out;
-
 	efx_unregister_netdev(efx);
 
 	efx_mtd_remove(efx);
@@ -2078,7 +2064,6 @@
 
 	efx_pci_remove_main(efx);
 
-out:
 	efx_fini_io(efx);
 	EFX_LOG(efx, "shutdown successful\n");
 
@@ -2224,13 +2209,15 @@
 	 * MAC stats succeeds. */
 	efx->state = STATE_RUNNING;
 
-	efx_mtd_probe(efx); /* allowed to fail */
-
 	rc = efx_register_netdev(efx);
 	if (rc)
 		goto fail5;
 
 	EFX_LOG(efx, "initialisation successful\n");
+
+	rtnl_lock();
+	efx_mtd_probe(efx); /* allowed to fail */
+	rtnl_unlock();
 	return 0;
 
  fail5:
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index aecaf62..179e0e3 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -19,22 +19,31 @@
 #define FALCON_A_S_DEVID        0x6703
 #define FALCON_B_P_DEVID        0x0710
 
+/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+#define EFX_MEM_BAR 2
+
 /* TX */
 extern netdev_tx_t efx_xmit(struct efx_nic *efx,
 				  struct efx_tx_queue *tx_queue,
 				  struct sk_buff *skb);
+extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern void efx_stop_queue(struct efx_nic *efx);
 extern void efx_wake_queue(struct efx_nic *efx);
+#define EFX_TXQ_SIZE 1024
+#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
 
 /* RX */
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 			  unsigned int len, bool checksummed, bool discard);
 extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+#define EFX_RXQ_SIZE 1024
+#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
 extern void efx_flush_queues(struct efx_nic *efx);
+#define EFX_EVQ_SIZE 4096
+#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
 
 /* Ports */
 extern void efx_stats_disable(struct efx_nic *efx);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 45018f2..a313b61 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -618,6 +618,9 @@
 	coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive;
 	coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation;
 
+	coalesce->tx_coalesce_usecs_irq *= FALCON_IRQ_MOD_RESOLUTION;
+	coalesce->rx_coalesce_usecs_irq *= FALCON_IRQ_MOD_RESOLUTION;
+
 	return 0;
 }
 
@@ -656,11 +659,6 @@
 	}
 
 	efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive);
-
-	/* Reset channel to pick up new moderation value.  Note that
-	 * this may change the value of the irq_moderation field
-	 * (e.g. to allow for hardware timer granularity).
-	 */
 	efx_for_each_channel(channel, efx)
 		falcon_set_int_moderation(channel);
 
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index c049364..865638b 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -22,11 +22,10 @@
 #include "mac.h"
 #include "spi.h"
 #include "falcon.h"
-#include "falcon_hwdefs.h"
-#include "falcon_io.h"
+#include "regs.h"
+#include "io.h"
 #include "mdio_10g.h"
 #include "phy.h"
-#include "boards.h"
 #include "workarounds.h"
 
 /* Falcon hardware control.
@@ -36,19 +35,12 @@
 
 /**
  * struct falcon_nic_data - Falcon NIC state
- * @next_buffer_table: First available buffer table id
  * @pci_dev2: The secondary PCI device if present
  * @i2c_data: Operations and state for I2C bit-bashing algorithm
- * @int_error_count: Number of internal errors seen recently
- * @int_error_expire: Time at which error count will be expired
  */
 struct falcon_nic_data {
-	unsigned next_buffer_table;
 	struct pci_dev *pci_dev2;
 	struct i2c_algo_bit_data i2c_data;
-
-	unsigned int_error_count;
-	unsigned long int_error_expire;
 };
 
 /**************************************************************************
@@ -109,21 +101,6 @@
 module_param(rx_xon_thresh_bytes, int, 0644);
 MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 
-/* TX descriptor ring size - min 512 max 4k */
-#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
-#define FALCON_TXD_RING_SIZE 1024
-#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
-
-/* RX descriptor ring size - min 512 max 4k */
-#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
-#define FALCON_RXD_RING_SIZE 1024
-#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
-
-/* Event queue size - max 32k */
-#define FALCON_EVQ_ORDER EVQ_SIZE_4K
-#define FALCON_EVQ_SIZE 4096
-#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
-
 /* If FALCON_MAX_INT_ERRORS internal errors occur within
  * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  * disable it.
@@ -143,12 +120,6 @@
  **************************************************************************
  */
 
-/* DMA address mask */
-#define FALCON_DMA_MASK DMA_BIT_MASK(46)
-
-/* TX DMA length mask (13-bit) */
-#define FALCON_TX_DMA_MASK (4096 - 1)
-
 /* Size and alignment of special buffers (4KB) */
 #define FALCON_BUF_SIZE 4096
 
@@ -164,6 +135,13 @@
  *
  **************************************************************************/
 
+static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+					unsigned int index)
+{
+	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+			value, index);
+}
+
 /* Read the current event from the event queue */
 static inline efx_qword_t *falcon_event(struct efx_channel *channel,
 					unsigned int index)
@@ -200,9 +178,9 @@
 	struct efx_nic *efx = (struct efx_nic *)data;
 	efx_oword_t reg;
 
-	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state);
-	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
+	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
+	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
 }
 
 static void falcon_setscl(void *data, int state)
@@ -210,9 +188,9 @@
 	struct efx_nic *efx = (struct efx_nic *)data;
 	efx_oword_t reg;
 
-	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state);
-	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
+	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
+	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
 }
 
 static int falcon_getsda(void *data)
@@ -220,8 +198,8 @@
 	struct efx_nic *efx = (struct efx_nic *)data;
 	efx_oword_t reg;
 
-	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
-	return EFX_OWORD_FIELD(reg, GPIO3_IN);
+	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
 }
 
 static int falcon_getscl(void *data)
@@ -229,8 +207,8 @@
 	struct efx_nic *efx = (struct efx_nic *)data;
 	efx_oword_t reg;
 
-	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
-	return EFX_OWORD_FIELD(reg, GPIO0_IN);
+	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
 }
 
 static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
@@ -275,12 +253,11 @@
 		dma_addr = buffer->dma_addr + (i * 4096);
 		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
 			index, (unsigned long long)dma_addr);
-		EFX_POPULATE_QWORD_4(buf_desc,
-				     IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
-				     BUF_ADR_REGION, 0,
-				     BUF_ADR_FBUF, (dma_addr >> 12),
-				     BUF_OWNER_ID_FBUF, 0);
-		falcon_write_sram(efx, &buf_desc, index);
+		EFX_POPULATE_QWORD_3(buf_desc,
+				     FRF_AZ_BUF_ADR_REGION, 0,
+				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
+				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+		falcon_write_buf_tbl(efx, &buf_desc, index);
 	}
 }
 
@@ -300,11 +277,11 @@
 		buffer->index, buffer->index + buffer->entries - 1);
 
 	EFX_POPULATE_OWORD_4(buf_tbl_upd,
-			     BUF_UPD_CMD, 0,
-			     BUF_CLR_CMD, 1,
-			     BUF_CLR_END_ID, end,
-			     BUF_CLR_START_ID, start);
-	falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
+			     FRF_AZ_BUF_UPD_CMD, 0,
+			     FRF_AZ_BUF_CLR_CMD, 1,
+			     FRF_AZ_BUF_CLR_END_ID, end,
+			     FRF_AZ_BUF_CLR_START_ID, start);
+	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
 }
 
 /*
@@ -320,8 +297,6 @@
 				       struct efx_special_buffer *buffer,
 				       unsigned int len)
 {
-	struct falcon_nic_data *nic_data = efx->nic_data;
-
 	len = ALIGN(len, FALCON_BUF_SIZE);
 
 	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
@@ -336,8 +311,8 @@
 	memset(buffer->addr, 0xff, len);
 
 	/* Select new buffer ID */
-	buffer->index = nic_data->next_buffer_table;
-	nic_data->next_buffer_table += buffer->entries;
+	buffer->index = efx->next_buffer_table;
+	efx->next_buffer_table += buffer->entries;
 
 	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
 		"(virt %p phys %llx)\n", buffer->index,
@@ -415,10 +390,10 @@
 	unsigned write_ptr;
 	efx_dword_t reg;
 
-	write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
-	EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
-	falcon_writel_page(tx_queue->efx, &reg,
-			   TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
+	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+	efx_writed_page(tx_queue->efx, &reg,
+			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
 }
 
 
@@ -436,18 +411,17 @@
 	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
 
 	do {
-		write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
+		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[write_ptr];
 		txd = falcon_tx_desc(tx_queue, write_ptr);
 		++tx_queue->write_count;
 
 		/* Create TX descriptor ring entry */
-		EFX_POPULATE_QWORD_5(*txd,
-				     TX_KER_PORT, 0,
-				     TX_KER_CONT, buffer->continuation,
-				     TX_KER_BYTE_CNT, buffer->len,
-				     TX_KER_BUF_REGION, 0,
-				     TX_KER_BUF_ADR, buffer->dma_addr);
+		EFX_POPULATE_QWORD_4(*txd,
+				     FSF_AZ_TX_KER_CONT, buffer->continuation,
+				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+				     FSF_AZ_TX_KER_BUF_REGION, 0,
+				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
 	} while (tx_queue->write_count != tx_queue->insert_count);
 
 	wmb(); /* Ensure descriptors are written before they are fetched */
@@ -458,9 +432,10 @@
 int falcon_probe_tx(struct efx_tx_queue *tx_queue)
 {
 	struct efx_nic *efx = tx_queue->efx;
+	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
+		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
 	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
-					   FALCON_TXD_RING_SIZE *
-					   sizeof(efx_qword_t));
+					   EFX_TXQ_SIZE * sizeof(efx_qword_t));
 }
 
 void falcon_init_tx(struct efx_tx_queue *tx_queue)
@@ -475,25 +450,28 @@
 
 	/* Push TX descriptor ring to card */
 	EFX_POPULATE_OWORD_10(tx_desc_ptr,
-			      TX_DESCQ_EN, 1,
-			      TX_ISCSI_DDIG_EN, 0,
-			      TX_ISCSI_HDIG_EN, 0,
-			      TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
-			      TX_DESCQ_EVQ_ID, tx_queue->channel->channel,
-			      TX_DESCQ_OWNER_ID, 0,
-			      TX_DESCQ_LABEL, tx_queue->queue,
-			      TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
-			      TX_DESCQ_TYPE, 0,
-			      TX_NON_IP_DROP_DIS_B0, 1);
+			      FRF_AZ_TX_DESCQ_EN, 1,
+			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+			      FRF_AZ_TX_DESCQ_EVQ_ID,
+			      tx_queue->channel->channel,
+			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+			      FRF_AZ_TX_DESCQ_SIZE,
+			      __ffs(tx_queue->txd.entries),
+			      FRF_AZ_TX_DESCQ_TYPE, 0,
+			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);
 
 	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
+		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+				    !csum);
 	}
 
-	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
-			   tx_queue->queue);
+	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+			 tx_queue->queue);
 
 	if (falcon_rev(efx) < FALCON_REV_B0) {
 		efx_oword_t reg;
@@ -501,12 +479,12 @@
 		/* Only 128 bits in this register */
 		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
 
-		falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
+		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
 		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
 			clear_bit_le(tx_queue->queue, (void *)&reg);
 		else
 			set_bit_le(tx_queue->queue, (void *)&reg);
-		falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
+		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 	}
 }
 
@@ -517,9 +495,9 @@
 
 	/* Post a flush command */
 	EFX_POPULATE_OWORD_2(tx_flush_descq,
-			     TX_FLUSH_DESCQ_CMD, 1,
-			     TX_FLUSH_DESCQ, tx_queue->queue);
-	falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
+			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
 }
 
 void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -532,8 +510,8 @@
 
 	/* Remove TX descriptor ring from card */
 	EFX_ZERO_OWORD(tx_desc_ptr);
-	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
-			   tx_queue->queue);
+	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+			 tx_queue->queue);
 
 	/* Unpin TX descriptor ring */
 	falcon_fini_special_buffer(efx, &tx_queue->txd);
@@ -568,11 +546,11 @@
 	rxd = falcon_rx_desc(rx_queue, index);
 	rx_buf = efx_rx_buffer(rx_queue, index);
 	EFX_POPULATE_QWORD_3(*rxd,
-			     RX_KER_BUF_SIZE,
+			     FSF_AZ_RX_KER_BUF_SIZE,
 			     rx_buf->len -
 			     rx_queue->efx->type->rx_buffer_padding,
-			     RX_KER_BUF_REGION, 0,
-			     RX_KER_BUF_ADR, rx_buf->dma_addr);
+			     FSF_AZ_RX_KER_BUF_REGION, 0,
+			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
 }
 
 /* This writes to the RX_DESC_WPTR register for the specified receive
@@ -586,23 +564,24 @@
 	while (rx_queue->notified_count != rx_queue->added_count) {
 		falcon_build_rx_desc(rx_queue,
 				     rx_queue->notified_count &
-				     FALCON_RXD_RING_MASK);
+				     EFX_RXQ_MASK);
 		++rx_queue->notified_count;
 	}
 
 	wmb();
-	write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
-	EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
-	falcon_writel_page(rx_queue->efx, &reg,
-			   RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
+	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
+	efx_writed_page(rx_queue->efx, &reg,
+			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
 }
 
 int falcon_probe_rx(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
+	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
+		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
 	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
-					   FALCON_RXD_RING_SIZE *
-					   sizeof(efx_qword_t));
+					   EFX_RXQ_SIZE * sizeof(efx_qword_t));
 }
 
 void falcon_init_rx(struct efx_rx_queue *rx_queue)
@@ -623,19 +602,21 @@
 
 	/* Push RX descriptor ring to card */
 	EFX_POPULATE_OWORD_10(rx_desc_ptr,
-			      RX_ISCSI_DDIG_EN, iscsi_digest_en,
-			      RX_ISCSI_HDIG_EN, iscsi_digest_en,
-			      RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
-			      RX_DESCQ_EVQ_ID, rx_queue->channel->channel,
-			      RX_DESCQ_OWNER_ID, 0,
-			      RX_DESCQ_LABEL, rx_queue->queue,
-			      RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
-			      RX_DESCQ_TYPE, 0 /* kernel queue */ ,
+			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
+			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+			      FRF_AZ_RX_DESCQ_EVQ_ID,
+			      rx_queue->channel->channel,
+			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
+			      FRF_AZ_RX_DESCQ_SIZE,
+			      __ffs(rx_queue->rxd.entries),
+			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
 			      /* For >=B0 this is scatter so disable */
-			      RX_DESCQ_JUMBO, !is_b0,
-			      RX_DESCQ_EN, 1);
-	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
-			   rx_queue->queue);
+			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
+			      FRF_AZ_RX_DESCQ_EN, 1);
+	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+			 rx_queue->queue);
 }
 
 static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
@@ -645,9 +626,9 @@
 
 	/* Post a flush command */
 	EFX_POPULATE_OWORD_2(rx_flush_descq,
-			     RX_FLUSH_DESCQ_CMD, 1,
-			     RX_FLUSH_DESCQ, rx_queue->queue);
-	falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
+			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+			     FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
+	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
 }
 
 void falcon_fini_rx(struct efx_rx_queue *rx_queue)
@@ -660,8 +641,8 @@
 
 	/* Remove RX descriptor ring from card */
 	EFX_ZERO_OWORD(rx_desc_ptr);
-	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
-			   rx_queue->queue);
+	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+			 rx_queue->queue);
 
 	/* Unpin RX descriptor ring */
 	falcon_fini_special_buffer(efx, &rx_queue->rxd);
@@ -694,8 +675,8 @@
 	efx_dword_t reg;
 	struct efx_nic *efx = channel->efx;
 
-	EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
-	falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
 			    channel->channel);
 }
 
@@ -704,11 +685,14 @@
 {
 	efx_oword_t drv_ev_reg;
 
-	EFX_POPULATE_OWORD_2(drv_ev_reg,
-			     DRV_EV_QID, channel->channel,
-			     DRV_EV_DATA,
-			     EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
-	falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
+	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
+		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
+	drv_ev_reg.u32[0] = event->u32[0];
+	drv_ev_reg.u32[1] = event->u32[1];
+	drv_ev_reg.u32[2] = 0;
+	drv_ev_reg.u32[3] = 0;
+	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
+	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
 }
 
 /* Handle a transmit completion event
@@ -724,18 +708,18 @@
 	struct efx_tx_queue *tx_queue;
 	struct efx_nic *efx = channel->efx;
 
-	if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
+	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
 		/* Transmit completion */
-		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
-		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
+		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
 		tx_queue = &efx->tx_queue[tx_ev_q_label];
 		channel->irq_mod_score +=
 			(tx_ev_desc_ptr - tx_queue->read_count) &
-			efx->type->txd_ring_mask;
+			EFX_TXQ_MASK;
 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
-	} else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
+	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
 		/* Rewrite the FIFO write pointer */
-		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
+		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
 		tx_queue = &efx->tx_queue[tx_ev_q_label];
 
 		if (efx_dev_registered(efx))
@@ -743,7 +727,7 @@
 		falcon_notify_tx_desc(tx_queue);
 		if (efx_dev_registered(efx))
 			netif_tx_unlock(efx->net_dev);
-	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
+	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
 		   EFX_WORKAROUND_10727(efx)) {
 		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
 	} else {
@@ -767,22 +751,22 @@
 	bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
 	unsigned rx_ev_pkt_type;
 
-	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
-	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
-	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
-	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
+	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
+	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
 	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
-						 RX_EV_BUF_OWNER_ID_ERR);
-	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
+						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
+	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_IP_FRAG_ERR);
 	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
-						  RX_EV_IP_HDR_CHKSUM_ERR);
+						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
 	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
-						   RX_EV_TCP_UDP_CHKSUM_ERR);
-	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
-	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
+						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
+	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
+	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
 	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
-			  0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
-	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
+			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
+	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
 
 	/* Every error apart from tobe_disc and pause_frm */
 	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
@@ -838,9 +822,8 @@
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned expected, dropped;
 
-	expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
-	dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
-		   FALCON_RXD_RING_MASK);
+	expected = rx_queue->removed_count & EFX_RXQ_MASK;
+	dropped = (index - expected) & EFX_RXQ_MASK;
 	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
 		dropped, index, expected);
 
@@ -866,17 +849,18 @@
 	struct efx_nic *efx = channel->efx;
 
 	/* Basic packet information */
-	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
-	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
-	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
-	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
-	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
-	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);
+	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
+	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
+	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
+		channel->channel);
 
 	rx_queue = &efx->rx_queue[channel->channel];
 
-	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
-	expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
+	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
+	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
 	if (unlikely(rx_ev_desc_ptr != expected_ptr))
 		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
 
@@ -884,7 +868,10 @@
 		/* If packet is marked as OK and packet type is TCP/IPv4 or
 		 * UDP/IPv4, then we can rely on the hardware checksum.
 		 */
-		checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
+		checksummed =
+			efx->rx_checksum_enabled &&
+			(rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
+			 rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP);
 	} else {
 		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
 					&discard);
@@ -892,10 +879,10 @@
 	}
 
 	/* Detect multicast packets that didn't match the filter */
-	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
+	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
 	if (rx_ev_mcast_pkt) {
 		unsigned int rx_ev_mcast_hash_match =
-			EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
+			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
 
 		if (unlikely(!rx_ev_mcast_hash_match))
 			discard = true;
@@ -915,22 +902,23 @@
 	struct efx_nic *efx = channel->efx;
 	bool handled = false;
 
-	if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
-	    EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
-	    EFX_QWORD_FIELD(*event, XG_PHY_INTR) ||
-	    EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) {
+	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
+	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
+	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
 		efx->phy_op->clear_interrupt(efx);
 		queue_work(efx->workqueue, &efx->phy_work);
 		handled = true;
 	}
 
 	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
-	    EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) {
+	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
 		queue_work(efx->workqueue, &efx->mac_work);
 		handled = true;
 	}
 
-	if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
+	if (falcon_rev(efx) <= FALCON_REV_A1 ?
+	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
+	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
 		EFX_ERR(efx, "channel %d seen global RX_RESET "
 			"event. Resetting.\n", channel->channel);
 
@@ -953,35 +941,35 @@
 	unsigned int ev_sub_code;
 	unsigned int ev_sub_data;
 
-	ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
-	ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);
+	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
+	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
 
 	switch (ev_sub_code) {
-	case TX_DESCQ_FLS_DONE_EV_DECODE:
+	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
 		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
 			  channel->channel, ev_sub_data);
 		break;
-	case RX_DESCQ_FLS_DONE_EV_DECODE:
+	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
 		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
 			  channel->channel, ev_sub_data);
 		break;
-	case EVQ_INIT_DONE_EV_DECODE:
+	case FSE_AZ_EVQ_INIT_DONE_EV:
 		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
 			channel->channel, ev_sub_data);
 		break;
-	case SRM_UPD_DONE_EV_DECODE:
+	case FSE_AZ_SRM_UPD_DONE_EV:
 		EFX_TRACE(efx, "channel %d SRAM update done\n",
 			  channel->channel);
 		break;
-	case WAKE_UP_EV_DECODE:
+	case FSE_AZ_WAKE_UP_EV:
 		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
 			  channel->channel, ev_sub_data);
 		break;
-	case TIMER_EV_DECODE:
+	case FSE_AZ_TIMER_EV:
 		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
 			  channel->channel, ev_sub_data);
 		break;
-	case RX_RECOVERY_EV_DECODE:
+	case FSE_AA_RX_RECOVER_EV:
 		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
 			"Resetting.\n", channel->channel);
 		atomic_inc(&efx->rx_reset);
@@ -990,12 +978,12 @@
 				   RESET_TYPE_RX_RECOVERY :
 				   RESET_TYPE_DISABLE);
 		break;
-	case RX_DSC_ERROR_EV_DECODE:
+	case FSE_BZ_RX_DSC_ERROR_EV:
 		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
 			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
 		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
 		break;
-	case TX_DSC_ERROR_EV_DECODE:
+	case FSE_BZ_TX_DSC_ERROR_EV:
 		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
 			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
 		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
@@ -1031,27 +1019,27 @@
 		/* Clear this event by marking it all ones */
 		EFX_SET_QWORD(*p_event);
 
-		ev_code = EFX_QWORD_FIELD(event, EV_CODE);
+		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
 
 		switch (ev_code) {
-		case RX_IP_EV_DECODE:
+		case FSE_AZ_EV_CODE_RX_EV:
 			falcon_handle_rx_event(channel, &event);
 			++rx_packets;
 			break;
-		case TX_IP_EV_DECODE:
+		case FSE_AZ_EV_CODE_TX_EV:
 			falcon_handle_tx_event(channel, &event);
 			break;
-		case DRV_GEN_EV_DECODE:
-			channel->eventq_magic
-				= EFX_QWORD_FIELD(event, EVQ_MAGIC);
+		case FSE_AZ_EV_CODE_DRV_GEN_EV:
+			channel->eventq_magic = EFX_QWORD_FIELD(
+				event, FSF_AZ_DRV_GEN_EV_MAGIC);
 			EFX_LOG(channel->efx, "channel %d received generated "
 				"event "EFX_QWORD_FMT"\n", channel->channel,
 				EFX_QWORD_VAL(event));
 			break;
-		case GLOBAL_EV_DECODE:
+		case FSE_AZ_EV_CODE_GLOBAL_EV:
 			falcon_handle_global_event(channel, &event);
 			break;
-		case DRIVER_EV_DECODE:
+		case FSE_AZ_EV_CODE_DRIVER_EV:
 			falcon_handle_driver_event(channel, &event);
 			break;
 		default:
@@ -1061,7 +1049,7 @@
 		}
 
 		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
 
 	} while (rx_packets < rx_quota);
 
@@ -1076,26 +1064,20 @@
 
 	/* Set timer register */
 	if (channel->irq_moderation) {
-		/* Round to resolution supported by hardware.  The value we
-		 * program is based at 0.  So actual interrupt moderation
-		 * achieved is ((x + 1) * res).
-		 */
-		channel->irq_moderation -= (channel->irq_moderation %
-					    FALCON_IRQ_MOD_RESOLUTION);
-		if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION)
-			channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION;
 		EFX_POPULATE_DWORD_2(timer_cmd,
-				     TIMER_MODE, TIMER_MODE_INT_HLDOFF,
-				     TIMER_VAL,
-				     channel->irq_moderation /
-				     FALCON_IRQ_MOD_RESOLUTION - 1);
+				     FRF_AB_TC_TIMER_MODE,
+				     FFE_BB_TIMER_MODE_INT_HLDOFF,
+				     FRF_AB_TC_TIMER_VAL,
+				     channel->irq_moderation - 1);
 	} else {
 		EFX_POPULATE_DWORD_2(timer_cmd,
-				     TIMER_MODE, TIMER_MODE_DIS,
-				     TIMER_VAL, 0);
+				     FRF_AB_TC_TIMER_MODE,
+				     FFE_BB_TIMER_MODE_DIS,
+				     FRF_AB_TC_TIMER_VAL, 0);
 	}
-	falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
-				  channel->channel);
+	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
+	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
+			       channel->channel);
 
 }
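The deleted comment explained that the timer value is based at zero, so programming x gives a hold-off of (x + 1) ticks; the rounding to hardware resolution presumably happens before irq_moderation is set, leaving only the "- 1" here. A user-space sketch of that quantisation, with a purely hypothetical tick size:

/* Sketch only: quantise a requested hold-off to hardware ticks and derive
 * the register value.  The tick size below is an assumption. */
#include <stdio.h>

#define IRQ_MOD_RESOLUTION_US 5		/* assumed microseconds per tick */

int main(void)
{
	unsigned requested_us = 37;	/* example request */
	unsigned ticks = requested_us / IRQ_MOD_RESOLUTION_US;

	if (ticks == 0)
		ticks = 1;		/* at least one tick */
	/* The register is based at 0, so writing (ticks - 1) yields a
	 * hold-off of ticks * resolution. */
	printf("TIMER_VAL=%u -> hold-off %u us\n",
	       ticks - 1, ticks * IRQ_MOD_RESOLUTION_US);
	return 0;
}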
 
@@ -1103,10 +1085,10 @@
 int falcon_probe_eventq(struct efx_channel *channel)
 {
 	struct efx_nic *efx = channel->efx;
-	unsigned int evq_size;
-
-	evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
-	return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
+	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
+		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
+	return falcon_alloc_special_buffer(efx, &channel->eventq,
+					   EFX_EVQ_SIZE * sizeof(efx_qword_t));
 }
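The BUILD_BUG_ON above only accepts an event queue size that is a power of two between 512 and 32768, which is what makes the "(read_ptr + 1) & EFX_EVQ_MASK" wrap-around used elsewhere in this file valid. A standalone sketch of the same two properties, assuming EFX_EVQ_MASK is EFX_EVQ_SIZE - 1 as the check implies:

#include <assert.h>
#include <stdio.h>

#define EVQ_SIZE 4096u			/* example size: a power of two */
#define EVQ_MASK (EVQ_SIZE - 1)

int main(void)
{
	unsigned read_ptr;

	/* A power of two ANDed with (itself - 1) is zero: the property the
	 * BUILD_BUG_ON above relies on. */
	assert((EVQ_SIZE & EVQ_MASK) == 0);

	/* For power-of-two sizes, "& mask" is the same as "% size", so the
	 * read pointer wraps without a divide. */
	read_ptr = EVQ_SIZE - 1;
	read_ptr = (read_ptr + 1) & EVQ_MASK;
	printf("wrapped to %u\n", read_ptr);	/* prints 0 */
	return 0;
}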
 
 void falcon_init_eventq(struct efx_channel *channel)
@@ -1126,11 +1108,11 @@
 
 	/* Push event queue to card */
 	EFX_POPULATE_OWORD_3(evq_ptr,
-			     EVQ_EN, 1,
-			     EVQ_SIZE, FALCON_EVQ_ORDER,
-			     EVQ_BUF_BASE_ID, channel->eventq.index);
-	falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
-			   channel->channel);
+			     FRF_AZ_EVQ_EN, 1,
+			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
+			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
+	efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
+			 channel->channel);
 
 	falcon_set_int_moderation(channel);
 }
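FRF_AZ_EVQ_SIZE is now programmed with __ffs(entries) rather than a precomputed order constant; for a power-of-two entry count the index of the lowest set bit is log2 of the size. A user-space stand-in showing the equivalence (the kernel's __ffs is replaced by a compiler builtin here):

#include <stdio.h>

/* Stand-in for the kernel's __ffs(): index of the lowest set bit. */
static unsigned lowest_set_bit(unsigned long x)
{
	return (unsigned)__builtin_ctzl(x);
}

int main(void)
{
	unsigned long entries = 4096;	/* example power-of-two queue size */

	/* For a power of two the lowest set bit is the only set bit, so this
	 * is log2(entries) - the "order" the old code kept in a constant. */
	printf("EVQ_SIZE field = %u\n", lowest_set_bit(entries));	/* 12 */
	return 0;
}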
@@ -1142,8 +1124,8 @@
 
 	/* Remove event queue from card */
 	EFX_ZERO_OWORD(eventq_ptr);
-	falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
-			   channel->channel);
+	efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
+			 channel->channel);
 
 	/* Unpin event queue */
 	falcon_fini_special_buffer(efx, &channel->eventq);
@@ -1164,9 +1146,9 @@
 {
 	efx_qword_t test_event;
 
-	EFX_POPULATE_QWORD_2(test_event,
-			     EV_CODE, DRV_GEN_EV_DECODE,
-			     EVQ_MAGIC, magic);
+	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+			     FSE_AZ_EV_CODE_DRV_GEN_EV,
+			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
 	falcon_generate_event(channel, &test_event);
 }
 
@@ -1174,11 +1156,12 @@
 {
 	efx_qword_t phy_event;
 
-	EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
+	EFX_POPULATE_QWORD_1(phy_event, FSF_AZ_EV_CODE,
+			     FSE_AZ_EV_CODE_GLOBAL_EV);
 	if (EFX_IS10G(efx))
-		EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1);
+		EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_XG_PHY0_INTR, 1);
 	else
-		EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1);
+		EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_G_PHY0_INTR, 1);
 
 	falcon_generate_event(&efx->channel[0], &phy_event);
 }
@@ -1196,7 +1179,7 @@
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK;
+	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
 
 	do {
 		efx_qword_t *event = falcon_event(channel, read_ptr);
@@ -1206,22 +1189,23 @@
 		if (!falcon_event_present(event))
 			break;
 
-		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
-		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
-		if (ev_code == DRIVER_EV_DECODE &&
-		    ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) {
+		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
+		ev_sub_code = EFX_QWORD_FIELD(*event,
+					      FSF_AZ_DRIVER_EV_SUBCODE);
+		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
+		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
 			ev_queue = EFX_QWORD_FIELD(*event,
-						   DRIVER_EV_TX_DESCQ_ID);
+						   FSF_AZ_DRIVER_EV_SUBDATA);
 			if (ev_queue < EFX_TX_QUEUE_COUNT) {
 				tx_queue = efx->tx_queue + ev_queue;
 				tx_queue->flushed = true;
 			}
-		} else if (ev_code == DRIVER_EV_DECODE &&
-			   ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) {
-			ev_queue = EFX_QWORD_FIELD(*event,
-						   DRIVER_EV_RX_DESCQ_ID);
-			ev_failed = EFX_QWORD_FIELD(*event,
-						    DRIVER_EV_RX_FLUSH_FAIL);
+		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
+			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
+			ev_queue = EFX_QWORD_FIELD(
+				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+			ev_failed = EFX_QWORD_FIELD(
+				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
 			if (ev_queue < efx->n_rx_queues) {
 				rx_queue = efx->rx_queue + ev_queue;
 
@@ -1233,7 +1217,7 @@
 			}
 		}
 
-		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
 	} while (read_ptr != end_ptr);
 }
 
@@ -1311,9 +1295,9 @@
 	efx_oword_t int_en_reg_ker;
 
 	EFX_POPULATE_OWORD_2(int_en_reg_ker,
-			     KER_INT_KER, force,
-			     DRV_INT_EN_KER, enabled);
-	falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
+			     FRF_AZ_KER_INT_KER, force,
+			     FRF_AZ_DRV_INT_EN_KER, enabled);
+	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
 }
 
 void falcon_enable_interrupts(struct efx_nic *efx)
@@ -1326,9 +1310,10 @@
 
 	/* Program address */
 	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
-			     NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
-			     INT_ADR_KER, efx->irq_status.dma_addr);
-	falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);
+			     FRF_AZ_NORM_INT_VEC_DIS_KER,
+			     EFX_INT_MODE_USE_MSI(efx),
+			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+	efx_writeo(efx, &int_adr_reg_ker, FR_AZ_INT_ADR_KER);
 
 	/* Enable interrupts */
 	falcon_interrupts(efx, 1, 0);
@@ -1368,9 +1353,9 @@
 {
 	efx_dword_t reg;
 
-	EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
-	falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
-	falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
+	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
+	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
+	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
 }
 
 /* Process a fatal interrupt
@@ -1383,8 +1368,8 @@
 	efx_oword_t fatal_intr;
 	int error, mem_perr;
 
-	falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
-	error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
+	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
+	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
 
 	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
 		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
@@ -1394,10 +1379,10 @@
 		goto out;
 
 	/* If this is a memory parity error dump which blocks are offending */
-	mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
+	mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
 	if (mem_perr) {
 		efx_oword_t reg;
-		falcon_read(efx, &reg, MEM_STAT_REG_KER);
+		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
 		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
 			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
 	}
@@ -1409,13 +1394,13 @@
 	falcon_disable_interrupts(efx);
 
 	/* Count errors and reset or disable the NIC accordingly */
-	if (nic_data->int_error_count == 0 ||
-	    time_after(jiffies, nic_data->int_error_expire)) {
-		nic_data->int_error_count = 0;
-		nic_data->int_error_expire =
+	if (efx->int_error_count == 0 ||
+	    time_after(jiffies, efx->int_error_expire)) {
+		efx->int_error_count = 0;
+		efx->int_error_expire =
 			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
 	}
-	if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) {
+	if (++efx->int_error_count < FALCON_MAX_INT_ERRORS) {
 		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
 		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
 	} else {
@@ -1441,11 +1426,11 @@
 	int syserr;
 
 	/* Read the ISR which also ACKs the interrupts */
-	falcon_readl(efx, &reg, INT_ISR0_B0);
+	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
 	queues = EFX_EXTRACT_DWORD(reg, 0, 31);
 
 	/* Check to see if we have a serious error condition */
-	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
+	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
 	if (unlikely(syserr))
 		return falcon_fatal_interrupt(efx);
 
@@ -1491,7 +1476,7 @@
 		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
 	/* Check to see if we have a serious error condition */
-	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
+	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
 	if (unlikely(syserr))
 		return falcon_fatal_interrupt(efx);
 
@@ -1558,12 +1543,12 @@
 	if (falcon_rev(efx) < FALCON_REV_B0)
 		return;
 
-	for (offset = RX_RSS_INDIR_TBL_B0;
-	     offset < RX_RSS_INDIR_TBL_B0 + 0x800;
+	for (offset = FR_BZ_RX_INDIRECTION_TBL;
+	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
 	     offset += 0x10) {
-		EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
+		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
 				     i % efx->n_rx_queues);
-		falcon_writel(efx, &dword, offset);
+		efx_writed(efx, &dword, offset);
 		i++;
 	}
 }
@@ -1626,7 +1611,7 @@
 
 	/* ACK legacy interrupt */
 	if (falcon_rev(efx) >= FALCON_REV_B0)
-		falcon_read(efx, &reg, INT_ISR0_B0);
+		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
 	else
 		falcon_irq_ack_a1(efx);
 
@@ -1647,8 +1632,8 @@
 static int falcon_spi_poll(struct efx_nic *efx)
 {
 	efx_oword_t reg;
-	falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
-	return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
+	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
+	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
 }
 
 /* Wait for SPI command completion */
@@ -1700,27 +1685,27 @@
 
 	/* Program address register, if we have an address */
 	if (addressed) {
-		EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
-		falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
+		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
+		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
 	}
 
 	/* Program data register, if we have data */
 	if (in != NULL) {
 		memcpy(&reg, in, len);
-		falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
+		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
 	}
 
 	/* Issue read/write command */
 	EFX_POPULATE_OWORD_7(reg,
-			     EE_SPI_HCMD_CMD_EN, 1,
-			     EE_SPI_HCMD_SF_SEL, spi->device_id,
-			     EE_SPI_HCMD_DABCNT, len,
-			     EE_SPI_HCMD_READ, reading,
-			     EE_SPI_HCMD_DUBCNT, 0,
-			     EE_SPI_HCMD_ADBCNT,
+			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
+			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
+			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
+			     FRF_AB_EE_SPI_HCMD_READ, reading,
+			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
+			     FRF_AB_EE_SPI_HCMD_ADBCNT,
 			     (addressed ? spi->addr_len : 0),
-			     EE_SPI_HCMD_ENC, command);
-	falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
+			     FRF_AB_EE_SPI_HCMD_ENC, command);
+	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
 
 	/* Wait for read/write to complete */
 	rc = falcon_spi_wait(efx);
@@ -1729,7 +1714,7 @@
 
 	/* Read data */
 	if (out != NULL) {
-		falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
+		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
 		memcpy(out, &reg, len);
 	}
 
@@ -1870,21 +1855,22 @@
 		 * macs, so instead use the internal MAC resets
 		 */
 		if (!EFX_IS10G(efx)) {
-			EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1);
-			falcon_write(efx, &reg, GM_CFG1_REG);
+			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
+			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
 			udelay(1000);
 
-			EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0);
-			falcon_write(efx, &reg, GM_CFG1_REG);
+			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
+			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
 			udelay(1000);
 			return 0;
 		} else {
-			EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
-			falcon_write(efx, &reg, XM_GLB_CFG_REG);
+			EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
+			efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
 
 			for (count = 0; count < 10000; count++) {
-				falcon_read(efx, &reg, XM_GLB_CFG_REG);
-				if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
+				efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
+				if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
+				    0)
 					return 0;
 				udelay(10);
 			}
@@ -1898,22 +1884,22 @@
 	 * the drain sequence with the statistics fetch */
 	efx_stats_disable(efx);
 
-	falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
-	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
+	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
+	EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
+	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
 
-	falcon_read(efx, &reg, GLB_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1);
-	EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1);
-	EFX_SET_OWORD_FIELD(reg, RST_EM, 1);
-	falcon_write(efx, &reg, GLB_CTL_REG_KER);
+	efx_reado(efx, &reg, FR_AB_GLB_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
+	efx_writeo(efx, &reg, FR_AB_GLB_CTL);
 
 	count = 0;
 	while (1) {
-		falcon_read(efx, &reg, GLB_CTL_REG_KER);
-		if (!EFX_OWORD_FIELD(reg, RST_XGTX) &&
-		    !EFX_OWORD_FIELD(reg, RST_XGRX) &&
-		    !EFX_OWORD_FIELD(reg, RST_EM)) {
+		efx_reado(efx, &reg, FR_AB_GLB_CTL);
+		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
+		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
+		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
 			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
 				count);
 			break;
@@ -1944,9 +1930,9 @@
 	    (efx->loopback_mode != LOOPBACK_NONE))
 		return;
 
-	falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
+	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
 	/* There is no point in draining more than once */
-	if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0))
+	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
 		return;
 
 	falcon_reset_macs(efx);
@@ -1960,9 +1946,9 @@
 		return;
 
 	/* Isolate the MAC -> RX */
-	falcon_read(efx, &reg, RX_CFG_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0);
-	falcon_write(efx, &reg, RX_CFG_REG_KER);
+	efx_reado(efx, &reg, FR_AZ_RX_CFG);
+	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
+	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
 
 	if (!efx->link_up)
 		falcon_drain_tx_fifo(efx);
@@ -1985,19 +1971,19 @@
 	 * indefinitely held and TX queue can be flushed at any point
 	 * while the link is down. */
 	EFX_POPULATE_OWORD_5(reg,
-			     MAC_XOFF_VAL, 0xffff /* max pause time */,
-			     MAC_BCAD_ACPT, 1,
-			     MAC_UC_PROM, efx->promiscuous,
-			     MAC_LINK_STATUS, 1, /* always set */
-			     MAC_SPEED, link_speed);
+			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
+			     FRF_AB_MAC_BCAD_ACPT, 1,
+			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
+			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
+			     FRF_AB_MAC_SPEED, link_speed);
 	/* On B0, MAC backpressure can be disabled and packets get
 	 * discarded. */
 	if (falcon_rev(efx) >= FALCON_REV_B0) {
-		EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
+		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
 				    !efx->link_up);
 	}
 
-	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
+	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
 
 	/* Restore the multicast hash registers. */
 	falcon_set_multicast_hash(efx);
@@ -2006,13 +1992,13 @@
 	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
 	 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
 	tx_fc = !!(efx->link_fc & EFX_FC_TX);
-	falcon_read(efx, &reg, RX_CFG_REG_KER);
-	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
+	efx_reado(efx, &reg, FR_AZ_RX_CFG);
+	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc);
 
 	/* Unisolate the MAC -> RX */
 	if (falcon_rev(efx) >= FALCON_REV_B0)
-		EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
-	falcon_write(efx, &reg, RX_CFG_REG_KER);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
 }
 
 int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
@@ -2027,8 +2013,8 @@
 	/* Statistics fetch will fail if the MAC is in TX drain */
 	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		efx_oword_t temp;
-		falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
-		if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
+		efx_reado(efx, &temp, FR_AB_MAC_CTRL);
+		if (EFX_OWORD_FIELD(temp, FRF_BB_TXFIFO_DRAIN_EN))
 			return 0;
 	}
 
@@ -2038,10 +2024,10 @@
 
 	/* Initiate DMA transfer of stats */
 	EFX_POPULATE_OWORD_2(reg,
-			     MAC_STAT_DMA_CMD, 1,
-			     MAC_STAT_DMA_ADR,
+			     FRF_AB_MAC_STAT_DMA_CMD, 1,
+			     FRF_AB_MAC_STAT_DMA_ADR,
 			     efx->stats_buffer.dma_addr);
-	falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER);
+	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
 
 	/* Wait for transfer to complete */
 	for (i = 0; i < 400; i++) {
@@ -2071,10 +2057,10 @@
 
 	/* wait up to 50ms - taken max from datasheet */
 	for (count = 0; count < 5000; count++) {
-		falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
-		if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
-			if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
-			    EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) {
+		efx_readd(efx, &md_stat, FR_AB_MD_STAT);
+		if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
+			if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
+			    EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
 				EFX_ERR(efx, "error from GMII access "
 					EFX_DWORD_FMT"\n",
 					EFX_DWORD_VAL(md_stat));
@@ -2107,29 +2093,30 @@
 		goto out;
 
 	/* Write the address/ID register */
-	EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
-	falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
+	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
 
-	EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
-	falcon_write(efx, &reg, MD_ID_REG_KER);
+	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+			     FRF_AB_MD_DEV_ADR, devad);
+	efx_writeo(efx, &reg, FR_AB_MD_ID);
 
 	/* Write data */
-	EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
-	falcon_write(efx, &reg, MD_TXD_REG_KER);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
+	efx_writeo(efx, &reg, FR_AB_MD_TXD);
 
 	EFX_POPULATE_OWORD_2(reg,
-			     MD_WRC, 1,
-			     MD_GC, 0);
-	falcon_write(efx, &reg, MD_CS_REG_KER);
+			     FRF_AB_MD_WRC, 1,
+			     FRF_AB_MD_GC, 0);
+	efx_writeo(efx, &reg, FR_AB_MD_CS);
 
 	/* Wait for data to be written */
 	rc = falcon_gmii_wait(efx);
 	if (rc) {
 		/* Abort the write operation */
 		EFX_POPULATE_OWORD_2(reg,
-				     MD_WRC, 0,
-				     MD_GC, 1);
-		falcon_write(efx, &reg, MD_CS_REG_KER);
+				     FRF_AB_MD_WRC, 0,
+				     FRF_AB_MD_GC, 1);
+		efx_writeo(efx, &reg, FR_AB_MD_CS);
 		udelay(10);
 	}
 
@@ -2153,29 +2140,30 @@
 	if (rc)
 		goto out;
 
-	EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
-	falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
+	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
 
-	EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
-	falcon_write(efx, &reg, MD_ID_REG_KER);
+	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+			     FRF_AB_MD_DEV_ADR, devad);
+	efx_writeo(efx, &reg, FR_AB_MD_ID);
 
 	/* Request data to be read */
-	EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0);
-	falcon_write(efx, &reg, MD_CS_REG_KER);
+	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
+	efx_writeo(efx, &reg, FR_AB_MD_CS);
 
 	/* Wait for data to become available */
 	rc = falcon_gmii_wait(efx);
 	if (rc == 0) {
-		falcon_read(efx, &reg, MD_RXD_REG_KER);
-		rc = EFX_OWORD_FIELD(reg, MD_RXD);
+		efx_reado(efx, &reg, FR_AB_MD_RXD);
+		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
 		EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
 			    prtad, devad, addr, rc);
 	} else {
 		/* Abort the read operation */
 		EFX_POPULATE_OWORD_2(reg,
-				     MD_RIC, 0,
-				     MD_GC, 1);
-		falcon_write(efx, &reg, MD_CS_REG_KER);
+				     FRF_AB_MD_RIC, 0,
+				     FRF_AB_MD_GC, 1);
+		efx_writeo(efx, &reg, FR_AB_MD_CS);
 
 		EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
 			prtad, devad, addr, rc);
@@ -2186,37 +2174,6 @@
 	return rc;
 }
 
-static int falcon_probe_phy(struct efx_nic *efx)
-{
-	switch (efx->phy_type) {
-	case PHY_TYPE_SFX7101:
-		efx->phy_op = &falcon_sfx7101_phy_ops;
-		break;
-	case PHY_TYPE_SFT9001A:
-	case PHY_TYPE_SFT9001B:
-		efx->phy_op = &falcon_sft9001_phy_ops;
-		break;
-	case PHY_TYPE_QT2022C2:
-	case PHY_TYPE_QT2025C:
-		efx->phy_op = &falcon_xfp_phy_ops;
-		break;
-	default:
-		EFX_ERR(efx, "Unknown PHY type %d\n",
-			efx->phy_type);
-		return -1;
-	}
-
-	if (efx->phy_op->macs & EFX_XMAC)
-		efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
-					(1 << LOOPBACK_XGXS) |
-					(1 << LOOPBACK_XAUI));
-	if (efx->phy_op->macs & EFX_GMAC)
-		efx->loopback_modes |= (1 << LOOPBACK_GMAC);
-	efx->loopback_modes |= efx->phy_op->loopbacks;
-
-	return 0;
-}
-
 int falcon_switch_mac(struct efx_nic *efx)
 {
 	struct efx_mac_operations *old_mac_op = efx->mac_op;
@@ -2242,16 +2199,17 @@
 
 	/* Always push the NIC_STAT_REG setting even if the mac hasn't
 	 * changed, because this function is run post online reset */
-	falcon_read(efx, &nic_stat, NIC_STAT_REG);
+	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
 	strap_val = EFX_IS10G(efx) ? 5 : 3;
 	if (falcon_rev(efx) >= FALCON_REV_B0) {
-		EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1);
-		EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val);
-		falcon_write(efx, &nic_stat, NIC_STAT_REG);
+		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
+		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
+		efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
 	} else {
 		/* Falcon A1 does not support 1G/10G speed switching
 		 * and must not be used with a PHY that does. */
-		BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
+		BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
+		       strap_val);
 	}
 
 	if (old_mac_op == efx->mac_op)
@@ -2272,10 +2230,31 @@
 {
 	int rc;
 
-	/* Hook in PHY operations table */
-	rc = falcon_probe_phy(efx);
-	if (rc)
-		return rc;
+	switch (efx->phy_type) {
+	case PHY_TYPE_SFX7101:
+		efx->phy_op = &falcon_sfx7101_phy_ops;
+		break;
+	case PHY_TYPE_SFT9001A:
+	case PHY_TYPE_SFT9001B:
+		efx->phy_op = &falcon_sft9001_phy_ops;
+		break;
+	case PHY_TYPE_QT2022C2:
+	case PHY_TYPE_QT2025C:
+		efx->phy_op = &falcon_qt202x_phy_ops;
+		break;
+	default:
+		EFX_ERR(efx, "Unknown PHY type %d\n",
+			efx->phy_type);
+		return -ENODEV;
+	}
+
+	if (efx->phy_op->macs & EFX_XMAC)
+		efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
+					(1 << LOOPBACK_XGXS) |
+					(1 << LOOPBACK_XAUI));
+	if (efx->phy_op->macs & EFX_GMAC)
+		efx->loopback_modes |= (1 << LOOPBACK_GMAC);
+	efx->loopback_modes |= efx->phy_op->loopbacks;
 
 	/* Set up MDIO structure for PHY */
 	efx->mdio.mmds = efx->phy_op->mmds;
@@ -2324,8 +2303,8 @@
 	 */
 	set_bit_le(0xff, mc_hash->byte);
 
-	falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
-	falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
+	efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
+	efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
 }
 
 
@@ -2351,7 +2330,7 @@
 	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
 	if (!region)
 		return -ENOMEM;
-	nvconfig = region + NVCONFIG_OFFSET;
+	nvconfig = region + FALCON_NVCONFIG_OFFSET;
 
 	mutex_lock(&efx->spi_lock);
 	rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
@@ -2367,7 +2346,7 @@
 	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
 
 	rc = -EINVAL;
-	if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) {
+	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
 		EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
 		goto out;
 	}
@@ -2403,41 +2382,41 @@
 	unsigned address;
 	efx_oword_t mask;
 } efx_test_registers[] = {
-	{ ADR_REGION_REG_KER,
+	{ FR_AZ_ADR_REGION,
 	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
-	{ RX_CFG_REG_KER,
+	{ FR_AZ_RX_CFG,
 	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
-	{ TX_CFG_REG_KER,
+	{ FR_AZ_TX_CFG,
 	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
-	{ TX_CFG2_REG_KER,
+	{ FR_AZ_TX_RESERVED,
 	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
-	{ MAC0_CTRL_REG_KER,
+	{ FR_AB_MAC_CTRL,
 	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
-	{ SRM_TX_DC_CFG_REG_KER,
+	{ FR_AZ_SRM_TX_DC_CFG,
 	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
-	{ RX_DC_CFG_REG_KER,
+	{ FR_AZ_RX_DC_CFG,
 	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
-	{ RX_DC_PF_WM_REG_KER,
+	{ FR_AZ_RX_DC_PF_WM,
 	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
-	{ DP_CTRL_REG,
+	{ FR_BZ_DP_CTRL,
 	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
-	{ GM_CFG2_REG,
+	{ FR_AB_GM_CFG2,
 	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
-	{ GMF_CFG0_REG,
+	{ FR_AB_GMF_CFG0,
 	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_GLB_CFG_REG,
+	{ FR_AB_XM_GLB_CFG,
 	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_TX_CFG_REG,
+	{ FR_AB_XM_TX_CFG,
 	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_RX_CFG_REG,
+	{ FR_AB_XM_RX_CFG,
 	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_RX_PARAM_REG,
+	{ FR_AB_XM_RX_PARAM,
 	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_FC_REG,
+	{ FR_AB_XM_FC,
 	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
-	{ XM_ADR_LO_REG,
+	{ FR_AB_XM_ADR_LO,
 	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
-	{ XX_SD_CTL_REG,
+	{ FR_AB_XX_SD_CTL,
 	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
 };
 
@@ -2461,7 +2440,7 @@
 		mask = imask = efx_test_registers[i].mask;
 		EFX_INVERT_OWORD(imask);
 
-		falcon_read(efx, &original, address);
+		efx_reado(efx, &original, address);
 
 		/* bit sweep on and off */
 		for (j = 0; j < 128; j++) {
@@ -2472,8 +2451,8 @@
 			EFX_AND_OWORD(reg, original, mask);
 			EFX_SET_OWORD32(reg, j, j, 1);
 
-			falcon_write(efx, &reg, address);
-			falcon_read(efx, &buf, address);
+			efx_writeo(efx, &reg, address);
+			efx_reado(efx, &buf, address);
 
 			if (efx_masked_compare_oword(&reg, &buf, &mask))
 				goto fail;
@@ -2482,14 +2461,14 @@
 			EFX_OR_OWORD(reg, original, mask);
 			EFX_SET_OWORD32(reg, j, j, 0);
 
-			falcon_write(efx, &reg, address);
-			falcon_read(efx, &buf, address);
+			efx_writeo(efx, &reg, address);
+			efx_reado(efx, &buf, address);
 
 			if (efx_masked_compare_oword(&reg, &buf, &mask))
 				goto fail;
 		}
 
-		falcon_write(efx, &original, address);
+		efx_writeo(efx, &original, address);
 	}
 
 	return 0;
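The loop above is a masked walking-ones/zeros test: each writable bit is set against a background of zeros and cleared against a background of ones, and the read-back is compared only under the per-register mask before the original value is restored. A simplified 32-bit user-space sketch of the same idea (the driver operates on 128-bit owords through MMIO accessors):

#include <stdint.h>
#include <stdio.h>

/* Fake 32-bit "register"; a real test goes through MMIO reads/writes. */
static uint32_t fake_reg;
static void reg_write(uint32_t v) { fake_reg = v; }
static uint32_t reg_read(void)    { return fake_reg; }

/* Non-zero if a and b differ within the writable mask */
static int masked_compare(uint32_t a, uint32_t b, uint32_t mask)
{
	return (a & mask) != (b & mask);
}

int main(void)
{
	const uint32_t mask = 0x0001FFFF;	/* writable bits of this register */
	uint32_t original = reg_read();
	unsigned j;

	for (j = 0; j < 32; j++) {
		uint32_t bit = 1u << j, reg, buf;

		/* walk a one through a background of zeros (within the mask) */
		reg = (original & mask) | bit;
		reg_write(reg);
		buf = reg_read();
		if (masked_compare(reg, buf, mask))
			return 1;

		/* ...then a zero through a background of ones */
		reg = (original | mask) & ~bit;
		reg_write(reg);
		buf = reg_read();
		if (masked_compare(reg, buf, mask))
			return 1;
	}

	reg_write(original);			/* restore the register */
	printf("register test passed\n");
	return 0;
}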
@@ -2537,22 +2516,24 @@
 		}
 
 		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
-				     EXT_PHY_RST_DUR, 0x7,
-				     SWRST, 1);
+				     FRF_AB_EXT_PHY_RST_DUR,
+				     FFE_AB_EXT_PHY_RST_DUR_10240US,
+				     FRF_AB_SWRST, 1);
 	} else {
-		int reset_phy = (method == RESET_TYPE_INVISIBLE ?
-				 EXCLUDE_FROM_RESET : 0);
-
 		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
-				     EXT_PHY_RST_CTL, reset_phy,
-				     PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
-				     PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
-				     PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
-				     EE_RST_CTL, EXCLUDE_FROM_RESET,
-				     EXT_PHY_RST_DUR, 0x7 /* 10ms */,
-				     SWRST, 1);
+				     /* exclude PHY from "invisible" reset */
+				     FRF_AB_EXT_PHY_RST_CTL,
+				     method == RESET_TYPE_INVISIBLE,
+				     /* exclude EEPROM/flash and PCIe */
+				     FRF_AB_PCIE_CORE_RST_CTL, 1,
+				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
+				     FRF_AB_PCIE_SD_RST_CTL, 1,
+				     FRF_AB_EE_RST_CTL, 1,
+				     FRF_AB_EXT_PHY_RST_DUR,
+				     FFE_AB_EXT_PHY_RST_DUR_10240US,
+				     FRF_AB_SWRST, 1);
 	}
-	falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
+	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
 
 	EFX_LOG(efx, "waiting for hardware reset\n");
 	schedule_timeout_uninterruptible(HZ / 20);
@@ -2577,8 +2558,8 @@
 	}
 
 	/* Assert that reset complete */
-	falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
-	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
+	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
+	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
 		rc = -ETIMEDOUT;
 		EFX_ERR(efx, "timed out waiting for hardware reset\n");
 		goto fail5;
@@ -2606,16 +2587,16 @@
 	int count;
 
 	/* Set the SRAM wake/sleep GPIO appropriately. */
-	falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
-	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1);
-	falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
+	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
+	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
+	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
+	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
 
 	/* Initiate SRAM reset */
 	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
-			     SRAM_OOB_BT_INIT_EN, 1,
-			     SRM_NUM_BANKS_AND_BANK_SIZE, 0);
-	falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
+			     FRF_AZ_SRM_INIT_EN, 1,
+			     FRF_AZ_SRM_NB_SZ, 0);
+	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
 
 	/* Wait for SRAM reset to complete */
 	count = 0;
@@ -2626,8 +2607,8 @@
 		schedule_timeout_uninterruptible(HZ / 50);
 
 		/* Check for reset complete */
-		falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
-		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
+		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
+		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
 			EFX_LOG(efx, "SRAM reset complete\n");
 
 			return 0;
@@ -2712,16 +2693,16 @@
 		board_rev = le16_to_cpu(v2->board_revision);
 
 		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
-			__le32 fl = v3->spi_device_type[EE_SPI_FLASH];
-			__le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
-			rc = falcon_spi_device_init(efx, &efx->spi_flash,
-						    EE_SPI_FLASH,
-						    le32_to_cpu(fl));
+			rc = falcon_spi_device_init(
+				efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
+				le32_to_cpu(v3->spi_device_type
+					    [FFE_AB_SPI_DEVICE_FLASH]));
 			if (rc)
 				goto fail2;
-			rc = falcon_spi_device_init(efx, &efx->spi_eeprom,
-						    EE_SPI_EEPROM,
-						    le32_to_cpu(ee));
+			rc = falcon_spi_device_init(
+				efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
+				le32_to_cpu(v3->spi_device_type
+					    [FFE_AB_SPI_DEVICE_EEPROM]));
 			if (rc)
 				goto fail2;
 		}
@@ -2732,7 +2713,7 @@
 
 	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
 
-	efx_set_board_info(efx, board_rev);
+	falcon_probe_board(efx, board_rev);
 
 	kfree(nvconfig);
 	return 0;
@@ -2752,13 +2733,13 @@
 	efx_oword_t altera_build;
 	efx_oword_t nic_stat;
 
-	falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
-	if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
+	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+	if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
 		EFX_ERR(efx, "Falcon FPGA not supported\n");
 		return -ENODEV;
 	}
 
-	falcon_read(efx, &nic_stat, NIC_STAT_REG);
+	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
 
 	switch (falcon_rev(efx)) {
 	case FALCON_REV_A0:
@@ -2767,7 +2748,7 @@
 		return -ENODEV;
 
 	case FALCON_REV_A1:
-		if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
+		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
 			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
 			return -ENODEV;
 		}
@@ -2782,7 +2763,7 @@
 	}
 
 	/* Initial assumed speed */
-	efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000;
+	efx->link_speed = EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) ? 10000 : 1000;
 
 	return 0;
 }
@@ -2793,34 +2774,36 @@
 	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
 	int boot_dev;
 
-	falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
-	falcon_read(efx, &nic_stat, NIC_STAT_REG);
-	falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
+	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
+	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
+	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
 
-	if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) {
-		boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ?
-			    EE_SPI_FLASH : EE_SPI_EEPROM);
+	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
+		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
+			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
 		EFX_LOG(efx, "Booted from %s\n",
-			boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM");
+			boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
 	} else {
 		/* Disable VPD and set clock dividers to safe
 		 * values for initial programming. */
 		boot_dev = -1;
 		EFX_LOG(efx, "Booted from internal ASIC settings;"
 			" setting SPI config\n");
-		EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
+		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
 				     /* 125 MHz / 7 ~= 20 MHz */
-				     EE_SF_CLOCK_DIV, 7,
+				     FRF_AB_EE_SF_CLOCK_DIV, 7,
 				     /* 125 MHz / 63 ~= 2 MHz */
-				     EE_EE_CLOCK_DIV, 63);
-		falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
+				     FRF_AB_EE_EE_CLOCK_DIV, 63);
+		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
 	}
 
-	if (boot_dev == EE_SPI_FLASH)
-		falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH,
+	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
+		falcon_spi_device_init(efx, &efx->spi_flash,
+				       FFE_AB_SPI_DEVICE_FLASH,
 				       default_flash_type);
-	if (boot_dev == EE_SPI_EEPROM)
-		falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM,
+	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
+		falcon_spi_device_init(efx, &efx->spi_eeprom,
+				       FFE_AB_SPI_DEVICE_EEPROM,
 				       large_eeprom_type);
 }
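The clock-divider comments above quote a 125 MHz base clock; a trivial check of the quoted approximations, without assuming whether the hardware divides by N or by N + 1:

#include <stdio.h>

int main(void)
{
	const double base_mhz = 125.0;	/* base clock quoted in the comments */

	/* Divider values written to the SF/EE clock-divider fields above */
	printf("flash SPI clock  ~ %.1f MHz\n", base_mhz / 7);	/* ~17.9 */
	printf("EEPROM SPI clock ~ %.1f MHz\n", base_mhz / 63);	/* ~2.0 */
	return 0;
}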
 
@@ -2911,6 +2894,52 @@
 	return rc;
 }
 
+static void falcon_init_rx_cfg(struct efx_nic *efx)
+{
+	/* Prior to Siena the RX DMA engine will split each frame at
+	 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
+	 * be so large that that never happens. */
+	const unsigned huge_buf_size = (3 * 4096) >> 5;
+	/* RX control FIFO thresholds (32 entries) */
+	const unsigned ctrl_xon_thr = 20;
+	const unsigned ctrl_xoff_thr = 25;
+	/* RX data FIFO thresholds (256-byte units; size varies) */
+	int data_xon_thr = rx_xon_thresh_bytes >> 8;
+	int data_xoff_thr = rx_xoff_thresh_bytes >> 8;
+	efx_oword_t reg;
+
+	efx_reado(efx, &reg, FR_AZ_RX_CFG);
+	if (falcon_rev(efx) <= FALCON_REV_A1) {
+		/* Data FIFO size is 5.5K */
+		if (data_xon_thr < 0)
+			data_xon_thr = 512 >> 8;
+		if (data_xoff_thr < 0)
+			data_xoff_thr = 2048 >> 8;
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
+				    huge_buf_size);
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
+	} else {
+		/* Data FIFO size is 80K; register fields moved */
+		if (data_xon_thr < 0)
+			data_xon_thr = 27648 >> 8; /* ~3*max MTU */
+		if (data_xoff_thr < 0)
+			data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
+				    huge_buf_size);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+	}
+	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+}
+
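falcon_init_rx_cfg() converts byte counts into the units the registers expect by shifting: RX_USR_BUF_SIZE is in 32-byte units (>> 5) and the XON/XOFF MAC thresholds are in 256-byte units (>> 8), with negative module parameters selecting the per-revision defaults. A sketch of the conversions using the B0 defaults quoted in the code:

#include <stdio.h>

int main(void)
{
	unsigned usr_buf_bytes = 3 * 4096;	/* large enough that frames never split */
	unsigned xon_bytes  = 27648;		/* B0 default, ~3 * max MTU */
	unsigned xoff_bytes = 54272;		/* B0 default, ~80KB - 3 * max MTU */

	printf("RX_USR_BUF_SIZE = %u (32-byte units)\n", usr_buf_bytes >> 5);
	printf("XON threshold   = %u (256-byte units)\n", xon_bytes >> 8);
	printf("XOFF threshold  = %u (256-byte units)\n", xoff_bytes >> 8);
	return 0;
}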
 /* This call performs hardware-specific global initialisation, such as
  * defining the descriptor cache sizes and number of RSS channels.
  * It does not set up any buffers, descriptor rings or event queues.
@@ -2918,56 +2947,51 @@
 int falcon_init_nic(struct efx_nic *efx)
 {
 	efx_oword_t temp;
-	unsigned thresh;
 	int rc;
 
 	/* Use on-chip SRAM */
-	falcon_read(efx, &temp, NIC_STAT_REG);
-	EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
-	falcon_write(efx, &temp, NIC_STAT_REG);
+	efx_reado(efx, &temp, FR_AB_NIC_STAT);
+	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
+	efx_writeo(efx, &temp, FR_AB_NIC_STAT);
 
 	/* Set the source of the GMAC clock */
 	if (falcon_rev(efx) == FALCON_REV_B0) {
-		falcon_read(efx, &temp, GPIO_CTL_REG_KER);
-		EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true);
-		falcon_write(efx, &temp, GPIO_CTL_REG_KER);
+		efx_reado(efx, &temp, FR_AB_GPIO_CTL);
+		EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
+		efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
 	}
 
-	/* Set buffer table mode */
-	EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
-	falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
-
 	rc = falcon_reset_sram(efx);
 	if (rc)
 		return rc;
 
 	/* Set positions of descriptor caches in SRAM. */
-	EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
-	falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
-	EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
-	falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
+	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
+	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
 
 	/* Set TX descriptor cache size. */
 	BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
-	EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
-	falcon_write(efx, &temp, TX_DC_CFG_REG_KER);
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
 
 	/* Set RX descriptor cache size.  Set low watermark to size-8, as
 	 * this allows most efficient prefetching.
 	 */
 	BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
-	EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
-	falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
-	EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
-	falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
 
 	/* Clear the parity enables on the TX data fifos as
 	 * they produce false parity errors because of timing issues
 	 */
 	if (EFX_WORKAROUND_5129(efx)) {
-		falcon_read(efx, &temp, SPARE_REG_KER);
-		EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
-		falcon_write(efx, &temp, SPARE_REG_KER);
+		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
+		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
+		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
 	}
 
 	/* Enable all the genuinely fatal interrupts.  (They are still
@@ -2977,83 +3001,65 @@
 	 * Note: All other fatal interrupts are enabled
 	 */
 	EFX_POPULATE_OWORD_3(temp,
-			     ILL_ADR_INT_KER_EN, 1,
-			     RBUF_OWN_INT_KER_EN, 1,
-			     TBUF_OWN_INT_KER_EN, 1);
+			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
 	EFX_INVERT_OWORD(temp);
-	falcon_write(efx, &temp, FATAL_INTR_REG_KER);
+	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
 
 	if (EFX_WORKAROUND_7244(efx)) {
-		falcon_read(efx, &temp, RX_FILTER_CTL_REG);
-		EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
-		EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
-		EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
-		EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
-		falcon_write(efx, &temp, RX_FILTER_CTL_REG);
+		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
+		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
+		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
+		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
+		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
+		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
 	}
 
 	falcon_setup_rss_indir_table(efx);
 
+	/* XXX This is documented only for Falcon A0/A1 */
 	/* Setup RX.  Wait for descriptor is broken and must
 	 * be disabled.  RXDP recovery shouldn't be needed, but is.
 	 */
-	falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
-	EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
-	EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
+	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
+	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
 	if (EFX_WORKAROUND_5583(efx))
-		EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
-	falcon_write(efx, &temp, RX_SELF_RST_REG_KER);
+		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
+	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
 
 	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
 	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
 	 */
-	falcon_read(efx, &temp, TX_CFG2_REG_KER);
-	EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
-	EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
-	EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
-	EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
-	EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
+	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
 	/* Enable SW_EV to inherit in char driver - assume harmless here */
-	EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
 	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
-	EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
 	/* Squash TX of packets of 16 bytes or less */
 	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
-		EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
-	falcon_write(efx, &temp, TX_CFG2_REG_KER);
+		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
 
 	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
 	 * descriptors (which is bad).
 	 */
-	falcon_read(efx, &temp, TX_CFG_REG_KER);
-	EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
-	falcon_write(efx, &temp, TX_CFG_REG_KER);
+	efx_reado(efx, &temp, FR_AZ_TX_CFG);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+	efx_writeo(efx, &temp, FR_AZ_TX_CFG);
 
-	/* RX config */
-	falcon_read(efx, &temp, RX_CFG_REG_KER);
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
-	if (EFX_WORKAROUND_7575(efx))
-		EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
-					(3 * 4096) / 32);
-	if (falcon_rev(efx) >= FALCON_REV_B0)
-		EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
-
-	/* RX FIFO flow control thresholds */
-	thresh = ((rx_xon_thresh_bytes >= 0) ?
-		  rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
-	thresh = ((rx_xoff_thresh_bytes >= 0) ?
-		  rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
-	/* RX control FIFO thresholds [32 entries] */
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
-	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
-	falcon_write(efx, &temp, RX_CFG_REG_KER);
+	falcon_init_rx_cfg(efx);
 
 	/* Set destination of both TX and RX Flush events */
 	if (falcon_rev(efx) >= FALCON_REV_B0) {
-		EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
-		falcon_write(efx, &temp, DP_CTRL_REG);
+		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
+		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
 	}
 
 	return 0;
@@ -3089,8 +3095,9 @@
 {
 	efx_oword_t cnt;
 
-	falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER);
-	efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT);
+	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
+	efx->n_rx_nodesc_drop_cnt +=
+		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
 }
 
 /**************************************************************************
@@ -3101,45 +3108,31 @@
  */
 
 struct efx_nic_type falcon_a_nic_type = {
-	.mem_bar = 2,
 	.mem_map_size = 0x20000,
-	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
-	.rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
-	.buf_tbl_base = BUF_TBL_KER_A1,
-	.evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
-	.evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
-	.txd_ring_mask = FALCON_TXD_RING_MASK,
-	.rxd_ring_mask = FALCON_RXD_RING_MASK,
-	.evq_size = FALCON_EVQ_SIZE,
-	.max_dma_mask = FALCON_DMA_MASK,
-	.tx_dma_mask = FALCON_TX_DMA_MASK,
-	.bug5391_mask = 0xf,
-	.rx_xoff_thresh = 2048,
-	.rx_xon_thresh = 512,
+	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
+	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
+	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
+	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
+	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
+	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
 	.rx_buffer_padding = 0x24,
 	.max_interrupt_mode = EFX_INT_MODE_MSI,
 	.phys_addr_channels = 4,
 };
 
 struct efx_nic_type falcon_b_nic_type = {
-	.mem_bar = 2,
 	/* Map everything up to and including the RSS indirection
 	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
 	 * requires that they not be mapped.  */
-	.mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
-	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
-	.rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
-	.buf_tbl_base = BUF_TBL_KER_B0,
-	.evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
-	.evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
-	.txd_ring_mask = FALCON_TXD_RING_MASK,
-	.rxd_ring_mask = FALCON_RXD_RING_MASK,
-	.evq_size = FALCON_EVQ_SIZE,
-	.max_dma_mask = FALCON_DMA_MASK,
-	.tx_dma_mask = FALCON_TX_DMA_MASK,
-	.bug5391_mask = 0,
-	.rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
-	.rx_xon_thresh = 27648,  /* ~3*max MTU */
+	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
+			 FR_BZ_RX_INDIRECTION_TBL_STEP *
+			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
+	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
+	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
+	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
+	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
+	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
+	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
 	.rx_buffer_padding = 0,
 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
 	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 77f2e0d..4dd9657 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -39,6 +39,8 @@
  **************************************************************************
  */
 
+extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+
 /* TX data path */
 extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
 extern void falcon_init_tx(struct efx_tx_queue *tx_queue);
@@ -89,11 +91,9 @@
 
 /* Global Resources */
 extern int falcon_probe_nic(struct efx_nic *efx);
-extern int falcon_probe_resources(struct efx_nic *efx);
 extern int falcon_init_nic(struct efx_nic *efx);
 extern int falcon_flush_queues(struct efx_nic *efx);
 extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
-extern void falcon_remove_resources(struct efx_nic *efx);
 extern void falcon_remove_nic(struct efx_nic *efx);
 extern void falcon_update_nic_stats(struct efx_nic *efx);
 extern void falcon_set_multicast_hash(struct efx_nic *efx);
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
new file mode 100644
index 0000000..99f7372
--- /dev/null
+++ b/drivers/net/sfc/falcon_boards.c
@@ -0,0 +1,752 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/rtnetlink.h>
+
+#include "net_driver.h"
+#include "phy.h"
+#include "efx.h"
+#include "falcon.h"
+#include "regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/* Macros for unpacking the board revision */
+/* The revision info is in host byte order. */
+#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
+#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
+#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
+
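The revision word packs the board type in the high byte and the major/minor revision in the two low nibbles. A quick sketch of the unpacking (the example value is made up):

#include <stdio.h>

#define BOARD_TYPE(rev)  ((rev) >> 8)
#define BOARD_MAJOR(rev) (((rev) >> 4) & 0xf)
#define BOARD_MINOR(rev) ((rev) & 0xf)

int main(void)
{
	unsigned rev = 0x5123;	/* hypothetical: type 0x51 (SFN4111T), rev 2.3 */

	printf("type=0x%02x major=%u minor=%u\n",
	       BOARD_TYPE(rev), BOARD_MAJOR(rev), BOARD_MINOR(rev));
	return 0;
}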
+/* Board types */
+#define FALCON_BOARD_SFE4001 0x01
+#define FALCON_BOARD_SFE4002 0x02
+#define FALCON_BOARD_SFN4111T 0x51
+#define FALCON_BOARD_SFN4112F 0x52
+
+/* Blink support. If the PHY has no auto-blink mode, we hang it off a timer */
+#define BLINK_INTERVAL (HZ/2)
+
+static void blink_led_timer(unsigned long context)
+{
+	struct efx_nic *efx = (struct efx_nic *)context;
+	struct efx_board *board = &efx->board_info;
+
+	board->set_id_led(efx, board->blink_state);
+	board->blink_state = !board->blink_state;
+	if (board->blink_resubmit)
+		mod_timer(&board->blink_timer, jiffies + BLINK_INTERVAL);
+}
+
+static void board_blink(struct efx_nic *efx, bool blink)
+{
+	struct efx_board *board = &efx->board_info;
+
+	/* The rtnl mutex serialises all ethtool ioctls, so
+	 * nothing special needs doing here. */
+	if (blink) {
+		board->blink_resubmit = true;
+		board->blink_state = false;
+		setup_timer(&board->blink_timer, blink_led_timer,
+			    (unsigned long)efx);
+		mod_timer(&board->blink_timer, jiffies + BLINK_INTERVAL);
+	} else {
+		board->blink_resubmit = false;
+		if (board->blink_timer.function)
+			del_timer_sync(&board->blink_timer);
+		board->init_leds(efx);
+	}
+}
+
+/*****************************************************************************
+ * Support for LM87 sensor chip used on several boards
+ */
+#define LM87_REG_ALARMS1		0x41
+#define LM87_REG_ALARMS2		0x42
+#define LM87_IN_LIMITS(nr, _min, _max)			\
+	0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
+#define LM87_AIN_LIMITS(nr, _min, _max)			\
+	0x3B + (nr), _max, 0x1A + (nr), _min
+#define LM87_TEMP_INT_LIMITS(_min, _max)		\
+	0x39, _max, 0x3A, _min
+#define LM87_TEMP_EXT1_LIMITS(_min, _max)		\
+	0x37, _max, 0x38, _min
+
+#define LM87_ALARM_TEMP_INT		0x10
+#define LM87_ALARM_TEMP_EXT1		0x20
+
+#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
+
+static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
+			 const u8 *reg_values)
+{
+	struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info);
+	int rc;
+
+	if (!client)
+		return -EIO;
+
+	while (*reg_values) {
+		u8 reg = *reg_values++;
+		u8 value = *reg_values++;
+		rc = i2c_smbus_write_byte_data(client, reg, value);
+		if (rc)
+			goto err;
+	}
+
+	efx->board_info.hwmon_client = client;
+	return 0;
+
+err:
+	i2c_unregister_device(client);
+	return rc;
+}
+
+static void efx_fini_lm87(struct efx_nic *efx)
+{
+	i2c_unregister_device(efx->board_info.hwmon_client);
+}
+
+static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
+{
+	struct i2c_client *client = efx->board_info.hwmon_client;
+	s32 alarms1, alarms2;
+
+	/* If link is up then do not monitor temperature */
+	if (EFX_WORKAROUND_7884(efx) && efx->link_up)
+		return 0;
+
+	alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+	alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+	if (alarms1 < 0)
+		return alarms1;
+	if (alarms2 < 0)
+		return alarms2;
+	alarms1 &= mask;
+	alarms2 &= mask >> 8;
+	if (alarms1 || alarms2) {
+		EFX_ERR(efx,
+			"LM87 detected a hardware failure (status %02x:%02x)"
+			"%s%s\n",
+			alarms1, alarms2,
+			(alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
+			(alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+#else /* !CONFIG_SENSORS_LM87 */
+
+static inline int
+efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
+	      const u8 *reg_values)
+{
+	return 0;
+}
+static inline void efx_fini_lm87(struct efx_nic *efx)
+{
+}
+static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
+{
+	return 0;
+}
+
+#endif /* CONFIG_SENSORS_LM87 */
+
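efx_check_lm87() takes a single 16-bit mask whose low byte is applied to ALARMS1 and whose high byte is applied to ALARMS2. A user-space sketch of that split:

#include <stdio.h>

#define LM87_ALARM_TEMP_INT  0x10
#define LM87_ALARM_TEMP_EXT1 0x20

/* Low byte of mask -> ALARMS1, high byte -> ALARMS2; returns -1 on alarm
 * (standing in for -ERANGE). */
static int check_alarms(unsigned alarms1, unsigned alarms2, unsigned mask)
{
	alarms1 &= mask;
	alarms2 &= mask >> 8;
	return (alarms1 || alarms2) ? -1 : 0;
}

int main(void)
{
	unsigned mask = LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1;

	/* Hypothetical readings: internal temperature alarm set, then clear */
	printf("%d\n", check_alarms(LM87_ALARM_TEMP_INT, 0x00, mask));	/* -1 */
	printf("%d\n", check_alarms(0x00, 0x00, mask));			/*  0 */
	return 0;
}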
+/*****************************************************************************
+ * Support for the SFE4001 and SFN4111T NICs.
+ *
+ * The SFE4001 does not power up fully at reset due to its high power
+ * consumption.  We control its power via a PCA9539 I/O expander.
+ * Both boards have a MAX6647 temperature monitor which we expose to
+ * the lm90 driver.
+ *
+ * This also provides minimal support for reflashing the PHY, which is
+ * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
+ * On SFE4001 rev A2 and later this is connected to the 3V3X output of
+ * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3.
+ * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
+ * exclusive with the network device being open.
+ */
+
+/**************************************************************************
+ * Support for I2C IO Expander device on SFE4001
+ */
+#define	PCA9539 0x74
+
+#define	P0_IN 0x00
+#define	P0_OUT 0x02
+#define	P0_INVERT 0x04
+#define	P0_CONFIG 0x06
+
+#define	P0_EN_1V0X_LBN 0
+#define	P0_EN_1V0X_WIDTH 1
+#define	P0_EN_1V2_LBN 1
+#define	P0_EN_1V2_WIDTH 1
+#define	P0_EN_2V5_LBN 2
+#define	P0_EN_2V5_WIDTH 1
+#define	P0_EN_3V3X_LBN 3
+#define	P0_EN_3V3X_WIDTH 1
+#define	P0_EN_5V_LBN 4
+#define	P0_EN_5V_WIDTH 1
+#define	P0_SHORTEN_JTAG_LBN 5
+#define	P0_SHORTEN_JTAG_WIDTH 1
+#define	P0_X_TRST_LBN 6
+#define	P0_X_TRST_WIDTH 1
+#define	P0_DSP_RESET_LBN 7
+#define	P0_DSP_RESET_WIDTH 1
+
+#define	P1_IN 0x01
+#define	P1_OUT 0x03
+#define	P1_INVERT 0x05
+#define	P1_CONFIG 0x07
+
+#define	P1_AFE_PWD_LBN 0
+#define	P1_AFE_PWD_WIDTH 1
+#define	P1_DSP_PWD25_LBN 1
+#define	P1_DSP_PWD25_WIDTH 1
+#define	P1_RESERVED_LBN 2
+#define	P1_RESERVED_WIDTH 2
+#define	P1_SPARE_LBN 4
+#define	P1_SPARE_WIDTH 4
+
+/* Temperature Sensor */
+#define MAX664X_REG_RSL		0x02
+#define MAX664X_REG_WLHO	0x0B
+
+static void sfe4001_poweroff(struct efx_nic *efx)
+{
+	struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
+	struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
+
+	/* Turn off all power rails and disable outputs */
+	i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
+	i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
+	i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
+
+	/* Clear any over-temperature alert */
+	i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
+}
+
+static int sfe4001_poweron(struct efx_nic *efx)
+{
+	struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
+	struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
+	unsigned int i, j;
+	int rc;
+	u8 out;
+
+	/* Clear any previous over-temperature alert */
+	rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
+	if (rc < 0)
+		return rc;
+
+	/* Enable port 0 and port 1 outputs on IO expander */
+	rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
+	if (rc)
+		return rc;
+	rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
+				       0xff & ~(1 << P1_SPARE_LBN));
+	if (rc)
+		goto fail_on;
+
+	/* If PHY power is on, turn it all off and wait 1 second to
+	 * ensure a full reset.
+	 */
+	rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
+	if (rc < 0)
+		goto fail_on;
+	out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
+		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
+		       (0 << P0_EN_1V0X_LBN));
+	if (rc != out) {
+		EFX_INFO(efx, "power-cycling PHY\n");
+		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+		if (rc)
+			goto fail_on;
+		schedule_timeout_uninterruptible(HZ);
+	}
+
+	for (i = 0; i < 20; ++i) {
+		/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
+		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
+			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
+			       (1 << P0_X_TRST_LBN));
+		if (efx->phy_mode & PHY_MODE_SPECIAL)
+			out |= 1 << P0_EN_3V3X_LBN;
+
+		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+		if (rc)
+			goto fail_on;
+		msleep(10);
+
+		/* Turn on 1V power rail */
+		out &= ~(1 << P0_EN_1V0X_LBN);
+		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+		if (rc)
+			goto fail_on;
+
+		EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
+
+		/* In flash config mode, DSP does not turn on AFE, so
+		 * just wait 1 second.
+		 */
+		if (efx->phy_mode & PHY_MODE_SPECIAL) {
+			schedule_timeout_uninterruptible(HZ);
+			return 0;
+		}
+
+		for (j = 0; j < 10; ++j) {
+			msleep(100);
+
+			/* Check DSP has asserted AFE power line */
+			rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
+			if (rc < 0)
+				goto fail_on;
+			if (rc & (1 << P1_AFE_PWD_LBN))
+				return 0;
+		}
+	}
+
+	EFX_INFO(efx, "timed out waiting for DSP boot\n");
+	rc = -ETIMEDOUT;
+fail_on:
+	sfe4001_poweroff(efx);
+	return rc;
+}
+
+static int sfn4111t_reset(struct efx_nic *efx)
+{
+	efx_oword_t reg;
+
+	/* GPIO 3 and the GPIO register are shared with I2C, so block that */
+	mutex_lock(&efx->i2c_adap.bus_lock);
+
+	/* Pull RST_N (GPIO 2) low then let it up again, setting the
+	 * FLASH_CFG_1 strap (GPIO 3) appropriately.  Only change the
+	 * output enables; the output levels should always be 0 (low)
+	 * and we rely on external pull-ups. */
+	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true);
+	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
+	msleep(1000);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN,
+			    !!(efx->phy_mode & PHY_MODE_SPECIAL));
+	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
+	msleep(1);
+
+	mutex_unlock(&efx->i2c_adap.bus_lock);
+
+	ssleep(1);
+	return 0;
+}
+
+static ssize_t show_phy_flash_cfg(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+	return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
+}
+
+static ssize_t set_phy_flash_cfg(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+	enum efx_phy_mode old_mode, new_mode;
+	int err;
+
+	rtnl_lock();
+	old_mode = efx->phy_mode;
+	if (count == 0 || *buf == '0')
+		new_mode = old_mode & ~PHY_MODE_SPECIAL;
+	else
+		new_mode = PHY_MODE_SPECIAL;
+	if (old_mode == new_mode) {
+		err = 0;
+	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+		err = -EBUSY;
+	} else {
+		/* Reset the PHY, reconfigure the MAC and enable/disable
+		 * MAC stats accordingly. */
+		efx->phy_mode = new_mode;
+		if (new_mode & PHY_MODE_SPECIAL)
+			efx_stats_disable(efx);
+		if (efx->board_info.type == FALCON_BOARD_SFE4001)
+			err = sfe4001_poweron(efx);
+		else
+			err = sfn4111t_reset(efx);
+		efx_reconfigure_port(efx);
+		if (!(new_mode & PHY_MODE_SPECIAL))
+			efx_stats_enable(efx);
+	}
+	rtnl_unlock();
+
+	return err ? err : count;
+}
+
+static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
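
For illustration only, not part of the patch: the phy_flash_cfg attribute defined above is created under the NIC's PCI device directory in sysfs by sfe4001_init()/sfn4111t_init(), so reflash mode could be toggled from user space roughly as in the sketch below. The PCI address is a placeholder, and, as in set_phy_flash_cfg() above, the write is rejected with EBUSY while the network device is open.

#include <stdio.h>

int main(void)
{
	/* Placeholder PCI address; use the real device's sysfs directory. */
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/phy_flash_cfg", "w");

	if (!f) {
		perror("phy_flash_cfg");
		return 1;
	}
	fputs("1\n", f);	/* write "0" to leave reflash mode */
	fclose(f);
	return 0;
}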
+
+static void sfe4001_fini(struct efx_nic *efx)
+{
+	EFX_INFO(efx, "%s\n", __func__);
+
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+	sfe4001_poweroff(efx);
+	i2c_unregister_device(efx->board_info.ioexp_client);
+	i2c_unregister_device(efx->board_info.hwmon_client);
+}
+
+static int sfe4001_check_hw(struct efx_nic *efx)
+{
+	s32 status;
+
+	/* If XAUI link is up then do not monitor */
+	if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
+		return 0;
+
+	/* Check the powered status of the PHY. Lack of power implies that
+	 * the MAX6647 has shut down power to it, probably due to a temp.
+	 * alarm. We read the power status rather than the MAX6647 status
+	 * directly because the latter is read-to-clear and would thus
+	 * start to power up the PHY again when polled, causing us to blip
+	 * the power undesirably.
+	 * We know we can read from the IO expander because we did
+	 * it during power-on. Assume failure now is bad news. */
+	status = i2c_smbus_read_byte_data(efx->board_info.ioexp_client, P1_IN);
+	if (status >= 0 &&
+	    (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
+		return 0;
+
+	/* Use board power control, not PHY power control */
+	sfe4001_poweroff(efx);
+	efx->phy_mode = PHY_MODE_OFF;
+
+	return (status < 0) ? -EIO : -ERANGE;
+}
+
+static struct i2c_board_info sfe4001_hwmon_info = {
+	I2C_BOARD_INFO("max6647", 0x4e),
+};
+
+/* This board uses an I2C expander to provide power to the PHY, which needs to
+ * be turned on before the PHY can be used.
+ * Context: Process context, rtnl lock held
+ */
+static int sfe4001_init(struct efx_nic *efx)
+{
+	int rc;
+
+#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
+	efx->board_info.hwmon_client =
+		i2c_new_device(&efx->i2c_adap, &sfe4001_hwmon_info);
+#else
+	efx->board_info.hwmon_client =
+		i2c_new_dummy(&efx->i2c_adap, sfe4001_hwmon_info.addr);
+#endif
+	if (!efx->board_info.hwmon_client)
+		return -EIO;
+
+	/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
+	rc = i2c_smbus_write_byte_data(efx->board_info.hwmon_client,
+				       MAX664X_REG_WLHO, 90);
+	if (rc)
+		goto fail_hwmon;
+
+	efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
+	if (!efx->board_info.ioexp_client) {
+		rc = -EIO;
+		goto fail_hwmon;
+	}
+
+	/* 10Xpress has fixed-function LED pins, so there is no board-specific
+	 * blink code. */
+	efx->board_info.blink = tenxpress_phy_blink;
+
+	efx->board_info.monitor = sfe4001_check_hw;
+	efx->board_info.fini = sfe4001_fini;
+
+	if (efx->phy_mode & PHY_MODE_SPECIAL) {
+		/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
+		 * will fail. */
+		efx_stats_disable(efx);
+	}
+	rc = sfe4001_poweron(efx);
+	if (rc)
+		goto fail_ioexp;
+
+	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+	if (rc)
+		goto fail_on;
+
+	EFX_INFO(efx, "PHY is powered on\n");
+	return 0;
+
+fail_on:
+	sfe4001_poweroff(efx);
+fail_ioexp:
+	i2c_unregister_device(efx->board_info.ioexp_client);
+fail_hwmon:
+	i2c_unregister_device(efx->board_info.hwmon_client);
+	return rc;
+}
+
+static int sfn4111t_check_hw(struct efx_nic *efx)
+{
+	s32 status;
+
+	/* If XAUI link is up then do not monitor */
+	if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
+		return 0;
+
+	/* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
+	status = i2c_smbus_read_byte_data(efx->board_info.hwmon_client,
+					  MAX664X_REG_RSL);
+	if (status < 0)
+		return -EIO;
+	if (status & 0x57)
+		return -ERANGE;
+	return 0;
+}
+
+static void sfn4111t_fini(struct efx_nic *efx)
+{
+	EFX_INFO(efx, "%s\n", __func__);
+
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+	i2c_unregister_device(efx->board_info.hwmon_client);
+}
+
+static struct i2c_board_info sfn4111t_a0_hwmon_info = {
+	I2C_BOARD_INFO("max6647", 0x4e),
+};
+
+static struct i2c_board_info sfn4111t_r5_hwmon_info = {
+	I2C_BOARD_INFO("max6646", 0x4d),
+};
+
+static int sfn4111t_init(struct efx_nic *efx)
+{
+	int i = 0;
+	int rc;
+
+	efx->board_info.hwmon_client =
+		i2c_new_device(&efx->i2c_adap,
+			       (efx->board_info.minor < 5) ?
+			       &sfn4111t_a0_hwmon_info :
+			       &sfn4111t_r5_hwmon_info);
+	if (!efx->board_info.hwmon_client)
+		return -EIO;
+
+	efx->board_info.blink = tenxpress_phy_blink;
+	efx->board_info.monitor = sfn4111t_check_hw;
+	efx->board_info.fini = sfn4111t_fini;
+
+	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+	if (rc)
+		goto fail_hwmon;
+
+	do {
+		if (efx->phy_mode & PHY_MODE_SPECIAL) {
+			/* PHY may not generate a 156.25 MHz clock and MAC
+			 * stats fetch will fail. */
+			efx_stats_disable(efx);
+			sfn4111t_reset(efx);
+		}
+		rc = sft9001_wait_boot(efx);
+		if (rc == 0)
+			return 0;
+		efx->phy_mode = PHY_MODE_SPECIAL;
+	} while (rc == -EINVAL && ++i < 2);
+
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+fail_hwmon:
+	i2c_unregister_device(efx->board_info.hwmon_client);
+	return rc;
+}
+
+/*****************************************************************************
+ * Support for the SFE4002
+ *
+ */
+static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfe4002_lm87_regs[] = {
+	LM87_IN_LIMITS(0, 0x83, 0x91),		/* 2.5V:  1.8V +/- 5% */
+	LM87_IN_LIMITS(1, 0x51, 0x5a),		/* Vccp1: 1.2V +/- 5% */
+	LM87_IN_LIMITS(2, 0xb6, 0xca),		/* 3.3V:  3.3V +/- 5% */
+	LM87_IN_LIMITS(3, 0xb0, 0xc9),		/* 5V:    4.6-5.2V */
+	LM87_IN_LIMITS(4, 0xb0, 0xe0),		/* 12V:   11-14V */
+	LM87_IN_LIMITS(5, 0x44, 0x4b),		/* Vccp2: 1.0V +/- 5% */
+	LM87_AIN_LIMITS(0, 0xa0, 0xb2),		/* AIN1:  1.66V +/- 5% */
+	LM87_AIN_LIMITS(1, 0x91, 0xa1),		/* AIN2:  1.5V +/- 5% */
+	LM87_TEMP_INT_LIMITS(10, 60),		/* board */
+	LM87_TEMP_EXT1_LIMITS(10, 70),		/* Falcon */
+	0
+};
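
For illustration only, not part of the patch: the limit bytes in sfe4002_lm87_regs appear to use the LM87 scaling convention (as in the Linux lm87 hwmon driver) where a raw value of 192 corresponds to the channel's nominal full-scale voltage, i.e. raw = 192 * V / V_nominal. Under that assumption, a minimal sketch checking that the 2.5V-channel limits 0x83/0x91 correspond to a 1.8V rail +/- 5%:

#include <stdio.h>

/* Hypothetical helper: encode a voltage limit as an LM87 limit byte,
 * assuming 192 counts at the channel's nominal full-scale voltage. */
static unsigned char lm87_limit_byte(double volts, double channel_nominal)
{
	return (unsigned char)(volts * 192.0 / channel_nominal + 0.5);
}

int main(void)
{
	printf("1.8V -5%%: 0x%02x, 1.8V +5%%: 0x%02x\n",
	       lm87_limit_byte(1.8 * 0.95, 2.5),
	       lm87_limit_byte(1.8 * 1.05, 2.5));	/* expect 0x83, 0x91 */
	return 0;
}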
+
+static struct i2c_board_info sfe4002_hwmon_info = {
+	I2C_BOARD_INFO("lm87", 0x2e),
+	.platform_data	= &sfe4002_lm87_channel,
+};
+
+/****************************************************************************/
+/* LED allocations. Note that on rev A0 boards the schematic and the reality
+ * differ: red and green are swapped. Below is the fixed (A1) layout (there
+ * are only 3 A0 boards in existence, so no real reason to make this
+ * conditional).
+ */
+#define SFE4002_FAULT_LED (2)	/* Red */
+#define SFE4002_RX_LED    (0)	/* Green */
+#define SFE4002_TX_LED    (1)	/* Amber */
+
+static void sfe4002_init_leds(struct efx_nic *efx)
+{
+	/* Set the TX and RX LEDs to reflect status and activity, and the
+	 * fault LED off */
+	falcon_qt202x_set_led(efx, SFE4002_TX_LED,
+			      QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
+	falcon_qt202x_set_led(efx, SFE4002_RX_LED,
+			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
+	falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
+}
+
+static void sfe4002_set_id_led(struct efx_nic *efx, bool state)
+{
+	falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
+			      QUAKE_LED_OFF);
+}
+
+static int sfe4002_check_hw(struct efx_nic *efx)
+{
+	/* Rev A0 SFE4002 boards report a temperature fault the whole time
+	 * (bad sensor), so we mask it out. */
+	unsigned alarm_mask =
+		(efx->board_info.major == 0 && efx->board_info.minor == 0) ?
+		~LM87_ALARM_TEMP_EXT1 : ~0;
+
+	return efx_check_lm87(efx, alarm_mask);
+}
+
+static int sfe4002_init(struct efx_nic *efx)
+{
+	int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
+	if (rc)
+		return rc;
+	efx->board_info.monitor = sfe4002_check_hw;
+	efx->board_info.init_leds = sfe4002_init_leds;
+	efx->board_info.set_id_led = sfe4002_set_id_led;
+	efx->board_info.blink = board_blink;
+	efx->board_info.fini = efx_fini_lm87;
+	return 0;
+}
+
+/*****************************************************************************
+ * Support for the SFN4112F
+ *
+ */
+static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfn4112f_lm87_regs[] = {
+	LM87_IN_LIMITS(0, 0x83, 0x91),		/* 2.5V:  1.8V +/- 5% */
+	LM87_IN_LIMITS(1, 0x51, 0x5a),		/* Vccp1: 1.2V +/- 5% */
+	LM87_IN_LIMITS(2, 0xb6, 0xca),		/* 3.3V:  3.3V +/- 5% */
+	LM87_IN_LIMITS(4, 0xb0, 0xe0),		/* 12V:   11-14V */
+	LM87_IN_LIMITS(5, 0x44, 0x4b),		/* Vccp2: 1.0V +/- 5% */
+	LM87_AIN_LIMITS(1, 0x91, 0xa1),		/* AIN2:  1.5V +/- 5% */
+	LM87_TEMP_INT_LIMITS(10, 60),		/* board */
+	LM87_TEMP_EXT1_LIMITS(10, 70),		/* Falcon */
+	0
+};
+
+static struct i2c_board_info sfn4112f_hwmon_info = {
+	I2C_BOARD_INFO("lm87", 0x2e),
+	.platform_data	= &sfn4112f_lm87_channel,
+};
+
+#define SFN4112F_ACT_LED	0
+#define SFN4112F_LINK_LED	1
+
+static void sfn4112f_init_leds(struct efx_nic *efx)
+{
+	falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
+			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
+	falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
+			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
+}
+
+static void sfn4112f_set_id_led(struct efx_nic *efx, bool state)
+{
+	falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
+			      state ? QUAKE_LED_ON : QUAKE_LED_OFF);
+}
+
+static int sfn4112f_check_hw(struct efx_nic *efx)
+{
+	/* Mask out unused sensors */
+	return efx_check_lm87(efx, ~0x48);
+}
+
+static int sfn4112f_init(struct efx_nic *efx)
+{
+	int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
+	if (rc)
+		return rc;
+	efx->board_info.monitor = sfn4112f_check_hw;
+	efx->board_info.init_leds = sfn4112f_init_leds;
+	efx->board_info.set_id_led = sfn4112f_set_id_led;
+	efx->board_info.blink = board_blink;
+	efx->board_info.fini = efx_fini_lm87;
+	return 0;
+}
+
+/* This will get expanded as board-specific details get moved out of the
+ * PHY drivers. */
+struct falcon_board_data {
+	u8 type;
+	const char *ref_model;
+	const char *gen_type;
+	int (*init) (struct efx_nic *nic);
+};
+
+static struct falcon_board_data board_data[] = {
+	{ FALCON_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init },
+	{ FALCON_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
+	{ FALCON_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
+	  sfn4111t_init },
+	{ FALCON_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter",
+	  sfn4112f_init },
+};
+
+void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+{
+	struct falcon_board_data *data = NULL;
+	int i;
+
+	efx->board_info.type = FALCON_BOARD_TYPE(revision_info);
+	efx->board_info.major = FALCON_BOARD_MAJOR(revision_info);
+	efx->board_info.minor = FALCON_BOARD_MINOR(revision_info);
+
+	for (i = 0; i < ARRAY_SIZE(board_data); i++)
+		if (board_data[i].type == efx->board_info.type)
+			data = &board_data[i];
+
+	if (data) {
+		EFX_INFO(efx, "board is %s rev %c%d\n",
+			 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
+			 ? data->ref_model : data->gen_type,
+			 'A' + efx->board_info.major, efx->board_info.minor);
+		efx->board_info.init = data->init;
+	} else {
+		EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
+	}
+}
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
index 8865eae..8a1b80d 100644
--- a/drivers/net/sfc/falcon_gmac.c
+++ b/drivers/net/sfc/falcon_gmac.c
@@ -13,9 +13,8 @@
 #include "efx.h"
 #include "falcon.h"
 #include "mac.h"
-#include "falcon_hwdefs.h"
-#include "falcon_io.h"
-#include "gmii.h"
+#include "regs.h"
+#include "io.h"
 
 /**************************************************************************
  *
@@ -37,89 +36,89 @@
 	bytemode = (efx->link_speed == 1000);
 
 	EFX_POPULATE_OWORD_5(reg,
-			     GM_LOOP, loopback,
-			     GM_TX_EN, 1,
-			     GM_TX_FC_EN, tx_fc,
-			     GM_RX_EN, 1,
-			     GM_RX_FC_EN, rx_fc);
-	falcon_write(efx, &reg, GM_CFG1_REG);
+			     FRF_AB_GM_LOOP, loopback,
+			     FRF_AB_GM_TX_EN, 1,
+			     FRF_AB_GM_TX_FC_EN, tx_fc,
+			     FRF_AB_GM_RX_EN, 1,
+			     FRF_AB_GM_RX_FC_EN, rx_fc);
+	efx_writeo(efx, &reg, FR_AB_GM_CFG1);
 	udelay(10);
 
 	/* Configuration register 2 */
 	if_mode = (bytemode) ? 2 : 1;
 	EFX_POPULATE_OWORD_5(reg,
-			     GM_IF_MODE, if_mode,
-			     GM_PAD_CRC_EN, 1,
-			     GM_LEN_CHK, 1,
-			     GM_FD, efx->link_fd,
-			     GM_PAMBL_LEN, 0x7/*datasheet recommended */);
+			     FRF_AB_GM_IF_MODE, if_mode,
+			     FRF_AB_GM_PAD_CRC_EN, 1,
+			     FRF_AB_GM_LEN_CHK, 1,
+			     FRF_AB_GM_FD, efx->link_fd,
+			     FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */);
 
-	falcon_write(efx, &reg, GM_CFG2_REG);
+	efx_writeo(efx, &reg, FR_AB_GM_CFG2);
 	udelay(10);
 
 	/* Max frame len register */
 	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
-	EFX_POPULATE_OWORD_1(reg, GM_MAX_FLEN, max_frame_len);
-	falcon_write(efx, &reg, GM_MAX_FLEN_REG);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len);
+	efx_writeo(efx, &reg, FR_AB_GM_MAX_FLEN);
 	udelay(10);
 
 	/* FIFO configuration register 0 */
 	EFX_POPULATE_OWORD_5(reg,
-			     GMF_FTFENREQ, 1,
-			     GMF_STFENREQ, 1,
-			     GMF_FRFENREQ, 1,
-			     GMF_SRFENREQ, 1,
-			     GMF_WTMENREQ, 1);
-	falcon_write(efx, &reg, GMF_CFG0_REG);
+			     FRF_AB_GMF_FTFENREQ, 1,
+			     FRF_AB_GMF_STFENREQ, 1,
+			     FRF_AB_GMF_FRFENREQ, 1,
+			     FRF_AB_GMF_SRFENREQ, 1,
+			     FRF_AB_GMF_WTMENREQ, 1);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG0);
 	udelay(10);
 
 	/* FIFO configuration register 1 */
 	EFX_POPULATE_OWORD_2(reg,
-			     GMF_CFGFRTH, 0x12,
-			     GMF_CFGXOFFRTX, 0xffff);
-	falcon_write(efx, &reg, GMF_CFG1_REG);
+			     FRF_AB_GMF_CFGFRTH, 0x12,
+			     FRF_AB_GMF_CFGXOFFRTX, 0xffff);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG1);
 	udelay(10);
 
 	/* FIFO configuration register 2 */
 	EFX_POPULATE_OWORD_2(reg,
-			     GMF_CFGHWM, 0x3f,
-			     GMF_CFGLWM, 0xa);
-	falcon_write(efx, &reg, GMF_CFG2_REG);
+			     FRF_AB_GMF_CFGHWM, 0x3f,
+			     FRF_AB_GMF_CFGLWM, 0xa);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG2);
 	udelay(10);
 
 	/* FIFO configuration register 3 */
 	EFX_POPULATE_OWORD_2(reg,
-			     GMF_CFGHWMFT, 0x1c,
-			     GMF_CFGFTTH, 0x08);
-	falcon_write(efx, &reg, GMF_CFG3_REG);
+			     FRF_AB_GMF_CFGHWMFT, 0x1c,
+			     FRF_AB_GMF_CFGFTTH, 0x08);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG3);
 	udelay(10);
 
 	/* FIFO configuration register 4 */
-	EFX_POPULATE_OWORD_1(reg, GMF_HSTFLTRFRM_PAUSE, 1);
-	falcon_write(efx, &reg, GMF_CFG4_REG);
+	EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG4);
 	udelay(10);
 
 	/* FIFO configuration register 5 */
-	falcon_read(efx, &reg, GMF_CFG5_REG);
-	EFX_SET_OWORD_FIELD(reg, GMF_CFGBYTMODE, bytemode);
-	EFX_SET_OWORD_FIELD(reg, GMF_CFGHDPLX, !efx->link_fd);
-	EFX_SET_OWORD_FIELD(reg, GMF_HSTDRPLT64, !efx->link_fd);
-	EFX_SET_OWORD_FIELD(reg, GMF_HSTFLTRFRMDC_PAUSE, 0);
-	falcon_write(efx, &reg, GMF_CFG5_REG);
+	efx_reado(efx, &reg, FR_AB_GMF_CFG5);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !efx->link_fd);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !efx->link_fd);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0);
+	efx_writeo(efx, &reg, FR_AB_GMF_CFG5);
 	udelay(10);
 
 	/* MAC address */
 	EFX_POPULATE_OWORD_4(reg,
-			     GM_HWADDR_5, efx->net_dev->dev_addr[5],
-			     GM_HWADDR_4, efx->net_dev->dev_addr[4],
-			     GM_HWADDR_3, efx->net_dev->dev_addr[3],
-			     GM_HWADDR_2, efx->net_dev->dev_addr[2]);
-	falcon_write(efx, &reg, GM_ADR1_REG);
+			     FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5],
+			     FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4],
+			     FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3],
+			     FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]);
+	efx_writeo(efx, &reg, FR_AB_GM_ADR1);
 	udelay(10);
 	EFX_POPULATE_OWORD_2(reg,
-			     GM_HWADDR_1, efx->net_dev->dev_addr[1],
-			     GM_HWADDR_0, efx->net_dev->dev_addr[0]);
-	falcon_write(efx, &reg, GM_ADR2_REG);
+			     FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1],
+			     FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]);
+	efx_writeo(efx, &reg, FR_AB_GM_ADR2);
 	udelay(10);
 
 	falcon_reconfigure_mac_wrapper(efx);
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
deleted file mode 100644
index 2d22611..0000000
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ /dev/null
@@ -1,1333 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_FALCON_HWDEFS_H
-#define EFX_FALCON_HWDEFS_H
-
-/*
- * Falcon hardware value definitions.
- * Falcon is the internal codename for the SFC4000 controller that is
- * present in SFE400X evaluation boards
- */
-
-/**************************************************************************
- *
- * Falcon registers
- *
- **************************************************************************
- */
-
-/* Address region register */
-#define ADR_REGION_REG_KER	0x00
-#define ADR_REGION0_LBN	0
-#define ADR_REGION0_WIDTH	18
-#define ADR_REGION1_LBN	32
-#define ADR_REGION1_WIDTH	18
-#define ADR_REGION2_LBN	64
-#define ADR_REGION2_WIDTH	18
-#define ADR_REGION3_LBN	96
-#define ADR_REGION3_WIDTH	18
-
-/* Interrupt enable register */
-#define INT_EN_REG_KER 0x0010
-#define KER_INT_KER_LBN 3
-#define KER_INT_KER_WIDTH 1
-#define DRV_INT_EN_KER_LBN 0
-#define DRV_INT_EN_KER_WIDTH 1
-
-/* Interrupt status address register */
-#define INT_ADR_REG_KER	0x0030
-#define NORM_INT_VEC_DIS_KER_LBN 64
-#define NORM_INT_VEC_DIS_KER_WIDTH 1
-#define INT_ADR_KER_LBN 0
-#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
-
-/* Interrupt status register (B0 only) */
-#define INT_ISR0_B0 0x90
-#define INT_ISR1_B0 0xA0
-
-/* Interrupt acknowledge register (A0/A1 only) */
-#define INT_ACK_REG_KER_A1 0x0050
-#define INT_ACK_DUMMY_DATA_LBN 0
-#define INT_ACK_DUMMY_DATA_WIDTH 32
-
-/* Interrupt acknowledge work-around register (A0/A1 only )*/
-#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
-
-/* SPI host command register */
-#define EE_SPI_HCMD_REG_KER 0x0100
-#define EE_SPI_HCMD_CMD_EN_LBN 31
-#define EE_SPI_HCMD_CMD_EN_WIDTH 1
-#define EE_WR_TIMER_ACTIVE_LBN 28
-#define EE_WR_TIMER_ACTIVE_WIDTH 1
-#define EE_SPI_HCMD_SF_SEL_LBN 24
-#define EE_SPI_HCMD_SF_SEL_WIDTH 1
-#define EE_SPI_EEPROM 0
-#define EE_SPI_FLASH 1
-#define EE_SPI_HCMD_DABCNT_LBN 16
-#define EE_SPI_HCMD_DABCNT_WIDTH 5
-#define EE_SPI_HCMD_READ_LBN 15
-#define EE_SPI_HCMD_READ_WIDTH 1
-#define EE_SPI_READ 1
-#define EE_SPI_WRITE 0
-#define EE_SPI_HCMD_DUBCNT_LBN 12
-#define EE_SPI_HCMD_DUBCNT_WIDTH 2
-#define EE_SPI_HCMD_ADBCNT_LBN 8
-#define EE_SPI_HCMD_ADBCNT_WIDTH 2
-#define EE_SPI_HCMD_ENC_LBN 0
-#define EE_SPI_HCMD_ENC_WIDTH 8
-
-/* SPI host address register */
-#define EE_SPI_HADR_REG_KER 0x0110
-#define EE_SPI_HADR_ADR_LBN 0
-#define EE_SPI_HADR_ADR_WIDTH 24
-
-/* SPI host data register */
-#define EE_SPI_HDATA_REG_KER 0x0120
-
-/* SPI/VPD config register */
-#define EE_VPD_CFG_REG_KER 0x0140
-#define EE_VPD_EN_LBN 0
-#define EE_VPD_EN_WIDTH 1
-#define EE_VPD_EN_AD9_MODE_LBN 1
-#define EE_VPD_EN_AD9_MODE_WIDTH 1
-#define EE_EE_CLOCK_DIV_LBN 112
-#define EE_EE_CLOCK_DIV_WIDTH 7
-#define EE_SF_CLOCK_DIV_LBN 120
-#define EE_SF_CLOCK_DIV_WIDTH 7
-
-/* PCIE CORE ACCESS REG */
-#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
-#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
-#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
-#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
-
-/* NIC status register */
-#define NIC_STAT_REG 0x0200
-#define EE_STRAP_EN_LBN 31
-#define EE_STRAP_EN_WIDTH 1
-#define EE_STRAP_OVR_LBN 24
-#define EE_STRAP_OVR_WIDTH 4
-#define ONCHIP_SRAM_LBN 16
-#define ONCHIP_SRAM_WIDTH 1
-#define SF_PRST_LBN 9
-#define SF_PRST_WIDTH 1
-#define EE_PRST_LBN 8
-#define EE_PRST_WIDTH 1
-#define STRAP_PINS_LBN 0
-#define STRAP_PINS_WIDTH 3
-/* These bit definitions are extrapolated from the list of numerical
- * values for STRAP_PINS.
- */
-#define STRAP_10G_LBN 2
-#define STRAP_10G_WIDTH 1
-#define STRAP_PCIE_LBN 0
-#define STRAP_PCIE_WIDTH 1
-
-#define BOOTED_USING_NVDEVICE_LBN 3
-#define BOOTED_USING_NVDEVICE_WIDTH 1
-
-/* GPIO control register */
-#define GPIO_CTL_REG_KER 0x0210
-#define GPIO_USE_NIC_CLK_LBN (30)
-#define GPIO_USE_NIC_CLK_WIDTH (1)
-#define GPIO_OUTPUTS_LBN   (16)
-#define GPIO_OUTPUTS_WIDTH (4)
-#define GPIO_INPUTS_LBN (8)
-#define GPIO_DIRECTION_LBN (24)
-#define GPIO_DIRECTION_WIDTH (4)
-#define GPIO_DIRECTION_OUT (1)
-#define GPIO_SRAM_SLEEP (1 << 1)
-
-#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
-#define	GPIO3_OEN_WIDTH 1
-#define	GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
-#define	GPIO2_OEN_WIDTH 1
-#define	GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
-#define	GPIO1_OEN_WIDTH 1
-#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
-#define	GPIO0_OEN_WIDTH 1
-
-#define	GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
-#define	GPIO3_OUT_WIDTH 1
-#define	GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
-#define	GPIO2_OUT_WIDTH 1
-#define	GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
-#define	GPIO1_OUT_WIDTH 1
-#define	GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
-#define	GPIO0_OUT_WIDTH 1
-
-#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
-#define	GPIO3_IN_WIDTH 1
-#define	GPIO2_IN_WIDTH 1
-#define	GPIO1_IN_WIDTH 1
-#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
-#define	GPIO0_IN_WIDTH 1
-
-/* Global control register */
-#define GLB_CTL_REG_KER	0x0220
-#define EXT_PHY_RST_CTL_LBN 63
-#define EXT_PHY_RST_CTL_WIDTH 1
-#define PCIE_SD_RST_CTL_LBN 61
-#define PCIE_SD_RST_CTL_WIDTH 1
-
-#define PCIE_NSTCK_RST_CTL_LBN 58
-#define PCIE_NSTCK_RST_CTL_WIDTH 1
-#define PCIE_CORE_RST_CTL_LBN 57
-#define PCIE_CORE_RST_CTL_WIDTH 1
-#define EE_RST_CTL_LBN 49
-#define EE_RST_CTL_WIDTH 1
-#define RST_XGRX_LBN 24
-#define RST_XGRX_WIDTH 1
-#define RST_XGTX_LBN 23
-#define RST_XGTX_WIDTH 1
-#define RST_EM_LBN 22
-#define RST_EM_WIDTH 1
-#define EXT_PHY_RST_DUR_LBN 1
-#define EXT_PHY_RST_DUR_WIDTH 3
-#define SWRST_LBN 0
-#define SWRST_WIDTH 1
-#define INCLUDE_IN_RESET 0
-#define EXCLUDE_FROM_RESET 1
-
-/* Fatal interrupt register */
-#define FATAL_INTR_REG_KER 0x0230
-#define RBUF_OWN_INT_KER_EN_LBN 39
-#define RBUF_OWN_INT_KER_EN_WIDTH 1
-#define TBUF_OWN_INT_KER_EN_LBN 38
-#define TBUF_OWN_INT_KER_EN_WIDTH 1
-#define ILL_ADR_INT_KER_EN_LBN 33
-#define ILL_ADR_INT_KER_EN_WIDTH 1
-#define MEM_PERR_INT_KER_LBN 8
-#define MEM_PERR_INT_KER_WIDTH 1
-#define INT_KER_ERROR_LBN 0
-#define INT_KER_ERROR_WIDTH 12
-
-#define DP_CTRL_REG 0x250
-#define FLS_EVQ_ID_LBN 0
-#define FLS_EVQ_ID_WIDTH 11
-
-#define MEM_STAT_REG_KER 0x260
-
-/* Debug probe register */
-#define DEBUG_BLK_SEL_MISC 7
-#define DEBUG_BLK_SEL_SERDES 6
-#define DEBUG_BLK_SEL_EM 5
-#define DEBUG_BLK_SEL_SR 4
-#define DEBUG_BLK_SEL_EV 3
-#define DEBUG_BLK_SEL_RX 2
-#define DEBUG_BLK_SEL_TX 1
-#define DEBUG_BLK_SEL_BIU 0
-
-/* FPGA build version */
-#define ALTERA_BUILD_REG_KER 0x0300
-#define VER_ALL_LBN 0
-#define VER_ALL_WIDTH 32
-
-/* Spare EEPROM bits register (flash 0x390) */
-#define SPARE_REG_KER 0x310
-#define MEM_PERR_EN_TX_DATA_LBN 72
-#define MEM_PERR_EN_TX_DATA_WIDTH 2
-
-/* Timer table for kernel access */
-#define TIMER_CMD_REG_KER 0x420
-#define TIMER_MODE_LBN 12
-#define TIMER_MODE_WIDTH 2
-#define TIMER_MODE_DIS 0
-#define TIMER_MODE_INT_HLDOFF 2
-#define TIMER_VAL_LBN 0
-#define TIMER_VAL_WIDTH 12
-
-/* Driver generated event register */
-#define DRV_EV_REG_KER 0x440
-#define DRV_EV_QID_LBN 64
-#define DRV_EV_QID_WIDTH 12
-#define DRV_EV_DATA_LBN 0
-#define DRV_EV_DATA_WIDTH 64
-
-/* Buffer table configuration register */
-#define BUF_TBL_CFG_REG_KER 0x600
-#define BUF_TBL_MODE_LBN 3
-#define BUF_TBL_MODE_WIDTH 1
-#define BUF_TBL_MODE_HALF 0
-#define BUF_TBL_MODE_FULL 1
-
-/* SRAM receive descriptor cache configuration register */
-#define SRM_RX_DC_CFG_REG_KER 0x610
-#define SRM_RX_DC_BASE_ADR_LBN 0
-#define SRM_RX_DC_BASE_ADR_WIDTH 21
-
-/* SRAM transmit descriptor cache configuration register */
-#define SRM_TX_DC_CFG_REG_KER 0x620
-#define SRM_TX_DC_BASE_ADR_LBN 0
-#define SRM_TX_DC_BASE_ADR_WIDTH 21
-
-/* SRAM configuration register */
-#define SRM_CFG_REG_KER 0x630
-#define SRAM_OOB_BT_INIT_EN_LBN 3
-#define SRAM_OOB_BT_INIT_EN_WIDTH 1
-#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
-#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
-#define SRM_NB_BSZ_1BANKS_2M 0
-#define SRM_NB_BSZ_1BANKS_4M 1
-#define SRM_NB_BSZ_1BANKS_8M 2
-#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
-#define SRM_NB_BSZ_2BANKS_4M 4
-#define SRM_NB_BSZ_2BANKS_8M 5
-#define SRM_NB_BSZ_2BANKS_16M 6
-#define SRM_NB_BSZ_RESERVED 7
-
-/* Special buffer table update register */
-#define BUF_TBL_UPD_REG_KER 0x0650
-#define BUF_UPD_CMD_LBN 63
-#define BUF_UPD_CMD_WIDTH 1
-#define BUF_CLR_CMD_LBN 62
-#define BUF_CLR_CMD_WIDTH 1
-#define BUF_CLR_END_ID_LBN 32
-#define BUF_CLR_END_ID_WIDTH 20
-#define BUF_CLR_START_ID_LBN 0
-#define BUF_CLR_START_ID_WIDTH 20
-
-/* Receive configuration register */
-#define RX_CFG_REG_KER 0x800
-
-/* B0 */
-#define RX_INGR_EN_B0_LBN 47
-#define RX_INGR_EN_B0_WIDTH 1
-#define RX_DESC_PUSH_EN_B0_LBN 43
-#define RX_DESC_PUSH_EN_B0_WIDTH 1
-#define RX_XON_TX_TH_B0_LBN 33
-#define RX_XON_TX_TH_B0_WIDTH 5
-#define RX_XOFF_TX_TH_B0_LBN 28
-#define RX_XOFF_TX_TH_B0_WIDTH 5
-#define RX_USR_BUF_SIZE_B0_LBN 19
-#define RX_USR_BUF_SIZE_B0_WIDTH 9
-#define RX_XON_MAC_TH_B0_LBN 10
-#define RX_XON_MAC_TH_B0_WIDTH 9
-#define RX_XOFF_MAC_TH_B0_LBN 1
-#define RX_XOFF_MAC_TH_B0_WIDTH 9
-#define RX_XOFF_MAC_EN_B0_LBN 0
-#define RX_XOFF_MAC_EN_B0_WIDTH 1
-
-/* A1 */
-#define RX_DESC_PUSH_EN_A1_LBN 35
-#define RX_DESC_PUSH_EN_A1_WIDTH 1
-#define RX_XON_TX_TH_A1_LBN 25
-#define RX_XON_TX_TH_A1_WIDTH 5
-#define RX_XOFF_TX_TH_A1_LBN 20
-#define RX_XOFF_TX_TH_A1_WIDTH 5
-#define RX_USR_BUF_SIZE_A1_LBN 11
-#define RX_USR_BUF_SIZE_A1_WIDTH 9
-#define RX_XON_MAC_TH_A1_LBN 6
-#define RX_XON_MAC_TH_A1_WIDTH 5
-#define RX_XOFF_MAC_TH_A1_LBN 1
-#define RX_XOFF_MAC_TH_A1_WIDTH 5
-#define RX_XOFF_MAC_EN_A1_LBN 0
-#define RX_XOFF_MAC_EN_A1_WIDTH 1
-
-/* Receive filter control register */
-#define RX_FILTER_CTL_REG 0x810
-#define UDP_FULL_SRCH_LIMIT_LBN 32
-#define UDP_FULL_SRCH_LIMIT_WIDTH 8
-#define NUM_KER_LBN 24
-#define NUM_KER_WIDTH 2
-#define UDP_WILD_SRCH_LIMIT_LBN 16
-#define UDP_WILD_SRCH_LIMIT_WIDTH 8
-#define TCP_WILD_SRCH_LIMIT_LBN 8
-#define TCP_WILD_SRCH_LIMIT_WIDTH 8
-#define TCP_FULL_SRCH_LIMIT_LBN 0
-#define TCP_FULL_SRCH_LIMIT_WIDTH 8
-
-/* RX queue flush register */
-#define RX_FLUSH_DESCQ_REG_KER 0x0820
-#define RX_FLUSH_DESCQ_CMD_LBN 24
-#define RX_FLUSH_DESCQ_CMD_WIDTH 1
-#define RX_FLUSH_DESCQ_LBN 0
-#define RX_FLUSH_DESCQ_WIDTH 12
-
-/* Receive descriptor update register */
-#define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12)
-#define RX_DESC_WPTR_DWORD_LBN 0
-#define RX_DESC_WPTR_DWORD_WIDTH 12
-
-/* Receive descriptor cache configuration register */
-#define RX_DC_CFG_REG_KER 0x840
-#define RX_DC_SIZE_LBN 0
-#define RX_DC_SIZE_WIDTH 2
-
-#define RX_DC_PF_WM_REG_KER 0x850
-#define RX_DC_PF_LWM_LBN 0
-#define RX_DC_PF_LWM_WIDTH 6
-
-/* RX no descriptor drop counter */
-#define RX_NODESC_DROP_REG_KER 0x880
-#define RX_NODESC_DROP_CNT_LBN 0
-#define RX_NODESC_DROP_CNT_WIDTH 16
-
-/* RX black magic register */
-#define RX_SELF_RST_REG_KER 0x890
-#define RX_ISCSI_DIS_LBN 17
-#define RX_ISCSI_DIS_WIDTH 1
-#define RX_NODESC_WAIT_DIS_LBN 9
-#define RX_NODESC_WAIT_DIS_WIDTH 1
-#define RX_RECOVERY_EN_LBN 8
-#define RX_RECOVERY_EN_WIDTH 1
-
-/* TX queue flush register */
-#define TX_FLUSH_DESCQ_REG_KER 0x0a00
-#define TX_FLUSH_DESCQ_CMD_LBN 12
-#define TX_FLUSH_DESCQ_CMD_WIDTH 1
-#define TX_FLUSH_DESCQ_LBN 0
-#define TX_FLUSH_DESCQ_WIDTH 12
-
-/* Transmit descriptor update register */
-#define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12)
-#define TX_DESC_WPTR_DWORD_LBN 0
-#define TX_DESC_WPTR_DWORD_WIDTH 12
-
-/* Transmit descriptor cache configuration register */
-#define TX_DC_CFG_REG_KER 0xa20
-#define TX_DC_SIZE_LBN 0
-#define TX_DC_SIZE_WIDTH 2
-
-/* Transmit checksum configuration register (A0/A1 only) */
-#define TX_CHKSM_CFG_REG_KER_A1 0xa30
-
-/* Transmit configuration register */
-#define TX_CFG_REG_KER 0xa50
-#define TX_NO_EOP_DISC_EN_LBN 5
-#define TX_NO_EOP_DISC_EN_WIDTH 1
-
-/* Transmit configuration register 2 */
-#define TX_CFG2_REG_KER 0xa80
-#define TX_CSR_PUSH_EN_LBN 89
-#define TX_CSR_PUSH_EN_WIDTH 1
-#define TX_RX_SPACER_LBN 64
-#define TX_RX_SPACER_WIDTH 8
-#define TX_SW_EV_EN_LBN 59
-#define TX_SW_EV_EN_WIDTH 1
-#define TX_RX_SPACER_EN_LBN 57
-#define TX_RX_SPACER_EN_WIDTH 1
-#define TX_PREF_THRESHOLD_LBN 19
-#define TX_PREF_THRESHOLD_WIDTH 2
-#define TX_ONE_PKT_PER_Q_LBN 18
-#define TX_ONE_PKT_PER_Q_WIDTH 1
-#define TX_DIS_NON_IP_EV_LBN 17
-#define TX_DIS_NON_IP_EV_WIDTH 1
-#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
-#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
-
-/* PHY management transmit data register */
-#define MD_TXD_REG_KER 0xc00
-#define MD_TXD_LBN 0
-#define MD_TXD_WIDTH 16
-
-/* PHY management receive data register */
-#define MD_RXD_REG_KER 0xc10
-#define MD_RXD_LBN 0
-#define MD_RXD_WIDTH 16
-
-/* PHY management configuration & status register */
-#define MD_CS_REG_KER 0xc20
-#define MD_GC_LBN 4
-#define MD_GC_WIDTH 1
-#define MD_RIC_LBN 2
-#define MD_RIC_WIDTH 1
-#define MD_RDC_LBN 1
-#define MD_RDC_WIDTH 1
-#define MD_WRC_LBN 0
-#define MD_WRC_WIDTH 1
-
-/* PHY management PHY address register */
-#define MD_PHY_ADR_REG_KER 0xc30
-#define MD_PHY_ADR_LBN 0
-#define MD_PHY_ADR_WIDTH 16
-
-/* PHY management ID register */
-#define MD_ID_REG_KER 0xc40
-#define MD_PRT_ADR_LBN 11
-#define MD_PRT_ADR_WIDTH 5
-#define MD_DEV_ADR_LBN 6
-#define MD_DEV_ADR_WIDTH 5
-
-/* PHY management status & mask register (DWORD read only) */
-#define MD_STAT_REG_KER 0xc50
-#define MD_BSERR_LBN 2
-#define MD_BSERR_WIDTH 1
-#define MD_LNFL_LBN 1
-#define MD_LNFL_WIDTH 1
-#define MD_BSY_LBN 0
-#define MD_BSY_WIDTH 1
-
-/* Port 0 and 1 MAC stats registers */
-#define MAC0_STAT_DMA_REG_KER 0xc60
-#define MAC_STAT_DMA_CMD_LBN 48
-#define MAC_STAT_DMA_CMD_WIDTH 1
-#define MAC_STAT_DMA_ADR_LBN 0
-#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
-
-/* Port 0 and 1 MAC control registers */
-#define MAC0_CTRL_REG_KER 0xc80
-#define MAC_XOFF_VAL_LBN 16
-#define MAC_XOFF_VAL_WIDTH 16
-#define TXFIFO_DRAIN_EN_B0_LBN 7
-#define TXFIFO_DRAIN_EN_B0_WIDTH 1
-#define MAC_BCAD_ACPT_LBN 4
-#define MAC_BCAD_ACPT_WIDTH 1
-#define MAC_UC_PROM_LBN 3
-#define MAC_UC_PROM_WIDTH 1
-#define MAC_LINK_STATUS_LBN 2
-#define MAC_LINK_STATUS_WIDTH 1
-#define MAC_SPEED_LBN 0
-#define MAC_SPEED_WIDTH 2
-
-/* 10G XAUI XGXS default values */
-#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
-#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
-#define XX_SD_CTL_DRV_DEFAULT 0  /* 20mA */
-
-/* Multicast address hash table */
-#define MAC_MCAST_HASH_REG0_KER 0xca0
-#define MAC_MCAST_HASH_REG1_KER 0xcb0
-
-/* GMAC configuration register 1 */
-#define GM_CFG1_REG 0xe00
-#define GM_SW_RST_LBN 31
-#define GM_SW_RST_WIDTH 1
-#define GM_LOOP_LBN 8
-#define GM_LOOP_WIDTH 1
-#define GM_RX_FC_EN_LBN 5
-#define GM_RX_FC_EN_WIDTH 1
-#define GM_TX_FC_EN_LBN 4
-#define GM_TX_FC_EN_WIDTH 1
-#define GM_RX_EN_LBN 2
-#define GM_RX_EN_WIDTH 1
-#define GM_TX_EN_LBN 0
-#define GM_TX_EN_WIDTH 1
-
-/* GMAC configuration register 2 */
-#define GM_CFG2_REG 0xe10
-#define GM_PAMBL_LEN_LBN 12
-#define GM_PAMBL_LEN_WIDTH 4
-#define GM_IF_MODE_LBN 8
-#define GM_IF_MODE_WIDTH 2
-#define GM_LEN_CHK_LBN 4
-#define GM_LEN_CHK_WIDTH 1
-#define GM_PAD_CRC_EN_LBN 2
-#define GM_PAD_CRC_EN_WIDTH 1
-#define GM_FD_LBN 0
-#define GM_FD_WIDTH 1
-
-/* GMAC maximum frame length register */
-#define GM_MAX_FLEN_REG 0xe40
-#define GM_MAX_FLEN_LBN 0
-#define GM_MAX_FLEN_WIDTH 16
-
-/* GMAC station address register 1 */
-#define GM_ADR1_REG 0xf00
-#define GM_HWADDR_5_LBN 24
-#define GM_HWADDR_5_WIDTH 8
-#define GM_HWADDR_4_LBN 16
-#define GM_HWADDR_4_WIDTH 8
-#define GM_HWADDR_3_LBN 8
-#define GM_HWADDR_3_WIDTH 8
-#define GM_HWADDR_2_LBN 0
-#define GM_HWADDR_2_WIDTH 8
-
-/* GMAC station address register 2 */
-#define GM_ADR2_REG 0xf10
-#define GM_HWADDR_1_LBN 24
-#define GM_HWADDR_1_WIDTH 8
-#define GM_HWADDR_0_LBN 16
-#define GM_HWADDR_0_WIDTH 8
-
-/* GMAC FIFO configuration register 0 */
-#define GMF_CFG0_REG 0xf20
-#define GMF_FTFENREQ_LBN 12
-#define GMF_FTFENREQ_WIDTH 1
-#define GMF_STFENREQ_LBN 11
-#define GMF_STFENREQ_WIDTH 1
-#define GMF_FRFENREQ_LBN 10
-#define GMF_FRFENREQ_WIDTH 1
-#define GMF_SRFENREQ_LBN 9
-#define GMF_SRFENREQ_WIDTH 1
-#define GMF_WTMENREQ_LBN 8
-#define GMF_WTMENREQ_WIDTH 1
-
-/* GMAC FIFO configuration register 1 */
-#define GMF_CFG1_REG 0xf30
-#define GMF_CFGFRTH_LBN 16
-#define GMF_CFGFRTH_WIDTH 5
-#define GMF_CFGXOFFRTX_LBN 0
-#define GMF_CFGXOFFRTX_WIDTH 16
-
-/* GMAC FIFO configuration register 2 */
-#define GMF_CFG2_REG 0xf40
-#define GMF_CFGHWM_LBN 16
-#define GMF_CFGHWM_WIDTH 6
-#define GMF_CFGLWM_LBN 0
-#define GMF_CFGLWM_WIDTH 6
-
-/* GMAC FIFO configuration register 3 */
-#define GMF_CFG3_REG 0xf50
-#define GMF_CFGHWMFT_LBN 16
-#define GMF_CFGHWMFT_WIDTH 6
-#define GMF_CFGFTTH_LBN 0
-#define GMF_CFGFTTH_WIDTH 6
-
-/* GMAC FIFO configuration register 4 */
-#define GMF_CFG4_REG 0xf60
-#define GMF_HSTFLTRFRM_PAUSE_LBN 12
-#define GMF_HSTFLTRFRM_PAUSE_WIDTH 12
-
-/* GMAC FIFO configuration register 5 */
-#define GMF_CFG5_REG 0xf70
-#define GMF_CFGHDPLX_LBN 22
-#define GMF_CFGHDPLX_WIDTH 1
-#define GMF_CFGBYTMODE_LBN 19
-#define GMF_CFGBYTMODE_WIDTH 1
-#define GMF_HSTDRPLT64_LBN 18
-#define GMF_HSTDRPLT64_WIDTH 1
-#define GMF_HSTFLTRFRMDC_PAUSE_LBN 12
-#define GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
-
-/* XGMAC address register low */
-#define XM_ADR_LO_REG 0x1200
-#define XM_ADR_3_LBN 24
-#define XM_ADR_3_WIDTH 8
-#define XM_ADR_2_LBN 16
-#define XM_ADR_2_WIDTH 8
-#define XM_ADR_1_LBN 8
-#define XM_ADR_1_WIDTH 8
-#define XM_ADR_0_LBN 0
-#define XM_ADR_0_WIDTH 8
-
-/* XGMAC address register high */
-#define XM_ADR_HI_REG 0x1210
-#define XM_ADR_5_LBN 8
-#define XM_ADR_5_WIDTH 8
-#define XM_ADR_4_LBN 0
-#define XM_ADR_4_WIDTH 8
-
-/* XGMAC global configuration */
-#define XM_GLB_CFG_REG 0x1220
-#define XM_RX_STAT_EN_LBN 11
-#define XM_RX_STAT_EN_WIDTH 1
-#define XM_TX_STAT_EN_LBN 10
-#define XM_TX_STAT_EN_WIDTH 1
-#define XM_RX_JUMBO_MODE_LBN 6
-#define XM_RX_JUMBO_MODE_WIDTH 1
-#define XM_INTCLR_MODE_LBN 3
-#define XM_INTCLR_MODE_WIDTH 1
-#define XM_CORE_RST_LBN 0
-#define XM_CORE_RST_WIDTH 1
-
-/* XGMAC transmit configuration */
-#define XM_TX_CFG_REG 0x1230
-#define XM_IPG_LBN 16
-#define XM_IPG_WIDTH 4
-#define XM_FCNTL_LBN 10
-#define XM_FCNTL_WIDTH 1
-#define XM_TXCRC_LBN 8
-#define XM_TXCRC_WIDTH 1
-#define XM_AUTO_PAD_LBN 5
-#define XM_AUTO_PAD_WIDTH 1
-#define XM_TX_PRMBL_LBN 2
-#define XM_TX_PRMBL_WIDTH 1
-#define XM_TXEN_LBN 1
-#define XM_TXEN_WIDTH 1
-
-/* XGMAC receive configuration */
-#define XM_RX_CFG_REG 0x1240
-#define XM_PASS_CRC_ERR_LBN 25
-#define XM_PASS_CRC_ERR_WIDTH 1
-#define XM_ACPT_ALL_MCAST_LBN 11
-#define XM_ACPT_ALL_MCAST_WIDTH 1
-#define XM_ACPT_ALL_UCAST_LBN 9
-#define XM_ACPT_ALL_UCAST_WIDTH 1
-#define XM_AUTO_DEPAD_LBN 8
-#define XM_AUTO_DEPAD_WIDTH 1
-#define XM_RXEN_LBN 1
-#define XM_RXEN_WIDTH 1
-
-/* XGMAC management interrupt mask register */
-#define XM_MGT_INT_MSK_REG_B0 0x1250
-#define XM_MSK_PRMBLE_ERR_LBN 2
-#define XM_MSK_PRMBLE_ERR_WIDTH 1
-#define XM_MSK_RMTFLT_LBN 1
-#define XM_MSK_RMTFLT_WIDTH 1
-#define XM_MSK_LCLFLT_LBN 0
-#define XM_MSK_LCLFLT_WIDTH 1
-
-/* XGMAC flow control register */
-#define XM_FC_REG 0x1270
-#define XM_PAUSE_TIME_LBN 16
-#define XM_PAUSE_TIME_WIDTH 16
-#define XM_DIS_FCNTL_LBN 0
-#define XM_DIS_FCNTL_WIDTH 1
-
-/* XGMAC pause time count register */
-#define XM_PAUSE_TIME_REG 0x1290
-
-/* XGMAC transmit parameter register */
-#define XM_TX_PARAM_REG 0x012d0
-#define XM_TX_JUMBO_MODE_LBN 31
-#define XM_TX_JUMBO_MODE_WIDTH 1
-#define XM_MAX_TX_FRM_SIZE_LBN 16
-#define XM_MAX_TX_FRM_SIZE_WIDTH 14
-
-/* XGMAC receive parameter register */
-#define XM_RX_PARAM_REG 0x12e0
-#define XM_MAX_RX_FRM_SIZE_LBN 0
-#define XM_MAX_RX_FRM_SIZE_WIDTH 14
-
-/* XGMAC management interrupt status register */
-#define XM_MGT_INT_REG_B0 0x12f0
-#define XM_PRMBLE_ERR 2
-#define XM_PRMBLE_WIDTH 1
-#define XM_RMTFLT_LBN 1
-#define XM_RMTFLT_WIDTH 1
-#define XM_LCLFLT_LBN 0
-#define XM_LCLFLT_WIDTH 1
-
-/* XGXS/XAUI powerdown/reset register */
-#define XX_PWR_RST_REG 0x1300
-
-#define XX_SD_RST_ACT_LBN 16
-#define XX_SD_RST_ACT_WIDTH 1
-#define XX_PWRDND_EN_LBN 15
-#define XX_PWRDND_EN_WIDTH 1
-#define XX_PWRDNC_EN_LBN 14
-#define XX_PWRDNC_EN_WIDTH 1
-#define XX_PWRDNB_EN_LBN 13
-#define XX_PWRDNB_EN_WIDTH 1
-#define XX_PWRDNA_EN_LBN 12
-#define XX_PWRDNA_EN_WIDTH 1
-#define XX_RSTPLLCD_EN_LBN 9
-#define XX_RSTPLLCD_EN_WIDTH 1
-#define XX_RSTPLLAB_EN_LBN 8
-#define XX_RSTPLLAB_EN_WIDTH 1
-#define XX_RESETD_EN_LBN 7
-#define XX_RESETD_EN_WIDTH 1
-#define XX_RESETC_EN_LBN 6
-#define XX_RESETC_EN_WIDTH 1
-#define XX_RESETB_EN_LBN 5
-#define XX_RESETB_EN_WIDTH 1
-#define XX_RESETA_EN_LBN 4
-#define XX_RESETA_EN_WIDTH 1
-#define XX_RSTXGXSRX_EN_LBN 2
-#define XX_RSTXGXSRX_EN_WIDTH 1
-#define XX_RSTXGXSTX_EN_LBN 1
-#define XX_RSTXGXSTX_EN_WIDTH 1
-#define XX_RST_XX_EN_LBN 0
-#define XX_RST_XX_EN_WIDTH 1
-
-/* XGXS/XAUI powerdown/reset control register */
-#define XX_SD_CTL_REG 0x1310
-#define XX_HIDRVD_LBN 15
-#define XX_HIDRVD_WIDTH 1
-#define XX_LODRVD_LBN 14
-#define XX_LODRVD_WIDTH 1
-#define XX_HIDRVC_LBN 13
-#define XX_HIDRVC_WIDTH 1
-#define XX_LODRVC_LBN 12
-#define XX_LODRVC_WIDTH 1
-#define XX_HIDRVB_LBN 11
-#define XX_HIDRVB_WIDTH 1
-#define XX_LODRVB_LBN 10
-#define XX_LODRVB_WIDTH 1
-#define XX_HIDRVA_LBN 9
-#define XX_HIDRVA_WIDTH 1
-#define XX_LODRVA_LBN 8
-#define XX_LODRVA_WIDTH 1
-#define XX_LPBKD_LBN 3
-#define XX_LPBKD_WIDTH 1
-#define XX_LPBKC_LBN 2
-#define XX_LPBKC_WIDTH 1
-#define XX_LPBKB_LBN 1
-#define XX_LPBKB_WIDTH 1
-#define XX_LPBKA_LBN 0
-#define XX_LPBKA_WIDTH 1
-
-#define XX_TXDRV_CTL_REG 0x1320
-#define XX_DEQD_LBN 28
-#define XX_DEQD_WIDTH 4
-#define XX_DEQC_LBN 24
-#define XX_DEQC_WIDTH 4
-#define XX_DEQB_LBN 20
-#define XX_DEQB_WIDTH 4
-#define XX_DEQA_LBN 16
-#define XX_DEQA_WIDTH 4
-#define XX_DTXD_LBN 12
-#define XX_DTXD_WIDTH 4
-#define XX_DTXC_LBN 8
-#define XX_DTXC_WIDTH 4
-#define XX_DTXB_LBN 4
-#define XX_DTXB_WIDTH 4
-#define XX_DTXA_LBN 0
-#define XX_DTXA_WIDTH 4
-
-/* XAUI XGXS core status register */
-#define XX_CORE_STAT_REG 0x1360
-#define XX_FORCE_SIG_LBN 24
-#define XX_FORCE_SIG_WIDTH 8
-#define XX_FORCE_SIG_DECODE_FORCED 0xff
-#define XX_XGXS_LB_EN_LBN 23
-#define XX_XGXS_LB_EN_WIDTH 1
-#define XX_XGMII_LB_EN_LBN 22
-#define XX_XGMII_LB_EN_WIDTH 1
-#define XX_ALIGN_DONE_LBN 20
-#define XX_ALIGN_DONE_WIDTH 1
-#define XX_SYNC_STAT_LBN 16
-#define XX_SYNC_STAT_WIDTH 4
-#define XX_SYNC_STAT_DECODE_SYNCED 0xf
-#define XX_COMMA_DET_LBN 12
-#define XX_COMMA_DET_WIDTH 4
-#define XX_COMMA_DET_DECODE_DETECTED 0xf
-#define XX_COMMA_DET_RESET 0xf
-#define XX_CHARERR_LBN 4
-#define XX_CHARERR_WIDTH 4
-#define XX_CHARERR_RESET 0xf
-#define XX_DISPERR_LBN 0
-#define XX_DISPERR_WIDTH 4
-#define XX_DISPERR_RESET 0xf
-
-/* Receive filter table */
-#define RX_FILTER_TBL0 0xF00000
-
-/* Receive descriptor pointer table */
-#define RX_DESC_PTR_TBL_KER_A1 0x11800
-#define RX_DESC_PTR_TBL_KER_B0 0xF40000
-#define RX_DESC_PTR_TBL_KER_P0 0x900
-#define RX_ISCSI_DDIG_EN_LBN 88
-#define RX_ISCSI_DDIG_EN_WIDTH 1
-#define RX_ISCSI_HDIG_EN_LBN 87
-#define RX_ISCSI_HDIG_EN_WIDTH 1
-#define RX_DESCQ_BUF_BASE_ID_LBN 36
-#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
-#define RX_DESCQ_EVQ_ID_LBN 24
-#define RX_DESCQ_EVQ_ID_WIDTH 12
-#define RX_DESCQ_OWNER_ID_LBN 10
-#define RX_DESCQ_OWNER_ID_WIDTH 14
-#define RX_DESCQ_LABEL_LBN 5
-#define RX_DESCQ_LABEL_WIDTH 5
-#define RX_DESCQ_SIZE_LBN 3
-#define RX_DESCQ_SIZE_WIDTH 2
-#define RX_DESCQ_SIZE_4K 3
-#define RX_DESCQ_SIZE_2K 2
-#define RX_DESCQ_SIZE_1K 1
-#define RX_DESCQ_SIZE_512 0
-#define RX_DESCQ_TYPE_LBN 2
-#define RX_DESCQ_TYPE_WIDTH 1
-#define RX_DESCQ_JUMBO_LBN 1
-#define RX_DESCQ_JUMBO_WIDTH 1
-#define RX_DESCQ_EN_LBN 0
-#define RX_DESCQ_EN_WIDTH 1
-
-/* Transmit descriptor pointer table */
-#define TX_DESC_PTR_TBL_KER_A1 0x11900
-#define TX_DESC_PTR_TBL_KER_B0 0xF50000
-#define TX_DESC_PTR_TBL_KER_P0 0xa40
-#define TX_NON_IP_DROP_DIS_B0_LBN 91
-#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
-#define TX_IP_CHKSM_DIS_B0_LBN 90
-#define TX_IP_CHKSM_DIS_B0_WIDTH 1
-#define TX_TCP_CHKSM_DIS_B0_LBN 89
-#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
-#define TX_DESCQ_EN_LBN 88
-#define TX_DESCQ_EN_WIDTH 1
-#define TX_ISCSI_DDIG_EN_LBN 87
-#define TX_ISCSI_DDIG_EN_WIDTH 1
-#define TX_ISCSI_HDIG_EN_LBN 86
-#define TX_ISCSI_HDIG_EN_WIDTH 1
-#define TX_DESCQ_BUF_BASE_ID_LBN 36
-#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
-#define TX_DESCQ_EVQ_ID_LBN 24
-#define TX_DESCQ_EVQ_ID_WIDTH 12
-#define TX_DESCQ_OWNER_ID_LBN 10
-#define TX_DESCQ_OWNER_ID_WIDTH 14
-#define TX_DESCQ_LABEL_LBN 5
-#define TX_DESCQ_LABEL_WIDTH 5
-#define TX_DESCQ_SIZE_LBN 3
-#define TX_DESCQ_SIZE_WIDTH 2
-#define TX_DESCQ_SIZE_4K 3
-#define TX_DESCQ_SIZE_2K 2
-#define TX_DESCQ_SIZE_1K 1
-#define TX_DESCQ_SIZE_512 0
-#define TX_DESCQ_TYPE_LBN 1
-#define TX_DESCQ_TYPE_WIDTH 2
-
-/* Event queue pointer */
-#define EVQ_PTR_TBL_KER_A1 0x11a00
-#define EVQ_PTR_TBL_KER_B0 0xf60000
-#define EVQ_PTR_TBL_KER_P0 0x500
-#define EVQ_EN_LBN 23
-#define EVQ_EN_WIDTH 1
-#define EVQ_SIZE_LBN 20
-#define EVQ_SIZE_WIDTH 3
-#define EVQ_SIZE_32K 6
-#define EVQ_SIZE_16K 5
-#define EVQ_SIZE_8K 4
-#define EVQ_SIZE_4K 3
-#define EVQ_SIZE_2K 2
-#define EVQ_SIZE_1K 1
-#define EVQ_SIZE_512 0
-#define EVQ_BUF_BASE_ID_LBN 0
-#define EVQ_BUF_BASE_ID_WIDTH 20
-
-/* Event queue read pointer */
-#define EVQ_RPTR_REG_KER_A1 0x11b00
-#define EVQ_RPTR_REG_KER_B0 0xfa0000
-#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
-#define EVQ_RPTR_DWORD_LBN 0
-#define EVQ_RPTR_DWORD_WIDTH 14
-
-/* RSS indirection table */
-#define RX_RSS_INDIR_TBL_B0 0xFB0000
-#define RX_RSS_INDIR_ENT_B0_LBN 0
-#define RX_RSS_INDIR_ENT_B0_WIDTH 6
-
-/* Special buffer descriptors (full-mode) */
-#define BUF_FULL_TBL_KER_A1 0x8000
-#define BUF_FULL_TBL_KER_B0 0x800000
-#define IP_DAT_BUF_SIZE_LBN 50
-#define IP_DAT_BUF_SIZE_WIDTH 1
-#define IP_DAT_BUF_SIZE_8K 1
-#define IP_DAT_BUF_SIZE_4K 0
-#define BUF_ADR_REGION_LBN 48
-#define BUF_ADR_REGION_WIDTH 2
-#define BUF_ADR_FBUF_LBN 14
-#define BUF_ADR_FBUF_WIDTH 34
-#define BUF_OWNER_ID_FBUF_LBN 0
-#define BUF_OWNER_ID_FBUF_WIDTH 14
-
-/* Transmit descriptor */
-#define TX_KER_PORT_LBN 63
-#define TX_KER_PORT_WIDTH 1
-#define TX_KER_CONT_LBN 62
-#define TX_KER_CONT_WIDTH 1
-#define TX_KER_BYTE_CNT_LBN 48
-#define TX_KER_BYTE_CNT_WIDTH 14
-#define TX_KER_BUF_REGION_LBN 46
-#define TX_KER_BUF_REGION_WIDTH 2
-#define TX_KER_BUF_REGION0_DECODE 0
-#define TX_KER_BUF_REGION1_DECODE 1
-#define TX_KER_BUF_REGION2_DECODE 2
-#define TX_KER_BUF_REGION3_DECODE 3
-#define TX_KER_BUF_ADR_LBN 0
-#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
-
-/* Receive descriptor */
-#define RX_KER_BUF_SIZE_LBN 48
-#define RX_KER_BUF_SIZE_WIDTH 14
-#define RX_KER_BUF_REGION_LBN 46
-#define RX_KER_BUF_REGION_WIDTH 2
-#define RX_KER_BUF_REGION0_DECODE 0
-#define RX_KER_BUF_REGION1_DECODE 1
-#define RX_KER_BUF_REGION2_DECODE 2
-#define RX_KER_BUF_REGION3_DECODE 3
-#define RX_KER_BUF_ADR_LBN 0
-#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
-
-/**************************************************************************
- *
- * Falcon events
- *
- **************************************************************************
- */
-
-/* Event queue entries */
-#define EV_CODE_LBN 60
-#define EV_CODE_WIDTH 4
-#define RX_IP_EV_DECODE 0
-#define TX_IP_EV_DECODE 2
-#define DRIVER_EV_DECODE 5
-#define GLOBAL_EV_DECODE 6
-#define DRV_GEN_EV_DECODE 7
-#define WHOLE_EVENT_LBN 0
-#define WHOLE_EVENT_WIDTH 64
-
-/* Receive events */
-#define RX_EV_PKT_OK_LBN 56
-#define RX_EV_PKT_OK_WIDTH 1
-#define RX_EV_PAUSE_FRM_ERR_LBN 55
-#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
-#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
-#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
-#define RX_EV_IF_FRAG_ERR_LBN 53
-#define RX_EV_IF_FRAG_ERR_WIDTH 1
-#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
-#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
-#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
-#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
-#define RX_EV_ETH_CRC_ERR_LBN 50
-#define RX_EV_ETH_CRC_ERR_WIDTH 1
-#define RX_EV_FRM_TRUNC_LBN 49
-#define RX_EV_FRM_TRUNC_WIDTH 1
-#define RX_EV_DRIB_NIB_LBN 48
-#define RX_EV_DRIB_NIB_WIDTH 1
-#define RX_EV_TOBE_DISC_LBN 47
-#define RX_EV_TOBE_DISC_WIDTH 1
-#define RX_EV_PKT_TYPE_LBN 44
-#define RX_EV_PKT_TYPE_WIDTH 3
-#define RX_EV_PKT_TYPE_ETH_DECODE 0
-#define RX_EV_PKT_TYPE_LLC_DECODE 1
-#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
-#define RX_EV_PKT_TYPE_VLAN_DECODE 3
-#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
-#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
-#define RX_EV_HDR_TYPE_LBN 42
-#define RX_EV_HDR_TYPE_WIDTH 2
-#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
-#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
-#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
-#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
-#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
-	((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
-#define RX_EV_MCAST_HASH_MATCH_LBN 40
-#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
-#define RX_EV_MCAST_PKT_LBN 39
-#define RX_EV_MCAST_PKT_WIDTH 1
-#define RX_EV_Q_LABEL_LBN 32
-#define RX_EV_Q_LABEL_WIDTH 5
-#define RX_EV_JUMBO_CONT_LBN 31
-#define RX_EV_JUMBO_CONT_WIDTH 1
-#define RX_EV_BYTE_CNT_LBN 16
-#define RX_EV_BYTE_CNT_WIDTH 14
-#define RX_EV_SOP_LBN 15
-#define RX_EV_SOP_WIDTH 1
-#define RX_EV_DESC_PTR_LBN 0
-#define RX_EV_DESC_PTR_WIDTH 12
-
-/* Transmit events */
-#define TX_EV_PKT_ERR_LBN 38
-#define TX_EV_PKT_ERR_WIDTH 1
-#define TX_EV_Q_LABEL_LBN 32
-#define TX_EV_Q_LABEL_WIDTH 5
-#define TX_EV_WQ_FF_FULL_LBN 15
-#define TX_EV_WQ_FF_FULL_WIDTH 1
-#define TX_EV_COMP_LBN 12
-#define TX_EV_COMP_WIDTH 1
-#define TX_EV_DESC_PTR_LBN 0
-#define TX_EV_DESC_PTR_WIDTH 12
-
-/* Driver events */
-#define DRIVER_EV_SUB_CODE_LBN 56
-#define DRIVER_EV_SUB_CODE_WIDTH 4
-#define DRIVER_EV_SUB_DATA_LBN 0
-#define DRIVER_EV_SUB_DATA_WIDTH 14
-#define TX_DESCQ_FLS_DONE_EV_DECODE 0
-#define RX_DESCQ_FLS_DONE_EV_DECODE 1
-#define EVQ_INIT_DONE_EV_DECODE 2
-#define EVQ_NOT_EN_EV_DECODE 3
-#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
-#define SRM_UPD_DONE_EV_DECODE 5
-#define WAKE_UP_EV_DECODE 6
-#define TX_PKT_NON_TCP_UDP_DECODE 9
-#define TIMER_EV_DECODE 10
-#define RX_RECOVERY_EV_DECODE 11
-#define RX_DSC_ERROR_EV_DECODE 14
-#define TX_DSC_ERROR_EV_DECODE 15
-#define DRIVER_EV_TX_DESCQ_ID_LBN 0
-#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
-#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
-#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
-#define DRIVER_EV_RX_DESCQ_ID_LBN 0
-#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
-#define SRM_CLR_EV_DECODE 0
-#define SRM_UPD_EV_DECODE 1
-#define SRM_ILLCLR_EV_DECODE 2
-
-/* Global events */
-#define RX_RECOVERY_B0_LBN 12
-#define RX_RECOVERY_B0_WIDTH 1
-#define XG_MNT_INTR_B0_LBN 11
-#define XG_MNT_INTR_B0_WIDTH 1
-#define RX_RECOVERY_A1_LBN 11
-#define RX_RECOVERY_A1_WIDTH 1
-#define XFP_PHY_INTR_LBN 10
-#define XFP_PHY_INTR_WIDTH 1
-#define XG_PHY_INTR_LBN 9
-#define XG_PHY_INTR_WIDTH 1
-#define G_PHY1_INTR_LBN 8
-#define G_PHY1_INTR_WIDTH 1
-#define G_PHY0_INTR_LBN 7
-#define G_PHY0_INTR_WIDTH 1
-
-/* Driver-generated test events */
-#define EVQ_MAGIC_LBN 0
-#define EVQ_MAGIC_WIDTH 32
-
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- *
- */
-
-#define GRxGoodOct_offset 0x0
-#define GRxGoodOct_WIDTH 48
-#define GRxBadOct_offset 0x8
-#define GRxBadOct_WIDTH 48
-#define GRxMissPkt_offset 0x10
-#define GRxMissPkt_WIDTH 32
-#define GRxFalseCRS_offset 0x14
-#define GRxFalseCRS_WIDTH 32
-#define GRxPausePkt_offset 0x18
-#define GRxPausePkt_WIDTH 32
-#define GRxBadPkt_offset 0x1C
-#define GRxBadPkt_WIDTH 32
-#define GRxUcastPkt_offset 0x20
-#define GRxUcastPkt_WIDTH 32
-#define GRxMcastPkt_offset 0x24
-#define GRxMcastPkt_WIDTH 32
-#define GRxBcastPkt_offset 0x28
-#define GRxBcastPkt_WIDTH 32
-#define GRxGoodLt64Pkt_offset 0x2C
-#define GRxGoodLt64Pkt_WIDTH 32
-#define GRxBadLt64Pkt_offset 0x30
-#define GRxBadLt64Pkt_WIDTH 32
-#define GRx64Pkt_offset 0x34
-#define GRx64Pkt_WIDTH 32
-#define GRx65to127Pkt_offset 0x38
-#define GRx65to127Pkt_WIDTH 32
-#define GRx128to255Pkt_offset 0x3C
-#define GRx128to255Pkt_WIDTH 32
-#define GRx256to511Pkt_offset 0x40
-#define GRx256to511Pkt_WIDTH 32
-#define GRx512to1023Pkt_offset 0x44
-#define GRx512to1023Pkt_WIDTH 32
-#define GRx1024to15xxPkt_offset 0x48
-#define GRx1024to15xxPkt_WIDTH 32
-#define GRx15xxtoJumboPkt_offset 0x4C
-#define GRx15xxtoJumboPkt_WIDTH 32
-#define GRxGtJumboPkt_offset 0x50
-#define GRxGtJumboPkt_WIDTH 32
-#define GRxFcsErr64to15xxPkt_offset 0x54
-#define GRxFcsErr64to15xxPkt_WIDTH 32
-#define GRxFcsErr15xxtoJumboPkt_offset 0x58
-#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
-#define GRxFcsErrGtJumboPkt_offset 0x5C
-#define GRxFcsErrGtJumboPkt_WIDTH 32
-#define GTxGoodBadOct_offset 0x80
-#define GTxGoodBadOct_WIDTH 48
-#define GTxGoodOct_offset 0x88
-#define GTxGoodOct_WIDTH 48
-#define GTxSglColPkt_offset 0x90
-#define GTxSglColPkt_WIDTH 32
-#define GTxMultColPkt_offset 0x94
-#define GTxMultColPkt_WIDTH 32
-#define GTxExColPkt_offset 0x98
-#define GTxExColPkt_WIDTH 32
-#define GTxDefPkt_offset 0x9C
-#define GTxDefPkt_WIDTH 32
-#define GTxLateCol_offset 0xA0
-#define GTxLateCol_WIDTH 32
-#define GTxExDefPkt_offset 0xA4
-#define GTxExDefPkt_WIDTH 32
-#define GTxPausePkt_offset 0xA8
-#define GTxPausePkt_WIDTH 32
-#define GTxBadPkt_offset 0xAC
-#define GTxBadPkt_WIDTH 32
-#define GTxUcastPkt_offset 0xB0
-#define GTxUcastPkt_WIDTH 32
-#define GTxMcastPkt_offset 0xB4
-#define GTxMcastPkt_WIDTH 32
-#define GTxBcastPkt_offset 0xB8
-#define GTxBcastPkt_WIDTH 32
-#define GTxLt64Pkt_offset 0xBC
-#define GTxLt64Pkt_WIDTH 32
-#define GTx64Pkt_offset 0xC0
-#define GTx64Pkt_WIDTH 32
-#define GTx65to127Pkt_offset 0xC4
-#define GTx65to127Pkt_WIDTH 32
-#define GTx128to255Pkt_offset 0xC8
-#define GTx128to255Pkt_WIDTH 32
-#define GTx256to511Pkt_offset 0xCC
-#define GTx256to511Pkt_WIDTH 32
-#define GTx512to1023Pkt_offset 0xD0
-#define GTx512to1023Pkt_WIDTH 32
-#define GTx1024to15xxPkt_offset 0xD4
-#define GTx1024to15xxPkt_WIDTH 32
-#define GTx15xxtoJumboPkt_offset 0xD8
-#define GTx15xxtoJumboPkt_WIDTH 32
-#define GTxGtJumboPkt_offset 0xDC
-#define GTxGtJumboPkt_WIDTH 32
-#define GTxNonTcpUdpPkt_offset 0xE0
-#define GTxNonTcpUdpPkt_WIDTH 16
-#define GTxMacSrcErrPkt_offset 0xE4
-#define GTxMacSrcErrPkt_WIDTH 16
-#define GTxIpSrcErrPkt_offset 0xE8
-#define GTxIpSrcErrPkt_WIDTH 16
-#define GDmaDone_offset 0xEC
-#define GDmaDone_WIDTH 32
-
-#define XgRxOctets_offset 0x0
-#define XgRxOctets_WIDTH 48
-#define XgRxOctetsOK_offset 0x8
-#define XgRxOctetsOK_WIDTH 48
-#define XgRxPkts_offset 0x10
-#define XgRxPkts_WIDTH 32
-#define XgRxPktsOK_offset 0x14
-#define XgRxPktsOK_WIDTH 32
-#define XgRxBroadcastPkts_offset 0x18
-#define XgRxBroadcastPkts_WIDTH 32
-#define XgRxMulticastPkts_offset 0x1C
-#define XgRxMulticastPkts_WIDTH 32
-#define XgRxUnicastPkts_offset 0x20
-#define XgRxUnicastPkts_WIDTH 32
-#define XgRxUndersizePkts_offset 0x24
-#define XgRxUndersizePkts_WIDTH 32
-#define XgRxOversizePkts_offset 0x28
-#define XgRxOversizePkts_WIDTH 32
-#define XgRxJabberPkts_offset 0x2C
-#define XgRxJabberPkts_WIDTH 32
-#define XgRxUndersizeFCSerrorPkts_offset 0x30
-#define XgRxUndersizeFCSerrorPkts_WIDTH 32
-#define XgRxDropEvents_offset 0x34
-#define XgRxDropEvents_WIDTH 32
-#define XgRxFCSerrorPkts_offset 0x38
-#define XgRxFCSerrorPkts_WIDTH 32
-#define XgRxAlignError_offset 0x3C
-#define XgRxAlignError_WIDTH 32
-#define XgRxSymbolError_offset 0x40
-#define XgRxSymbolError_WIDTH 32
-#define XgRxInternalMACError_offset 0x44
-#define XgRxInternalMACError_WIDTH 32
-#define XgRxControlPkts_offset 0x48
-#define XgRxControlPkts_WIDTH 32
-#define XgRxPausePkts_offset 0x4C
-#define XgRxPausePkts_WIDTH 32
-#define XgRxPkts64Octets_offset 0x50
-#define XgRxPkts64Octets_WIDTH 32
-#define XgRxPkts65to127Octets_offset 0x54
-#define XgRxPkts65to127Octets_WIDTH 32
-#define XgRxPkts128to255Octets_offset 0x58
-#define XgRxPkts128to255Octets_WIDTH 32
-#define XgRxPkts256to511Octets_offset 0x5C
-#define XgRxPkts256to511Octets_WIDTH 32
-#define XgRxPkts512to1023Octets_offset 0x60
-#define XgRxPkts512to1023Octets_WIDTH 32
-#define XgRxPkts1024to15xxOctets_offset 0x64
-#define XgRxPkts1024to15xxOctets_WIDTH 32
-#define XgRxPkts15xxtoMaxOctets_offset 0x68
-#define XgRxPkts15xxtoMaxOctets_WIDTH 32
-#define XgRxLengthError_offset 0x6C
-#define XgRxLengthError_WIDTH 32
-#define XgTxPkts_offset 0x80
-#define XgTxPkts_WIDTH 32
-#define XgTxOctets_offset 0x88
-#define XgTxOctets_WIDTH 48
-#define XgTxMulticastPkts_offset 0x90
-#define XgTxMulticastPkts_WIDTH 32
-#define XgTxBroadcastPkts_offset 0x94
-#define XgTxBroadcastPkts_WIDTH 32
-#define XgTxUnicastPkts_offset 0x98
-#define XgTxUnicastPkts_WIDTH 32
-#define XgTxControlPkts_offset 0x9C
-#define XgTxControlPkts_WIDTH 32
-#define XgTxPausePkts_offset 0xA0
-#define XgTxPausePkts_WIDTH 32
-#define XgTxPkts64Octets_offset 0xA4
-#define XgTxPkts64Octets_WIDTH 32
-#define XgTxPkts65to127Octets_offset 0xA8
-#define XgTxPkts65to127Octets_WIDTH 32
-#define XgTxPkts128to255Octets_offset 0xAC
-#define XgTxPkts128to255Octets_WIDTH 32
-#define XgTxPkts256to511Octets_offset 0xB0
-#define XgTxPkts256to511Octets_WIDTH 32
-#define XgTxPkts512to1023Octets_offset 0xB4
-#define XgTxPkts512to1023Octets_WIDTH 32
-#define XgTxPkts1024to15xxOctets_offset 0xB8
-#define XgTxPkts1024to15xxOctets_WIDTH 32
-#define XgTxPkts1519toMaxOctets_offset 0xBC
-#define XgTxPkts1519toMaxOctets_WIDTH 32
-#define XgTxUndersizePkts_offset 0xC0
-#define XgTxUndersizePkts_WIDTH 32
-#define XgTxOversizePkts_offset 0xC4
-#define XgTxOversizePkts_WIDTH 32
-#define XgTxNonTcpUdpPkt_offset 0xC8
-#define XgTxNonTcpUdpPkt_WIDTH 16
-#define XgTxMacSrcErrPkt_offset 0xCC
-#define XgTxMacSrcErrPkt_WIDTH 16
-#define XgTxIpSrcErrPkt_offset 0xD0
-#define XgTxIpSrcErrPkt_WIDTH 16
-#define XgDmaDone_offset 0xD4
-
-#define FALCON_STATS_NOT_DONE 0x00000000
-#define FALCON_STATS_DONE 0xffffffff
-
-/* Interrupt status register bits */
-#define FATAL_INT_LBN 64
-#define FATAL_INT_WIDTH 1
-#define INT_EVQS_LBN 40
-#define INT_EVQS_WIDTH 4
-
-/**************************************************************************
- *
- * Falcon non-volatile configuration
- *
- **************************************************************************
- */
-
-/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
-struct falcon_nvconfig_board_v2 {
-	__le16 nports;
-	u8 port0_phy_addr;
-	u8 port0_phy_type;
-	u8 port1_phy_addr;
-	u8 port1_phy_type;
-	__le16 asic_sub_revision;
-	__le16 board_revision;
-} __packed;
-
-/* Board configuration v3 extra information */
-struct falcon_nvconfig_board_v3 {
-	__le32 spi_device_type[2];
-} __packed;
-
-/* Bit numbers for spi_device_type */
-#define SPI_DEV_TYPE_SIZE_LBN 0
-#define SPI_DEV_TYPE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
-#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
-#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
-#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
-#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
-#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
-#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_FIELD(type, field)					\
-	(((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
-
-#define NVCONFIG_OFFSET 0x300
-
-#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
-struct falcon_nvconfig {
-	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
-	u8 mac_address[2][8];			/* 0x310 */
-	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
-	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
-	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
-	efx_oword_t hw_init_reg;			/* 0x350 */
-	efx_oword_t nic_stat_reg;			/* 0x360 */
-	efx_oword_t glb_ctl_reg;			/* 0x370 */
-	efx_oword_t srm_cfg_reg;			/* 0x380 */
-	efx_oword_t spare_reg;				/* 0x390 */
-	__le16 board_magic_num;			/* 0x3A0 */
-	__le16 board_struct_ver;
-	__le16 board_checksum;
-	struct falcon_nvconfig_board_v2 board_v2;
-	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
-	struct falcon_nvconfig_board_v3 board_v3;
-} __packed;
-
-#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
deleted file mode 100644
index 8883092..0000000
--- a/drivers/net/sfc/falcon_io.h
+++ /dev/null
@@ -1,258 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_FALCON_IO_H
-#define EFX_FALCON_IO_H
-
-#include <linux/io.h>
-#include <linux/spinlock.h>
-
-/**************************************************************************
- *
- * Falcon hardware access
- *
- **************************************************************************
- *
- * Notes on locking strategy:
- *
- * Most Falcon registers require 16-byte (or 8-byte, for SRAM
- * registers) atomic writes which necessitates locking.
- * Under normal operation few writes to the Falcon BAR are made and these
- * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
- * cased to allow 4-byte (hence lockless) accesses.
- *
- * It *is* safe to write to these 4-byte registers in the middle of an
- * access to an 8-byte or 16-byte register.  We therefore use a
- * spinlock to protect accesses to the larger registers, but no locks
- * for the 4-byte registers.
- *
- * A write barrier is needed to ensure that DW3 is written after DW0/1/2
- * due to the way the 16byte registers are "collected" in the Falcon BIU
- *
- * We also lock when carrying out reads, to ensure consistency of the
- * data (made possible since the BIU reads all 128 bits into a cache).
- * Reads are very rare, so this isn't a significant performance
- * impact.  (Most data transferred from NIC to host is DMAed directly
- * into host memory).
- *
- * I/O BAR access uses locks for both reads and writes (but is only provided
- * for testing purposes).
- */
-
-/* Special buffer descriptors (Falcon SRAM) */
-#define BUF_TBL_KER_A1 0x18000
-#define BUF_TBL_KER_B0 0x800000
-
-
-#if BITS_PER_LONG == 64
-#define FALCON_USE_QWORD_IO 1
-#endif
-
-#ifdef FALCON_USE_QWORD_IO
-static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
-				  unsigned int reg)
-{
-	__raw_writeq((__force u64)value, efx->membase + reg);
-}
-static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
-{
-	return (__force __le64)__raw_readq(efx->membase + reg);
-}
-#endif
-
-static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
-				  unsigned int reg)
-{
-	__raw_writel((__force u32)value, efx->membase + reg);
-}
-static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
-{
-	return (__force __le32)__raw_readl(efx->membase + reg);
-}
-
-/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
-static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
-				unsigned int reg)
-{
-	unsigned long flags;
-
-	EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
-		    EFX_OWORD_VAL(*value));
-
-	spin_lock_irqsave(&efx->biu_lock, flags);
-#ifdef FALCON_USE_QWORD_IO
-	_falcon_writeq(efx, value->u64[0], reg + 0);
-	wmb();
-	_falcon_writeq(efx, value->u64[1], reg + 8);
-#else
-	_falcon_writel(efx, value->u32[0], reg + 0);
-	_falcon_writel(efx, value->u32[1], reg + 4);
-	_falcon_writel(efx, value->u32[2], reg + 8);
-	wmb();
-	_falcon_writel(efx, value->u32[3], reg + 12);
-#endif
-	mmiowb();
-	spin_unlock_irqrestore(&efx->biu_lock, flags);
-}
-
-/* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */
-static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
-				     unsigned int index)
-{
-	unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
-	unsigned long flags;
-
-	EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
-		    reg, EFX_QWORD_VAL(*value));
-
-	spin_lock_irqsave(&efx->biu_lock, flags);
-#ifdef FALCON_USE_QWORD_IO
-	_falcon_writeq(efx, value->u64[0], reg + 0);
-#else
-	_falcon_writel(efx, value->u32[0], reg + 0);
-	wmb();
-	_falcon_writel(efx, value->u32[1], reg + 4);
-#endif
-	mmiowb();
-	spin_unlock_irqrestore(&efx->biu_lock, flags);
-}
-
-/* Write dword to Falcon register that allows partial writes
- *
- * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
- * TX_DESC_UPD_REG) can be written to as a single dword.  This allows
- * for lockless writes.
- */
-static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
-				 unsigned int reg)
-{
-	EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
-		    reg, EFX_DWORD_VAL(*value));
-
-	/* No lock required */
-	_falcon_writel(efx, value->u32[0], reg);
-}
-
-/* Read from a Falcon register
- *
- * This reads an entire 16-byte Falcon register in one go, locking as
- * appropriate.  It is essential to read the first dword first, as this
- * prompts Falcon to load the current value into the shadow register.
- */
-static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
-			       unsigned int reg)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&efx->biu_lock, flags);
-	value->u32[0] = _falcon_readl(efx, reg + 0);
-	rmb();
-	value->u32[1] = _falcon_readl(efx, reg + 4);
-	value->u32[2] = _falcon_readl(efx, reg + 8);
-	value->u32[3] = _falcon_readl(efx, reg + 12);
-	spin_unlock_irqrestore(&efx->biu_lock, flags);
-
-	EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
-		    EFX_OWORD_VAL(*value));
-}
-
-/* This reads an 8-byte Falcon SRAM entry in one go. */
-static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
-				    unsigned int index)
-{
-	unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
-	unsigned long flags;
-
-	spin_lock_irqsave(&efx->biu_lock, flags);
-#ifdef FALCON_USE_QWORD_IO
-	value->u64[0] = _falcon_readq(efx, reg + 0);
-#else
-	value->u32[0] = _falcon_readl(efx, reg + 0);
-	rmb();
-	value->u32[1] = _falcon_readl(efx, reg + 4);
-#endif
-	spin_unlock_irqrestore(&efx->biu_lock, flags);
-
-	EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
-		    reg, EFX_QWORD_VAL(*value));
-}
-
-/* Read dword from Falcon register that allows partial writes (sic) */
-static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
-				unsigned int reg)
-{
-	value->u32[0] = _falcon_readl(efx, reg);
-	EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
-		    reg, EFX_DWORD_VAL(*value));
-}
-
-/* Write to a register forming part of a table */
-static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
-				      unsigned int reg, unsigned int index)
-{
-	falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
-}
-
-/* Read to a register forming part of a table */
-static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
-				     unsigned int reg, unsigned int index)
-{
-	falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
-}
-
-/* Write to a dword register forming part of a table */
-static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
-				       unsigned int reg, unsigned int index)
-{
-	falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
-}
-
-/* Page-mapped register block size */
-#define FALCON_PAGE_BLOCK_SIZE 0x2000
-
-/* Calculate offset to page-mapped register block */
-#define FALCON_PAGED_REG(page, reg) \
-	((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
-
-/* As for falcon_write(), but for a page-mapped register. */
-static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
-				     unsigned int reg, unsigned int page)
-{
-	falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
-}
-
-/* As for falcon_writel(), but for a page-mapped register. */
-static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
-				      unsigned int reg, unsigned int page)
-{
-	falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
-}
-
-/* Write dword to Falcon page-mapped register with an extra lock.
- *
- * As for falcon_writel_page(), but for a register that suffers from
- * SFC bug 3181.  If writing to page 0, take out a lock so the BIU
- * collector cannot be confused.
- */
-static inline void falcon_writel_page_locked(struct efx_nic *efx,
-					     efx_dword_t *value,
-					     unsigned int reg,
-					     unsigned int page)
-{
-	unsigned long flags = 0;
-
-	if (page == 0)
-		spin_lock_irqsave(&efx->biu_lock, flags);
-	falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
-	if (page == 0)
-		spin_unlock_irqrestore(&efx->biu_lock, flags);
-}
-
-#endif /* EFX_FALCON_IO_H */
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index bec52ca..7e57b4a 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -12,12 +12,11 @@
 #include "net_driver.h"
 #include "efx.h"
 #include "falcon.h"
-#include "falcon_hwdefs.h"
-#include "falcon_io.h"
+#include "regs.h"
+#include "io.h"
 #include "mac.h"
 #include "mdio_10g.h"
 #include "phy.h"
-#include "boards.h"
 #include "workarounds.h"
 
 /**************************************************************************
@@ -36,27 +35,27 @@
 	if (efx->phy_type == PHY_TYPE_NONE)
 		return;
 
-	falcon_read(efx, &sdctl, XX_SD_CTL_REG);
-	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
-	EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
-	falcon_write(efx, &sdctl, XX_SD_CTL_REG);
+	efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+	efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
 
 	EFX_POPULATE_OWORD_8(txdrv,
-			     XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
-			     XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
-			     XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
-			     XX_DEQA, XX_TXDRV_DEQ_DEFAULT,
-			     XX_DTXD, XX_TXDRV_DTX_DEFAULT,
-			     XX_DTXC, XX_TXDRV_DTX_DEFAULT,
-			     XX_DTXB, XX_TXDRV_DTX_DEFAULT,
-			     XX_DTXA, XX_TXDRV_DTX_DEFAULT);
-	falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG);
+			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
+			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
+			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
+			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
+			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
+	efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
 }
 
 int falcon_reset_xaui(struct efx_nic *efx)
@@ -65,14 +64,14 @@
 	int count;
 
 	/* Start reset sequence */
-	EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
-	falcon_write(efx, &reg, XX_PWR_RST_REG);
+	EFX_POPULATE_DWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
+	efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
 
 	/* Wait up to 10 ms for completion, then reinitialise */
 	for (count = 0; count < 1000; count++) {
-		falcon_read(efx, &reg, XX_PWR_RST_REG);
-		if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 &&
-		    EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) {
+		efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
+		if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
+		    EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
 			falcon_setup_xaui(efx);
 			return 0;
 		}
@@ -100,12 +99,12 @@
 
 	/* Flush the ISR */
 	if (enable)
-		falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
+		efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
 
 	EFX_POPULATE_OWORD_2(reg,
-			     XM_MSK_RMTFLT, !enable,
-			     XM_MSK_LCLFLT, !enable);
-	falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0);
+			     FRF_AB_XM_MSK_RMTFLT, !enable,
+			     FRF_AB_XM_MSK_LCLFLT, !enable);
+	efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
 }
 
 /* Get status of XAUI link */
@@ -119,18 +118,18 @@
 		return true;
 
 	/* Read link status */
-	falcon_read(efx, &reg, XX_CORE_STAT_REG);
+	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
 
-	align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE);
-	sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT);
-	if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
+	align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+	sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
+	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
 		link_ok = true;
 
 	/* Clear link status ready for next read */
-	EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
-	EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
-	EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
-	falcon_write(efx, &reg, XX_CORE_STAT_REG);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
+	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
 
 	/* If the link is up, then check the phy side of the xaui link */
 	if (efx->link_up && link_ok)
@@ -148,55 +147,49 @@
 
 	/* Configure MAC  - cut-thru mode is hard wired on */
 	EFX_POPULATE_DWORD_3(reg,
-			     XM_RX_JUMBO_MODE, 1,
-			     XM_TX_STAT_EN, 1,
-			     XM_RX_STAT_EN, 1);
-	falcon_write(efx, &reg, XM_GLB_CFG_REG);
+			     FRF_AB_XM_RX_JUMBO_MODE, 1,
+			     FRF_AB_XM_TX_STAT_EN, 1,
+			     FRF_AB_XM_RX_STAT_EN, 1);
+	efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
 
 	/* Configure TX */
 	EFX_POPULATE_DWORD_6(reg,
-			     XM_TXEN, 1,
-			     XM_TX_PRMBL, 1,
-			     XM_AUTO_PAD, 1,
-			     XM_TXCRC, 1,
-			     XM_FCNTL, 1,
-			     XM_IPG, 0x3);
-	falcon_write(efx, &reg, XM_TX_CFG_REG);
+			     FRF_AB_XM_TXEN, 1,
+			     FRF_AB_XM_TX_PRMBL, 1,
+			     FRF_AB_XM_AUTO_PAD, 1,
+			     FRF_AB_XM_TXCRC, 1,
+			     FRF_AB_XM_FCNTL, 1,
+			     FRF_AB_XM_IPG, 0x3);
+	efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
 
 	/* Configure RX */
 	EFX_POPULATE_DWORD_5(reg,
-			     XM_RXEN, 1,
-			     XM_AUTO_DEPAD, 0,
-			     XM_ACPT_ALL_MCAST, 1,
-			     XM_ACPT_ALL_UCAST, efx->promiscuous,
-			     XM_PASS_CRC_ERR, 1);
-	falcon_write(efx, &reg, XM_RX_CFG_REG);
+			     FRF_AB_XM_RXEN, 1,
+			     FRF_AB_XM_AUTO_DEPAD, 0,
+			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
+			     FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
+			     FRF_AB_XM_PASS_CRC_ERR, 1);
+	efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
 
 	/* Set frame length */
 	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
-	EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
-	falcon_write(efx, &reg, XM_RX_PARAM_REG);
+	EFX_POPULATE_DWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
+	efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
 	EFX_POPULATE_DWORD_2(reg,
-			     XM_MAX_TX_FRM_SIZE, max_frame_len,
-			     XM_TX_JUMBO_MODE, 1);
-	falcon_write(efx, &reg, XM_TX_PARAM_REG);
+			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
+			     FRF_AB_XM_TX_JUMBO_MODE, 1);
+	efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
 
 	EFX_POPULATE_DWORD_2(reg,
-			     XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
-			     XM_DIS_FCNTL, !rx_fc);
-	falcon_write(efx, &reg, XM_FC_REG);
+			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
+			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
+	efx_writeo(efx, &reg, FR_AB_XM_FC);
 
 	/* Set MAC address */
-	EFX_POPULATE_DWORD_4(reg,
-			     XM_ADR_0, efx->net_dev->dev_addr[0],
-			     XM_ADR_1, efx->net_dev->dev_addr[1],
-			     XM_ADR_2, efx->net_dev->dev_addr[2],
-			     XM_ADR_3, efx->net_dev->dev_addr[3]);
-	falcon_write(efx, &reg, XM_ADR_LO_REG);
-	EFX_POPULATE_DWORD_2(reg,
-			     XM_ADR_4, efx->net_dev->dev_addr[4],
-			     XM_ADR_5, efx->net_dev->dev_addr[5]);
-	falcon_write(efx, &reg, XM_ADR_HI_REG);
+	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
+	efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
+	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
+	efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
 }
 
 static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
@@ -212,12 +205,13 @@
 		bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
 		bool reset_xgxs;
 
-		falcon_read(efx, &reg, XX_CORE_STAT_REG);
-		old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN);
-		old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN);
+		efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+		old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+		old_xgmii_loopback =
+			EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
 
-		falcon_read(efx, &reg, XX_SD_CTL_REG);
-		old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA);
+		efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+		old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
 
 		/* The PHY driver may have turned XAUI off */
 		reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
@@ -228,20 +222,20 @@
 			falcon_reset_xaui(efx);
 	}
 
-	falcon_read(efx, &reg, XX_CORE_STAT_REG);
-	EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG,
+	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
 			    (xgxs_loopback || xaui_loopback) ?
-			    XX_FORCE_SIG_DECODE_FORCED : 0);
-	EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
-	EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
-	falcon_write(efx, &reg, XX_CORE_STAT_REG);
+			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
+	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
 
-	falcon_read(efx, &reg, XX_SD_CTL_REG);
-	EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
-	EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
-	EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
-	EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
-	falcon_write(efx, &reg, XX_SD_CTL_REG);
+	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
+	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
+	efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
 }
 
 
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
deleted file mode 100644
index dfccaa7..0000000
--- a/drivers/net/sfc/gmii.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_GMII_H
-#define EFX_GMII_H
-
-/*
- * GMII interface
- */
-
-#include <linux/mii.h>
-
-/* GMII registers, excluding registers already defined as MII
- * registers in mii.h
- */
-#define GMII_IER		0x12	/* Interrupt enable register */
-#define GMII_ISR		0x13	/* Interrupt status register */
-
-/* Interrupt enable register */
-#define IER_ANEG_ERR		0x8000	/* Bit 15 - autonegotiation error */
-#define IER_SPEED_CHG		0x4000	/* Bit 14 - speed changed */
-#define IER_DUPLEX_CHG		0x2000	/* Bit 13 - duplex changed */
-#define IER_PAGE_RCVD		0x1000	/* Bit 12 - page received */
-#define IER_ANEG_DONE		0x0800	/* Bit 11 - autonegotiation complete */
-#define IER_LINK_CHG		0x0400	/* Bit 10 - link status changed */
-#define IER_SYM_ERR		0x0200	/* Bit 9 - symbol error */
-#define IER_FALSE_CARRIER	0x0100	/* Bit 8 - false carrier */
-#define IER_FIFO_ERR		0x0080	/* Bit 7 - FIFO over/underflow */
-#define IER_MDIX_CHG		0x0040	/* Bit 6 - MDI crossover changed */
-#define IER_DOWNSHIFT		0x0020	/* Bit 5 - downshift */
-#define IER_ENERGY		0x0010	/* Bit 4 - energy detect */
-#define IER_DTE_POWER		0x0004	/* Bit 2 - DTE power detect */
-#define IER_POLARITY_CHG	0x0002	/* Bit 1 - polarity changed */
-#define IER_JABBER		0x0001	/* Bit 0 - jabber */
-
-/* Interrupt status register */
-#define ISR_ANEG_ERR		0x8000	/* Bit 15 - autonegotiation error */
-#define ISR_SPEED_CHG		0x4000	/* Bit 14 - speed changed */
-#define ISR_DUPLEX_CHG		0x2000	/* Bit 13 - duplex changed */
-#define ISR_PAGE_RCVD		0x1000	/* Bit 12 - page received */
-#define ISR_ANEG_DONE		0x0800	/* Bit 11 - autonegotiation complete */
-#define ISR_LINK_CHG		0x0400	/* Bit 10 - link status changed */
-#define ISR_SYM_ERR		0x0200	/* Bit 9 - symbol error */
-#define ISR_FALSE_CARRIER	0x0100	/* Bit 8 - false carrier */
-#define ISR_FIFO_ERR		0x0080	/* Bit 7 - FIFO over/underflow */
-#define ISR_MDIX_CHG		0x0040	/* Bit 6 - MDI crossover changed */
-#define ISR_DOWNSHIFT		0x0020	/* Bit 5 - downshift */
-#define ISR_ENERGY		0x0010	/* Bit 4 - energy detect */
-#define ISR_DTE_POWER		0x0004	/* Bit 2 - DTE power detect */
-#define ISR_POLARITY_CHG	0x0002	/* Bit 1 - polarity changed */
-#define ISR_JABBER		0x0001	/* Bit 0 - jabber */
-
-#endif /* EFX_GMII_H */
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
new file mode 100644
index 0000000..b89177c
--- /dev/null
+++ b/drivers/net/sfc/io.h
@@ -0,0 +1,256 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_IO_H
+#define EFX_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+/**************************************************************************
+ *
+ * NIC register I/O
+ *
+ **************************************************************************
+ *
+ * Notes on locking strategy:
+ *
+ * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes
+ * which necessitates locking.
+ * Under normal operation few writes to NIC registers are made and these
+ * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
+ * cased to allow 4-byte (hence lockless) accesses.
+ *
+ * It *is* safe to write to these 4-byte registers in the middle of an
+ * access to an 8-byte or 16-byte register.  We therefore use a
+ * spinlock to protect accesses to the larger registers, but no locks
+ * for the 4-byte registers.
+ *
+ * A write barrier is needed to ensure that DW3 is written after DW0/1/2
+ * due to the way the 16-byte registers are "collected" in the BIU.
+ *
+ * We also lock when carrying out reads, to ensure consistency of the
+ * data (made possible since the BIU reads all 128 bits into a cache).
+ * Reads are very rare, so this isn't a significant performance
+ * impact.  (Most data transferred from NIC to host is DMAed directly
+ * into host memory).
+ *
+ * I/O BAR access uses locks for both reads and writes (but is only provided
+ * for testing purposes).
+ */
+
+#if BITS_PER_LONG == 64
+#define EFX_USE_QWORD_IO 1
+#endif
+
+#ifdef EFX_USE_QWORD_IO
+static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
+				  unsigned int reg)
+{
+	__raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _efx_writed(struct efx_nic *efx, __le32 value,
+				  unsigned int reg)
+{
+	__raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le32)__raw_readl(efx->membase + reg);
+}
+
+/* Writes to a normal 16-byte Efx register, locking as appropriate. */
+static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
+			      unsigned int reg)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
+		    EFX_OWORD_VAL(*value));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	_efx_writeq(efx, value->u64[0], reg + 0);
+	wmb();
+	_efx_writeq(efx, value->u64[1], reg + 8);
+#else
+	_efx_writed(efx, value->u32[0], reg + 0);
+	_efx_writed(efx, value->u32[1], reg + 4);
+	_efx_writed(efx, value->u32[2], reg + 8);
+	wmb();
+	_efx_writed(efx, value->u32[3], reg + 12);
+#endif
+	mmiowb();
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write an 8-byte NIC SRAM entry through the supplied mapping,
+ * locking as appropriate. */
+static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
+				   efx_qword_t *value, unsigned int index)
+{
+	unsigned int addr = index * sizeof(*value);
+	unsigned long flags __attribute__ ((unused));
+
+	EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+		    addr, EFX_QWORD_VAL(*value));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	__raw_writeq((__force u64)value->u64[0], membase + addr);
+#else
+	__raw_writel((__force u32)value->u32[0], membase + addr);
+	wmb();
+	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
+#endif
+	mmiowb();
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write dword to NIC register that allows partial writes
+ *
+ * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
+ * TX_DESC_UPD_REG) can be written to as a single dword.  This allows
+ * for lockless writes.
+ */
+static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
+			      unsigned int reg)
+{
+	EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
+		    reg, EFX_DWORD_VAL(*value));
+
+	/* No lock required */
+	_efx_writed(efx, value->u32[0], reg);
+}
+
+/* Read from a NIC register
+ *
+ * This reads an entire 16-byte register in one go, locking as
+ * appropriate.  It is essential to read the first dword first, as this
+ * prompts the NIC to load the current value into the shadow register.
+ */
+static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
+			     unsigned int reg)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+	value->u32[0] = _efx_readd(efx, reg + 0);
+	rmb();
+	value->u32[1] = _efx_readd(efx, reg + 4);
+	value->u32[2] = _efx_readd(efx, reg + 8);
+	value->u32[3] = _efx_readd(efx, reg + 12);
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+	EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+		    EFX_OWORD_VAL(*value));
+}
+
+/* Read an 8-byte SRAM entry through the supplied mapping,
+ * locking as appropriate. */
+static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
+				  efx_qword_t *value, unsigned int index)
+{
+	unsigned int addr = index * sizeof(*value);
+	unsigned long flags __attribute__ ((unused));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
+#else
+	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+	rmb();
+	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
+#endif
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+	EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
+		    addr, EFX_QWORD_VAL(*value));
+}
+
+/* Read dword from register that allows partial writes (sic) */
+static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
+				unsigned int reg)
+{
+	value->u32[0] = _efx_readd(efx, reg);
+	EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
+		    reg, EFX_DWORD_VAL(*value));
+}
+
+/* Write to a register forming part of a table */
+static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
+				      unsigned int reg, unsigned int index)
+{
+	efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Read to a register forming part of a table */
+static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
+				     unsigned int reg, unsigned int index)
+{
+	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Write to a dword register forming part of a table */
+static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
+				       unsigned int reg, unsigned int index)
+{
+	efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Page-mapped register block size */
+#define EFX_PAGE_BLOCK_SIZE 0x2000
+
+/* Calculate offset to page-mapped register block */
+#define EFX_PAGED_REG(page, reg) \
+	((page) * EFX_PAGE_BLOCK_SIZE + (reg))
+
+/* As for efx_writeo(), but for a page-mapped register. */
+static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
+				   unsigned int reg, unsigned int page)
+{
+	efx_writeo(efx, value, EFX_PAGED_REG(page, reg));
+}
+
+/* As for efx_writed(), but for a page-mapped register. */
+static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
+				   unsigned int reg, unsigned int page)
+{
+	efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+}
+
+/* Write dword to page-mapped register with an extra lock.
+ *
+ * As for efx_writed_page(), but for a register that suffers from
+ * SFC bug 3181.  If writing to page 0, take out a lock so the BIU
+ * collector cannot be confused. */
+static inline void efx_writed_page_locked(struct efx_nic *efx,
+					  efx_dword_t *value,
+					  unsigned int reg,
+					  unsigned int page)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	if (page == 0) {
+		spin_lock_irqsave(&efx->biu_lock, flags);
+		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+		spin_unlock_irqrestore(&efx->biu_lock, flags);
+	} else {
+		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+	}
+}
+
+#endif /* EFX_IO_H */
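
(For orientation, a minimal usage sketch of the renamed I/O helpers -- not part
of the patch.  The register and field names are borrowed from the
falcon_xmac.c hunks above; the wrapper function itself is hypothetical.)

#include "net_driver.h"
#include "io.h"
#include "regs.h"

static void example_set_xaui_loopback(struct efx_nic *efx, bool enable)
{
	efx_oword_t reg;

	/* efx_reado()/efx_writeo() take efx->biu_lock internally, so the
	 * four dword accesses making up each 16-byte transfer cannot be
	 * interleaved with another oword access. */
	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, enable);
	efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}
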
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 6c33459..231e580 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -14,7 +14,6 @@
 #include <linux/delay.h>
 #include "net_driver.h"
 #include "mdio_10g.h"
-#include "boards.h"
 #include "workarounds.h"
 
 unsigned efx_mdio_id_oui(u32 id)
@@ -249,7 +248,7 @@
 int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
 	struct ethtool_cmd prev;
-	u32 required;
+	bool xnp;
 	int reg;
 
 	efx->phy_op->get_settings(efx, &prev);
@@ -266,86 +265,60 @@
 		return -EINVAL;
 
 	/* Check that PHY supports these settings */
-	if (ecmd->autoneg) {
-		required = SUPPORTED_Autoneg;
-	} else if (ecmd->duplex) {
-		switch (ecmd->speed) {
-		case SPEED_10:  required = SUPPORTED_10baseT_Full;  break;
-		case SPEED_100: required = SUPPORTED_100baseT_Full; break;
-		default:        return -EINVAL;
-		}
-	} else {
-		switch (ecmd->speed) {
-		case SPEED_10:  required = SUPPORTED_10baseT_Half;  break;
-		case SPEED_100: required = SUPPORTED_100baseT_Half; break;
-		default:        return -EINVAL;
-		}
-	}
-	required |= ecmd->advertising;
-	if (required & ~prev.supported)
+	if (!ecmd->autoneg ||
+	    (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
 		return -EINVAL;
 
-	if (ecmd->autoneg) {
-		bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
-			    || EFX_WORKAROUND_13204(efx));
+	xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
+	       || EFX_WORKAROUND_13204(efx));
 
-		/* Set up the base page */
-		reg = ADVERTISE_CSMA;
-		if (ecmd->advertising & ADVERTISED_10baseT_Half)
-			reg |= ADVERTISE_10HALF;
-		if (ecmd->advertising & ADVERTISED_10baseT_Full)
-			reg |= ADVERTISE_10FULL;
-		if (ecmd->advertising & ADVERTISED_100baseT_Half)
-			reg |= ADVERTISE_100HALF;
-		if (ecmd->advertising & ADVERTISED_100baseT_Full)
-			reg |= ADVERTISE_100FULL;
-		if (xnp)
-			reg |= ADVERTISE_RESV;
-		else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
-					      ADVERTISED_1000baseT_Full))
-			reg |= ADVERTISE_NPAGE;
-		reg |= mii_advertise_flowctrl(efx->wanted_fc);
-		efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+	/* Set up the base page */
+	reg = ADVERTISE_CSMA;
+	if (ecmd->advertising & ADVERTISED_10baseT_Half)
+		reg |= ADVERTISE_10HALF;
+	if (ecmd->advertising & ADVERTISED_10baseT_Full)
+		reg |= ADVERTISE_10FULL;
+	if (ecmd->advertising & ADVERTISED_100baseT_Half)
+		reg |= ADVERTISE_100HALF;
+	if (ecmd->advertising & ADVERTISED_100baseT_Full)
+		reg |= ADVERTISE_100FULL;
+	if (xnp)
+		reg |= ADVERTISE_RESV;
+	else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
+				      ADVERTISED_1000baseT_Full))
+		reg |= ADVERTISE_NPAGE;
+	reg |= mii_advertise_flowctrl(efx->wanted_fc);
+	efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
 
-		/* Set up the (extended) next page if necessary */
-		if (efx->phy_op->set_npage_adv)
-			efx->phy_op->set_npage_adv(efx, ecmd->advertising);
+	/* Set up the (extended) next page if necessary */
+	if (efx->phy_op->set_npage_adv)
+		efx->phy_op->set_npage_adv(efx, ecmd->advertising);
 
-		/* Enable and restart AN */
-		reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
-		reg |= MDIO_AN_CTRL1_ENABLE;
-		if (!(EFX_WORKAROUND_15195(efx) &&
-		      LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
-			reg |= MDIO_AN_CTRL1_RESTART;
-		if (xnp)
-			reg |= MDIO_AN_CTRL1_XNP;
-		else
-			reg &= ~MDIO_AN_CTRL1_XNP;
-		efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
-	} else {
-		/* Disable AN */
-		efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_CTRL1,
-				  MDIO_AN_CTRL1_ENABLE, false);
-
-		/* Set the basic control bits */
-		reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1);
-		reg &= ~(MDIO_CTRL1_SPEEDSEL | MDIO_CTRL1_FULLDPLX);
-		if (ecmd->speed == SPEED_100)
-			reg |= MDIO_PMA_CTRL1_SPEED100;
-		if (ecmd->duplex)
-			reg |= MDIO_CTRL1_FULLDPLX;
-		efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, reg);
-	}
+	/* Enable and restart AN */
+	reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
+	reg |= MDIO_AN_CTRL1_ENABLE;
+	if (!(EFX_WORKAROUND_15195(efx) &&
+	      LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
+		reg |= MDIO_AN_CTRL1_RESTART;
+	if (xnp)
+		reg |= MDIO_AN_CTRL1_XNP;
+	else
+		reg &= ~MDIO_AN_CTRL1_XNP;
+	efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
 
 	return 0;
 }
 
 enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
 {
-	int lpa;
+	BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
 
-	if (!(efx->phy_op->mmds & MDIO_DEVS_AN))
+	if (!(efx->wanted_fc & EFX_FC_AUTO))
 		return efx->wanted_fc;
-	lpa = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA);
-	return efx_fc_resolve(efx->wanted_fc, lpa);
+
+	WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
+
+	return mii_resolve_flowctrl_fdx(
+		mii_advertise_flowctrl(efx->wanted_fc),
+		efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
 }
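
(A worked illustration of the new pause resolution path -- a sketch, not part
of the patch.  It assumes the generic helpers and flag definitions from
<linux/mii.h>, and that EFX_FC_RX/EFX_FC_TX have the same values as the
generic FLOW_CTRL_RX/FLOW_CTRL_TX flags, which is what lets efx->wanted_fc be
passed straight to mii_advertise_flowctrl().  The function name is
hypothetical.)

#include <linux/mii.h>

static u8 example_resolve_pause(void)
{
	/* Local port wants symmetric pause, so it advertises only
	 * ADVERTISE_PAUSE_CAP (the ASYM bit cancels out). */
	u16 local_adv = mii_advertise_flowctrl(FLOW_CTRL_RX | FLOW_CTRL_TX);

	/* Assume the link partner also advertises symmetric pause; in the
	 * driver this value comes from the MDIO_AN_LPA register. */
	u16 remote_lpa = ADVERTISE_PAUSE_CAP;

	/* Resolves to FLOW_CTRL_TX | FLOW_CTRL_RX. */
	return mii_resolve_flowctrl_fdx(local_adv, remote_lpa);
}
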
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 6b14421..75b37f1 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -17,7 +17,6 @@
  */
 
 #include "efx.h"
-#include "boards.h"
 
 static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
 static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 298566d..bb3d258 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -327,7 +327,7 @@
  * @used_flags: Channel is used by net driver
  * @enabled: Channel enabled indicator
  * @irq: IRQ number (MSI and MSI-X only)
- * @irq_moderation: IRQ moderation value (in us)
+ * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
  * @reset_work: Scheduled reset work thread
@@ -389,19 +389,6 @@
 };
 
 /**
- * struct efx_blinker - S/W LED blinking context
- * @state: Current state - on or off
- * @resubmit: Timer resubmission flag
- * @timer: Control timer for blinking
- */
-struct efx_blinker {
-	bool state;
-	bool resubmit;
-	struct timer_list timer;
-};
-
-
-/**
  * struct efx_board - board information
  * @type: Board model type
  * @major: Major rev. ('A', 'B' ...)
@@ -412,7 +399,9 @@
  * @blink: Starts/stops blinking
  * @monitor: Board-specific health check function
  * @fini: Cleanup function
- * @blinker: used to blink LEDs in software
+ * @blink_state: Current blink state
+ * @blink_resubmit: Blink timer resubmission flag
+ * @blink_timer: Blink timer
  * @hwmon_client: I2C client for hardware monitor
  * @ioexp_client: I2C client for power/port control
  */
@@ -429,7 +418,9 @@
 	int (*monitor) (struct efx_nic *nic);
 	void (*blink) (struct efx_nic *efx, bool start);
 	void (*fini) (struct efx_nic *nic);
-	struct efx_blinker blinker;
+	bool blink_state;
+	bool blink_resubmit;
+	struct timer_list blink_timer;
 	struct i2c_client *hwmon_client, *ioexp_client;
 };
 
@@ -506,17 +497,6 @@
 	EFX_XMAC = 2,
 };
 
-static inline enum efx_fc_type efx_fc_resolve(enum efx_fc_type wanted_fc,
-					      unsigned int lpa)
-{
-	BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
-
-	if (!(wanted_fc & EFX_FC_AUTO))
-		return wanted_fc;
-
-	return mii_resolve_flowctrl_fdx(mii_advertise_flowctrl(wanted_fc), lpa);
-}
-
 /**
  * struct efx_mac_operations - Efx MAC operations table
  * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
@@ -537,7 +517,6 @@
  * @fini: Shut down PHY
  * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
  * @clear_interrupt: Clear down interrupt
- * @blink: Blink LEDs
  * @poll: Poll for hardware state. Serialised by the mac_lock.
  * @get_settings: Get ethtool settings. Serialised by the mac_lock.
  * @set_settings: Set ethtool settings. Serialised by the mac_lock.
@@ -697,10 +676,13 @@
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
  * @channel: Channels
+ * @next_buffer_table: First available buffer table id
  * @n_rx_queues: Number of RX queues
  * @n_channels: Number of channels in use
  * @rx_buffer_len: RX buffer length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @int_error_count: Number of internal errors seen recently
+ * @int_error_expire: Time at which error count will be expired
  * @irq_status: Interrupt status buffer
  * @last_irq_cpu: Last CPU to handle interrupt.
  *	This register is written with the SMP processor ID whenever an
@@ -784,11 +766,15 @@
 	struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
 	struct efx_channel channel[EFX_MAX_CHANNELS];
 
+	unsigned next_buffer_table;
 	int n_rx_queues;
 	int n_channels;
 	unsigned int rx_buffer_len;
 	unsigned int rx_buffer_order;
 
+	unsigned int_error_count;
+	unsigned long int_error_expire;
+
 	struct efx_buffer irq_status;
 	volatile signed int last_irq_cpu;
 
@@ -869,14 +855,7 @@
  * @buf_tbl_base: Buffer table base address
  * @evq_ptr_tbl_base: Event queue pointer table base address
  * @evq_rptr_tbl_base: Event queue read-pointer table base address
- * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
- * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
- * @evq_size: Event queue size (must be a power of two)
  * @max_dma_mask: Maximum possible DMA mask
- * @tx_dma_mask: TX DMA mask
- * @bug5391_mask: Address mask for bug 5391 workaround
- * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
- * @rx_xon_thresh: RX FIFO XON watermark (bytes)
  * @rx_buffer_padding: Padding added to each RX buffer
  * @max_interrupt_mode: Highest capability interrupt mode supported
  *	from &enum efx_init_mode.
@@ -892,15 +871,8 @@
 	unsigned int evq_ptr_tbl_base;
 	unsigned int evq_rptr_tbl_base;
 
-	unsigned int txd_ring_mask;
-	unsigned int rxd_ring_mask;
-	unsigned int evq_size;
 	u64 max_dma_mask;
-	unsigned int tx_dma_mask;
-	unsigned bug5391_mask;
 
-	int rx_xoff_thresh;
-	int rx_xon_thresh;
 	unsigned int rx_buffer_padding;
 	unsigned int max_interrupt_mode;
 	unsigned int phys_addr_channels;
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index c1cff9c..b5150f3 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -23,9 +23,9 @@
 extern int sft9001_wait_boot(struct efx_nic *efx);
 
 /****************************************************************************
- * AMCC/Quake QT20xx PHYs
+ * AMCC/Quake QT202x PHYs
  */
-extern struct efx_phy_operations falcon_xfp_phy_ops;
+extern struct efx_phy_operations falcon_qt202x_phy_ops;
 
 /* These PHYs provide various H/W control states for LEDs */
 #define QUAKE_LED_LINK_INVAL	(0)
@@ -39,6 +39,6 @@
 #define QUAKE_LED_TXLINK	(0)
 #define QUAKE_LED_RXLINK	(8)
 
-extern void xfp_set_led(struct efx_nic *p, int led, int state);
+extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
 
 #endif
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/qt202x_phy.c
similarity index 73%
rename from drivers/net/sfc/xfp_phy.c
rename to drivers/net/sfc/qt202x_phy.c
index e6b3d5e..560eb18 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -7,8 +7,7 @@
  * by the Free Software Foundation, incorporated herein by reference.
  */
 /*
- * Driver for SFP+ and XFP optical PHYs plus some support specific to the
- * AMCC QT20xx adapters; see www.amcc.com for details
+ * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
  */
 
 #include <linux/timer.h>
@@ -18,13 +17,13 @@
 #include "phy.h"
 #include "falcon.h"
 
-#define XFP_REQUIRED_DEVS (MDIO_DEVS_PCS |	\
-			   MDIO_DEVS_PMAPMD |	\
-			   MDIO_DEVS_PHYXS)
+#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS |		\
+			      MDIO_DEVS_PMAPMD |	\
+			      MDIO_DEVS_PHYXS)
 
-#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) |		\
-		       (1 << LOOPBACK_PMAPMD) |		\
-		       (1 << LOOPBACK_NETWORK))
+#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) |		\
+			  (1 << LOOPBACK_PMAPMD) |	\
+			  (1 << LOOPBACK_NETWORK))
 
 /****************************************************************************/
 /* Quake-specific MDIO registers */
@@ -45,18 +44,18 @@
 #define PCS_VEND1_REG	   	0xc000
 #define PCS_VEND1_LBTXD_LBN	5
 
-void xfp_set_led(struct efx_nic *p, int led, int mode)
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
 {
 	int addr = MDIO_QUAKE_LED0_REG + led;
 	efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
 }
 
-struct xfp_phy_data {
+struct qt202x_phy_data {
 	enum efx_phy_mode phy_mode;
 };
 
-#define XFP_MAX_RESET_TIME 500
-#define XFP_RESET_WAIT 10
+#define QT2022C2_MAX_RESET_TIME 500
+#define QT2022C2_RESET_WAIT 10
 
 static int qt2025c_wait_reset(struct efx_nic *efx)
 {
@@ -97,7 +96,7 @@
 	return 0;
 }
 
-static int xfp_reset_phy(struct efx_nic *efx)
+static int qt202x_reset_phy(struct efx_nic *efx)
 {
 	int rc;
 
@@ -111,8 +110,9 @@
 		/* Reset the PHYXS MMD. This is documented as doing
 		 * a complete soft reset. */
 		rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
-					XFP_MAX_RESET_TIME / XFP_RESET_WAIT,
-					XFP_RESET_WAIT);
+					QT2022C2_MAX_RESET_TIME /
+					QT2022C2_RESET_WAIT,
+					QT2022C2_RESET_WAIT);
 		if (rc < 0)
 			goto fail;
 	}
@@ -122,7 +122,7 @@
 
 	/* Check that all the MMDs we expect are present and responding. We
 	 * expect faults on some if the link is down, but not on the PHY XS */
-	rc = efx_mdio_check_mmds(efx, XFP_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
+	rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
 	if (rc < 0)
 		goto fail;
 
@@ -135,13 +135,13 @@
 	return rc;
 }
 
-static int xfp_phy_init(struct efx_nic *efx)
+static int qt202x_phy_init(struct efx_nic *efx)
 {
-	struct xfp_phy_data *phy_data;
+	struct qt202x_phy_data *phy_data;
 	u32 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
 	int rc;
 
-	phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
+	phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
 	if (!phy_data)
 		return -ENOMEM;
 	efx->phy_data = phy_data;
@@ -152,7 +152,7 @@
 
 	phy_data->phy_mode = efx->phy_mode;
 
-	rc = xfp_reset_phy(efx);
+	rc = qt202x_reset_phy(efx);
 
 	EFX_INFO(efx, "PHY init %s.\n",
 		 rc ? "failed" : "successful");
@@ -167,28 +167,28 @@
 	return rc;
 }
 
-static void xfp_phy_clear_interrupt(struct efx_nic *efx)
+static void qt202x_phy_clear_interrupt(struct efx_nic *efx)
 {
 	/* Read to clear link status alarm */
 	efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT);
 }
 
-static int xfp_link_ok(struct efx_nic *efx)
+static int qt202x_link_ok(struct efx_nic *efx)
 {
-	return efx_mdio_links_ok(efx, XFP_REQUIRED_DEVS);
+	return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
 }
 
-static void xfp_phy_poll(struct efx_nic *efx)
+static void qt202x_phy_poll(struct efx_nic *efx)
 {
-	int link_up = xfp_link_ok(efx);
+	int link_up = qt202x_link_ok(efx);
 	/* Simulate a PHY event if link state has changed */
 	if (link_up != efx->link_up)
 		falcon_sim_phy_event(efx);
 }
 
-static void xfp_phy_reconfigure(struct efx_nic *efx)
+static void qt202x_phy_reconfigure(struct efx_nic *efx)
 {
-	struct xfp_phy_data *phy_data = efx->phy_data;
+	struct qt202x_phy_data *phy_data = efx->phy_data;
 
 	if (efx->phy_type == PHY_TYPE_QT2025C) {
 		/* There are several different register bits which can
@@ -207,7 +207,7 @@
 		/* Reset the PHY when moving from tx off to tx on */
 		if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
 		    (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
-			xfp_reset_phy(efx);
+			qt202x_reset_phy(efx);
 
 		efx_mdio_transmit_disable(efx);
 	}
@@ -215,18 +215,18 @@
 	efx_mdio_phy_reconfigure(efx);
 
 	phy_data->phy_mode = efx->phy_mode;
-	efx->link_up = xfp_link_ok(efx);
+	efx->link_up = qt202x_link_ok(efx);
 	efx->link_speed = 10000;
 	efx->link_fd = true;
 	efx->link_fc = efx->wanted_fc;
 }
 
-static void xfp_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
 	mdio45_ethtool_gset(&efx->mdio, ecmd);
 }
 
-static void xfp_phy_fini(struct efx_nic *efx)
+static void qt202x_phy_fini(struct efx_nic *efx)
 {
 	/* Clobber the LED if it was blinking */
 	efx->board_info.blink(efx, false);
@@ -236,15 +236,15 @@
 	efx->phy_data = NULL;
 }
 
-struct efx_phy_operations falcon_xfp_phy_ops = {
+struct efx_phy_operations falcon_qt202x_phy_ops = {
 	.macs		 = EFX_XMAC,
-	.init            = xfp_phy_init,
-	.reconfigure     = xfp_phy_reconfigure,
-	.poll            = xfp_phy_poll,
-	.fini            = xfp_phy_fini,
-	.clear_interrupt = xfp_phy_clear_interrupt,
-	.get_settings    = xfp_phy_get_settings,
+	.init		 = qt202x_phy_init,
+	.reconfigure	 = qt202x_phy_reconfigure,
+	.poll	     	 = qt202x_phy_poll,
+	.fini	  	 = qt202x_phy_fini,
+	.clear_interrupt = qt202x_phy_clear_interrupt,
+	.get_settings	 = qt202x_phy_get_settings,
 	.set_settings	 = efx_mdio_set_settings,
-	.mmds            = XFP_REQUIRED_DEVS,
-	.loopbacks       = XFP_LOOPBACKS,
+	.mmds            = QT202X_REQUIRED_DEVS,
+	.loopbacks       = QT202X_LOOPBACKS,
 };
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
new file mode 100644
index 0000000..f336d83
--- /dev/null
+++ b/drivers/net/sfc/regs.h
@@ -0,0 +1,3180 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_REGS_H
+#define EFX_REGS_H
+
+/*
+ * Falcon hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ *     F<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ *             MMIO register  MC register  Host memory structure
+ * -------------------------------------------------------------
+ * Address     R              MCR
+ * Bitfield    RF             MCRF         SF
+ * Enumerator  FE             MCFE         SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ *     A: Falcon A1 (SFC4000AB)
+ *     B: Falcon B0 (SFC4000BA)
+ *     C: Siena A0 (SFL9021AA)
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
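+ *
+ * For example (using names that appear in falcon_xmac.c above):
+ * FR_AB_XX_SD_CTL is the MMIO address of the XX_SD_CTL register on
+ * Falcon A1 and B0, FRF_AB_XX_LPBKA is a bitfield within that register,
+ * and FFE_AB_XX_SD_CTL_DRV_DEF is an enumerated value written to several
+ * of its fields.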
+ */
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* ADR_REGION_REG: Address region register */
+#define	FR_AZ_ADR_REGION 0x00000000
+#define	FRF_AZ_ADR_REGION3_LBN 96
+#define	FRF_AZ_ADR_REGION3_WIDTH 18
+#define	FRF_AZ_ADR_REGION2_LBN 64
+#define	FRF_AZ_ADR_REGION2_WIDTH 18
+#define	FRF_AZ_ADR_REGION1_LBN 32
+#define	FRF_AZ_ADR_REGION1_WIDTH 18
+#define	FRF_AZ_ADR_REGION0_LBN 0
+#define	FRF_AZ_ADR_REGION0_WIDTH 18
+
+/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
+#define	FR_AZ_INT_EN_KER 0x00000010
+#define	FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define	FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define	FRF_AZ_KER_INT_CHAR_LBN 4
+#define	FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define	FRF_AZ_KER_INT_KER_LBN 3
+#define	FRF_AZ_KER_INT_KER_WIDTH 1
+#define	FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define	FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
+#define	FR_BZ_INT_EN_CHAR 0x00000020
+#define	FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
+#define	FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define	FRF_BZ_CHAR_INT_CHAR_LBN 4
+#define	FRF_BZ_CHAR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_CHAR_INT_KER_LBN 3
+#define	FRF_BZ_CHAR_INT_KER_WIDTH 1
+#define	FRF_BZ_DRV_INT_EN_CHAR_LBN 0
+#define	FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
+
+/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
+#define	FR_AZ_INT_ADR_KER 0x00000030
+#define	FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define	FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define	FRF_AZ_INT_ADR_KER_LBN 0
+#define	FRF_AZ_INT_ADR_KER_WIDTH 64
+
+/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
+#define	FR_BZ_INT_ADR_CHAR 0x00000040
+#define	FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define	FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define	FRF_BZ_INT_ADR_CHAR_LBN 0
+#define	FRF_BZ_INT_ADR_CHAR_WIDTH 64
+
+/* INT_ACK_KER: Kernel interrupt acknowledge register */
+#define	FR_AA_INT_ACK_KER 0x00000050
+#define	FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define	FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */
+#define	FR_BZ_INT_ISR0 0x00000090
+#define	FRF_BZ_INT_ISR_REG_LBN 0
+#define	FRF_BZ_INT_ISR_REG_WIDTH 64
+
+/* HW_INIT_REG: Hardware initialization register */
+#define	FR_AZ_HW_INIT 0x000000c0
+#define	FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define	FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define	FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define	FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define	FRF_CZ_TX_MRG_TAGS_LBN 120
+#define	FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define	FRF_AB_TRGT_MASK_ALL_LBN 100
+#define	FRF_AB_TRGT_MASK_ALL_WIDTH 1
+#define	FRF_AZ_DOORBELL_DROP_LBN 92
+#define	FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define	FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define	FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define	FRF_AB_PE_EIDLE_DIS_LBN 75
+#define	FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define	FRF_AA_FC_BLOCKING_EN_LBN 45
+#define	FRF_AA_FC_BLOCKING_EN_WIDTH 1
+#define	FRF_BZ_B2B_REQ_EN_LBN 45
+#define	FRF_BZ_B2B_REQ_EN_WIDTH 1
+#define	FRF_AA_B2B_REQ_EN_LBN 44
+#define	FRF_AA_B2B_REQ_EN_WIDTH 1
+#define	FRF_BB_FC_BLOCKING_EN_LBN 44
+#define	FRF_BB_FC_BLOCKING_EN_WIDTH 1
+#define	FRF_AZ_POST_WR_MASK_LBN 40
+#define	FRF_AZ_POST_WR_MASK_WIDTH 4
+#define	FRF_AZ_TLP_TC_LBN 34
+#define	FRF_AZ_TLP_TC_WIDTH 3
+#define	FRF_AZ_TLP_ATTR_LBN 32
+#define	FRF_AZ_TLP_ATTR_WIDTH 2
+#define	FRF_AB_INTB_VEC_LBN 24
+#define	FRF_AB_INTB_VEC_WIDTH 5
+#define	FRF_AB_INTA_VEC_LBN 16
+#define	FRF_AB_INTA_VEC_WIDTH 5
+#define	FRF_AZ_WD_TIMER_LBN 8
+#define	FRF_AZ_WD_TIMER_WIDTH 8
+#define	FRF_AZ_US_DISABLE_LBN 5
+#define	FRF_AZ_US_DISABLE_WIDTH 1
+#define	FRF_AZ_TLP_EP_LBN 4
+#define	FRF_AZ_TLP_EP_WIDTH 1
+#define	FRF_AZ_ATTR_SEL_LBN 3
+#define	FRF_AZ_ATTR_SEL_WIDTH 1
+#define	FRF_AZ_TD_SEL_LBN 1
+#define	FRF_AZ_TD_SEL_WIDTH 1
+#define	FRF_AZ_TLP_TD_LBN 0
+#define	FRF_AZ_TLP_TD_WIDTH 1
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+#define	FR_AB_EE_SPI_HCMD 0x00000100
+#define	FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define	FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define	FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define	FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define	FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define	FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define	FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define	FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define	FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define	FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define	FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define	FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define	FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define	FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define	FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define	FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+/* USR_EV_CFG: User Level Event Configuration register */
+#define	FR_CZ_USR_EV_CFG 0x00000100
+#define	FRF_CZ_USREV_DIS_LBN 16
+#define	FRF_CZ_USREV_DIS_WIDTH 1
+#define	FRF_CZ_DFLT_EVQ_LBN 0
+#define	FRF_CZ_DFLT_EVQ_WIDTH 10
+
+/* EE_SPI_HADR_REG: SPI host address register */
+#define	FR_AB_EE_SPI_HADR 0x00000110
+#define	FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define	FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define	FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define	FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+/* EE_SPI_HDATA_REG: SPI host data register */
+#define	FR_AB_EE_SPI_HDATA 0x00000120
+#define	FRF_AB_EE_SPI_HDATA3_LBN 96
+#define	FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define	FRF_AB_EE_SPI_HDATA2_LBN 64
+#define	FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define	FRF_AB_EE_SPI_HDATA1_LBN 32
+#define	FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define	FRF_AB_EE_SPI_HDATA0_LBN 0
+#define	FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
+#define	FR_AB_EE_BASE_PAGE 0x00000130
+#define	FRF_AB_EE_EXPROM_MASK_LBN 16
+#define	FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define	FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define	FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
+#define	FR_AB_EE_VPD_CFG0 0x00000140
+#define	FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define	FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define	FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define	FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define	FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define	FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define	FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define	FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define	FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define	FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define	FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define	FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define	FRF_AB_EE_VPDW_BASE_LBN 64
+#define	FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define	FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define	FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define	FRF_AB_EE_VPD_BASE_LBN 32
+#define	FRF_AB_EE_VPD_BASE_WIDTH 24
+#define	FRF_AB_EE_VPD_LENGTH_LBN 16
+#define	FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define	FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define	FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define	FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define	FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define	FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define	FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define	FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define	FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define	FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define	FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define	FRF_AB_EE_VPD_EN_LBN 0
+#define	FRF_AB_EE_VPD_EN_WIDTH 1
+
+/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
+#define	FR_AB_EE_VPD_SW_CNTL 0x00000150
+#define	FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define	FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define	FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define	FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define	FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define	FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+/* EE_VPD_SW_DATA_REG: VPD access SW data register */
+#define	FR_AB_EE_VPD_SW_DATA 0x00000160
+#define	FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define	FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+/* PBMX_DBG_IADDR_REG: Capture Module address register */
+#define	FR_CZ_PBMX_DBG_IADDR 0x000001f0
+#define	FRF_CZ_PBMX_DBG_IADDR_LBN 0
+#define	FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
+
+/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
+#define	FR_BB_PCIE_CORE_INDIRECT 0x000001f0
+#define	FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define	FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define	FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define	FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define	FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define	FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+/* PBMX_DBG_IDATA_REG: Capture Module data register */
+#define	FR_CZ_PBMX_DBG_IDATA 0x000001f8
+#define	FRF_CZ_PBMX_DBG_IDATA_LBN 0
+#define	FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
+
+/* NIC_STAT_REG: NIC status register */
+#define	FR_AB_NIC_STAT 0x00000200
+#define	FRF_BB_AER_DIS_LBN 34
+#define	FRF_BB_AER_DIS_WIDTH 1
+#define	FRF_BB_EE_STRAP_EN_LBN 31
+#define	FRF_BB_EE_STRAP_EN_WIDTH 1
+#define	FRF_BB_EE_STRAP_LBN 24
+#define	FRF_BB_EE_STRAP_WIDTH 4
+#define	FRF_BB_REVISION_ID_LBN 17
+#define	FRF_BB_REVISION_ID_WIDTH 7
+#define	FRF_AB_ONCHIP_SRAM_LBN 16
+#define	FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define	FRF_AB_SF_PRST_LBN 9
+#define	FRF_AB_SF_PRST_WIDTH 1
+#define	FRF_AB_EE_PRST_LBN 8
+#define	FRF_AB_EE_PRST_WIDTH 1
+#define	FRF_AB_ATE_MODE_LBN 3
+#define	FRF_AB_ATE_MODE_WIDTH 1
+#define	FRF_AB_STRAP_PINS_LBN 0
+#define	FRF_AB_STRAP_PINS_WIDTH 3
+
+/* GPIO_CTL_REG: GPIO control register */
+#define	FR_AB_GPIO_CTL 0x00000210
+#define	FRF_AB_GPIO_OUT3_LBN 112
+#define	FRF_AB_GPIO_OUT3_WIDTH 16
+#define	FRF_AB_GPIO_IN3_LBN 104
+#define	FRF_AB_GPIO_IN3_WIDTH 8
+#define	FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
+#define	FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
+#define	FRF_AB_GPIO_OUT2_LBN 80
+#define	FRF_AB_GPIO_OUT2_WIDTH 16
+#define	FRF_AB_GPIO_IN2_LBN 72
+#define	FRF_AB_GPIO_IN2_WIDTH 8
+#define	FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
+#define	FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
+#define	FRF_AB_GPIO15_OEN_LBN 63
+#define	FRF_AB_GPIO15_OEN_WIDTH 1
+#define	FRF_AB_GPIO14_OEN_LBN 62
+#define	FRF_AB_GPIO14_OEN_WIDTH 1
+#define	FRF_AB_GPIO13_OEN_LBN 61
+#define	FRF_AB_GPIO13_OEN_WIDTH 1
+#define	FRF_AB_GPIO12_OEN_LBN 60
+#define	FRF_AB_GPIO12_OEN_WIDTH 1
+#define	FRF_AB_GPIO11_OEN_LBN 59
+#define	FRF_AB_GPIO11_OEN_WIDTH 1
+#define	FRF_AB_GPIO10_OEN_LBN 58
+#define	FRF_AB_GPIO10_OEN_WIDTH 1
+#define	FRF_AB_GPIO9_OEN_LBN 57
+#define	FRF_AB_GPIO9_OEN_WIDTH 1
+#define	FRF_AB_GPIO8_OEN_LBN 56
+#define	FRF_AB_GPIO8_OEN_WIDTH 1
+#define	FRF_AB_GPIO15_OUT_LBN 55
+#define	FRF_AB_GPIO15_OUT_WIDTH 1
+#define	FRF_AB_GPIO14_OUT_LBN 54
+#define	FRF_AB_GPIO14_OUT_WIDTH 1
+#define	FRF_AB_GPIO13_OUT_LBN 53
+#define	FRF_AB_GPIO13_OUT_WIDTH 1
+#define	FRF_AB_GPIO12_OUT_LBN 52
+#define	FRF_AB_GPIO12_OUT_WIDTH 1
+#define	FRF_AB_GPIO11_OUT_LBN 51
+#define	FRF_AB_GPIO11_OUT_WIDTH 1
+#define	FRF_AB_GPIO10_OUT_LBN 50
+#define	FRF_AB_GPIO10_OUT_WIDTH 1
+#define	FRF_AB_GPIO9_OUT_LBN 49
+#define	FRF_AB_GPIO9_OUT_WIDTH 1
+#define	FRF_AB_GPIO8_OUT_LBN 48
+#define	FRF_AB_GPIO8_OUT_WIDTH 1
+#define	FRF_AB_GPIO15_IN_LBN 47
+#define	FRF_AB_GPIO15_IN_WIDTH 1
+#define	FRF_AB_GPIO14_IN_LBN 46
+#define	FRF_AB_GPIO14_IN_WIDTH 1
+#define	FRF_AB_GPIO13_IN_LBN 45
+#define	FRF_AB_GPIO13_IN_WIDTH 1
+#define	FRF_AB_GPIO12_IN_LBN 44
+#define	FRF_AB_GPIO12_IN_WIDTH 1
+#define	FRF_AB_GPIO11_IN_LBN 43
+#define	FRF_AB_GPIO11_IN_WIDTH 1
+#define	FRF_AB_GPIO10_IN_LBN 42
+#define	FRF_AB_GPIO10_IN_WIDTH 1
+#define	FRF_AB_GPIO9_IN_LBN 41
+#define	FRF_AB_GPIO9_IN_WIDTH 1
+#define	FRF_AB_GPIO8_IN_LBN 40
+#define	FRF_AB_GPIO8_IN_WIDTH 1
+#define	FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define	FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define	FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define	FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define	FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define	FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define	FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define	FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define	FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_CLK156_OUT_EN_LBN 31
+#define	FRF_AB_CLK156_OUT_EN_WIDTH 1
+#define	FRF_AB_USE_NIC_CLK_LBN 30
+#define	FRF_AB_USE_NIC_CLK_WIDTH 1
+#define	FRF_AB_GPIO5_OEN_LBN 29
+#define	FRF_AB_GPIO5_OEN_WIDTH 1
+#define	FRF_AB_GPIO4_OEN_LBN 28
+#define	FRF_AB_GPIO4_OEN_WIDTH 1
+#define	FRF_AB_GPIO3_OEN_LBN 27
+#define	FRF_AB_GPIO3_OEN_WIDTH 1
+#define	FRF_AB_GPIO2_OEN_LBN 26
+#define	FRF_AB_GPIO2_OEN_WIDTH 1
+#define	FRF_AB_GPIO1_OEN_LBN 25
+#define	FRF_AB_GPIO1_OEN_WIDTH 1
+#define	FRF_AB_GPIO0_OEN_LBN 24
+#define	FRF_AB_GPIO0_OEN_WIDTH 1
+#define	FRF_AB_GPIO7_OUT_LBN 23
+#define	FRF_AB_GPIO7_OUT_WIDTH 1
+#define	FRF_AB_GPIO6_OUT_LBN 22
+#define	FRF_AB_GPIO6_OUT_WIDTH 1
+#define	FRF_AB_GPIO5_OUT_LBN 21
+#define	FRF_AB_GPIO5_OUT_WIDTH 1
+#define	FRF_AB_GPIO4_OUT_LBN 20
+#define	FRF_AB_GPIO4_OUT_WIDTH 1
+#define	FRF_AB_GPIO3_OUT_LBN 19
+#define	FRF_AB_GPIO3_OUT_WIDTH 1
+#define	FRF_AB_GPIO2_OUT_LBN 18
+#define	FRF_AB_GPIO2_OUT_WIDTH 1
+#define	FRF_AB_GPIO1_OUT_LBN 17
+#define	FRF_AB_GPIO1_OUT_WIDTH 1
+#define	FRF_AB_GPIO0_OUT_LBN 16
+#define	FRF_AB_GPIO0_OUT_WIDTH 1
+#define	FRF_AB_GPIO7_IN_LBN 15
+#define	FRF_AB_GPIO7_IN_WIDTH 1
+#define	FRF_AB_GPIO6_IN_LBN 14
+#define	FRF_AB_GPIO6_IN_WIDTH 1
+#define	FRF_AB_GPIO5_IN_LBN 13
+#define	FRF_AB_GPIO5_IN_WIDTH 1
+#define	FRF_AB_GPIO4_IN_LBN 12
+#define	FRF_AB_GPIO4_IN_WIDTH 1
+#define	FRF_AB_GPIO3_IN_LBN 11
+#define	FRF_AB_GPIO3_IN_WIDTH 1
+#define	FRF_AB_GPIO2_IN_LBN 10
+#define	FRF_AB_GPIO2_IN_WIDTH 1
+#define	FRF_AB_GPIO1_IN_LBN 9
+#define	FRF_AB_GPIO1_IN_WIDTH 1
+#define	FRF_AB_GPIO0_IN_LBN 8
+#define	FRF_AB_GPIO0_IN_WIDTH 1
+#define	FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
+#define	FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
+#define	FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define	FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define	FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define	FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define	FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define	FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define	FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+/* GLB_CTL_REG: Global control register */
+#define	FR_AB_GLB_CTL 0x00000220
+#define	FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define	FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define	FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define	FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define	FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define	FRF_AA_PCIX_RST_CTL_LBN 60
+#define	FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define	FRF_BB_BIU_RST_CTL_LBN 60
+#define	FRF_BB_BIU_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define	FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define	FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define	FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define	FRF_AB_XGRX_RST_CTL_LBN 56
+#define	FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define	FRF_AB_XGTX_RST_CTL_LBN 55
+#define	FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define	FRF_AB_EM_RST_CTL_LBN 54
+#define	FRF_AB_EM_RST_CTL_WIDTH 1
+#define	FRF_AB_EV_RST_CTL_LBN 53
+#define	FRF_AB_EV_RST_CTL_WIDTH 1
+#define	FRF_AB_SR_RST_CTL_LBN 52
+#define	FRF_AB_SR_RST_CTL_WIDTH 1
+#define	FRF_AB_RX_RST_CTL_LBN 51
+#define	FRF_AB_RX_RST_CTL_WIDTH 1
+#define	FRF_AB_TX_RST_CTL_LBN 50
+#define	FRF_AB_TX_RST_CTL_WIDTH 1
+#define	FRF_AB_EE_RST_CTL_LBN 49
+#define	FRF_AB_EE_RST_CTL_WIDTH 1
+#define	FRF_AB_CS_RST_CTL_LBN 48
+#define	FRF_AB_CS_RST_CTL_WIDTH 1
+#define	FRF_AB_HOT_RST_CTL_LBN 40
+#define	FRF_AB_HOT_RST_CTL_WIDTH 2
+#define	FRF_AB_RST_EXT_PHY_LBN 31
+#define	FRF_AB_RST_EXT_PHY_WIDTH 1
+#define	FRF_AB_RST_XAUI_SD_LBN 30
+#define	FRF_AB_RST_XAUI_SD_WIDTH 1
+#define	FRF_AB_RST_PCIE_SD_LBN 29
+#define	FRF_AB_RST_PCIE_SD_WIDTH 1
+#define	FRF_AA_RST_PCIX_LBN 28
+#define	FRF_AA_RST_PCIX_WIDTH 1
+#define	FRF_BB_RST_BIU_LBN 28
+#define	FRF_BB_RST_BIU_WIDTH 1
+#define	FRF_AB_RST_PCIE_STKY_LBN 27
+#define	FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define	FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define	FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define	FRF_AB_RST_PCIE_CORE_LBN 25
+#define	FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define	FRF_AB_RST_XGRX_LBN 24
+#define	FRF_AB_RST_XGRX_WIDTH 1
+#define	FRF_AB_RST_XGTX_LBN 23
+#define	FRF_AB_RST_XGTX_WIDTH 1
+#define	FRF_AB_RST_EM_LBN 22
+#define	FRF_AB_RST_EM_WIDTH 1
+#define	FRF_AB_RST_EV_LBN 21
+#define	FRF_AB_RST_EV_WIDTH 1
+#define	FRF_AB_RST_SR_LBN 20
+#define	FRF_AB_RST_SR_WIDTH 1
+#define	FRF_AB_RST_RX_LBN 19
+#define	FRF_AB_RST_RX_WIDTH 1
+#define	FRF_AB_RST_TX_LBN 18
+#define	FRF_AB_RST_TX_WIDTH 1
+#define	FRF_AB_RST_SF_LBN 17
+#define	FRF_AB_RST_SF_WIDTH 1
+#define	FRF_AB_RST_CS_LBN 16
+#define	FRF_AB_RST_CS_WIDTH 1
+#define	FRF_AB_INT_RST_DUR_LBN 4
+#define	FRF_AB_INT_RST_DUR_WIDTH 3
+#define	FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define	FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define	FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define	FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define	FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define	FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define	FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define	FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define	FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define	FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define	FRF_AB_SWRST_LBN 0
+#define	FRF_AB_SWRST_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define	FR_AZ_FATAL_INTR_KER 0x00000230
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define	FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define	FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define	FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define	FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define	FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define	FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define	FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define	FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define	FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define	FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define	FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define	FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define	FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define	FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define	FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define	FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define	FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define	FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define	FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define	FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define	FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define	FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define	FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define	FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define	FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define	FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define	FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define	FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define	FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define	FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
+#define	FR_BZ_FATAL_INTR_CHAR 0x00000240
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define	FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define	FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define	FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define	FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define	FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define	FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define	FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define	FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define	FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
+#define	FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define	FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
+#define	FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
+#define	FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+/* DP_CTRL_REG: Datapath control register */
+#define	FR_BZ_DP_CTRL 0x00000250
+#define	FRF_BZ_FLS_EVQ_ID_LBN 0
+#define	FRF_BZ_FLS_EVQ_ID_WIDTH 12
+
+/* MEM_STAT_REG: Memory status register */
+#define	FR_AZ_MEM_STAT 0x00000260
+#define	FRF_AB_MEM_PERR_VEC_LBN 53
+#define	FRF_AB_MEM_PERR_VEC_WIDTH 38
+#define	FRF_AB_MBIST_CORR_LBN 38
+#define	FRF_AB_MBIST_CORR_WIDTH 15
+#define	FRF_AB_MBIST_ERR_LBN 0
+#define	FRF_AB_MBIST_ERR_WIDTH 40
+#define	FRF_CZ_MEM_PERR_VEC_LBN 0
+#define	FRF_CZ_MEM_PERR_VEC_WIDTH 35
+
+/* CS_DEBUG_REG: Debug register */
+#define	FR_AZ_CS_DEBUG 0x00000270
+#define	FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define	FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define	FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define	FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define	FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define	FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define	FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define	FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define	FRF_CZ_CS_PORT_NUM_LBN 40
+#define	FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define	FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define	FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define	FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define	FRF_CZ_CS_PORT_FPE_LBN 1
+#define	FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define	FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define	FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define	FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define	FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define	FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define	FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define	FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define	FRF_AZ_CS_DEBUG_EN_LBN 0
+#define	FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+/* DRIVER_REG: Driver scratch register [0-7] */
+#define	FR_AZ_DRIVER 0x00000280
+#define	FR_AZ_DRIVER_STEP 16
+#define	FR_AZ_DRIVER_ROWS 8
+#define	FRF_AZ_DRIVER_DW0_LBN 0
+#define	FRF_AZ_DRIVER_DW0_WIDTH 32
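+
+/*
+ * Illustrative sketch (editorial): registers that also carry _STEP and
+ * _ROWS definitions form arrays of _ROWS entries spaced _STEP bytes
+ * apart.  A hypothetical helper for the byte address of entry 'n':
+ */
+#define FR_EXAMPLE_ROW_ADDR(base, step, n) ((base) + (n) * (step))
+/*
+ * e.g. FR_EXAMPLE_ROW_ADDR(FR_AZ_DRIVER, FR_AZ_DRIVER_STEP, 3) would be
+ * the address of driver scratch register 3 (valid for n < FR_AZ_DRIVER_ROWS).
+ */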
+
+/* ALTERA_BUILD_REG: Altera build register */
+#define	FR_AZ_ALTERA_BUILD 0x00000300
+#define	FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define	FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+/* CSR_SPARE_REG: Spare register */
+#define	FR_AZ_CSR_SPARE 0x00000310
+#define	FRF_AB_MEM_PERR_EN_LBN 64
+#define	FRF_AB_MEM_PERR_EN_WIDTH 38
+#define	FRF_CZ_MEM_PERR_EN_LBN 64
+#define	FRF_CZ_MEM_PERR_EN_WIDTH 35
+#define	FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
+#define	FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define	FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define	FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
+#define	FR_AB_PCIE_SD_CTL0123 0x00000320
+#define	FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define	FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define	FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define	FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define	FRF_AB_PCIE_OFFSET_LBN 56
+#define	FRF_AB_PCIE_OFFSET_WIDTH 8
+#define	FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define	FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define	FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define	FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define	FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define	FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define	FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define	FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define	FRF_AB_PCIE_PARRESET_H_LBN 51
+#define	FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define	FRF_AB_PCIE_PARRESET_L_LBN 50
+#define	FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define	FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define	FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define	FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define	FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define	FRF_AB_PCIE_LPBK_LBN 40
+#define	FRF_AB_PCIE_LPBK_WIDTH 8
+#define	FRF_AB_PCIE_PARLPBK_LBN 32
+#define	FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define	FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define	FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define	FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define	FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define	FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define	FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define	FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define	FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define	FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define	FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define	FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define	FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define	FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define	FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define	FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define	FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define	FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define	FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define	FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define	FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define	FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define	FFE_AB_PCIE_RXEQCTL_OFF 2
+#define	FFE_AB_PCIE_RXEQCTL_MIN 1
+#define	FFE_AB_PCIE_RXEQCTL_MAX 0
+#define	FRF_AB_PCIE_HIDRV_LBN 8
+#define	FRF_AB_PCIE_HIDRV_WIDTH 8
+#define	FRF_AB_PCIE_LODRV_LBN 0
+#define	FRF_AB_PCIE_LODRV_WIDTH 8
+
+/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
+#define	FR_AB_PCIE_SD_CTL45 0x00000330
+#define	FRF_AB_PCIE_DTX7_LBN 60
+#define	FRF_AB_PCIE_DTX7_WIDTH 4
+#define	FRF_AB_PCIE_DTX6_LBN 56
+#define	FRF_AB_PCIE_DTX6_WIDTH 4
+#define	FRF_AB_PCIE_DTX5_LBN 52
+#define	FRF_AB_PCIE_DTX5_WIDTH 4
+#define	FRF_AB_PCIE_DTX4_LBN 48
+#define	FRF_AB_PCIE_DTX4_WIDTH 4
+#define	FRF_AB_PCIE_DTX3_LBN 44
+#define	FRF_AB_PCIE_DTX3_WIDTH 4
+#define	FRF_AB_PCIE_DTX2_LBN 40
+#define	FRF_AB_PCIE_DTX2_WIDTH 4
+#define	FRF_AB_PCIE_DTX1_LBN 36
+#define	FRF_AB_PCIE_DTX1_WIDTH 4
+#define	FRF_AB_PCIE_DTX0_LBN 32
+#define	FRF_AB_PCIE_DTX0_WIDTH 4
+#define	FRF_AB_PCIE_DEQ7_LBN 28
+#define	FRF_AB_PCIE_DEQ7_WIDTH 4
+#define	FRF_AB_PCIE_DEQ6_LBN 24
+#define	FRF_AB_PCIE_DEQ6_WIDTH 4
+#define	FRF_AB_PCIE_DEQ5_LBN 20
+#define	FRF_AB_PCIE_DEQ5_WIDTH 4
+#define	FRF_AB_PCIE_DEQ4_LBN 16
+#define	FRF_AB_PCIE_DEQ4_WIDTH 4
+#define	FRF_AB_PCIE_DEQ3_LBN 12
+#define	FRF_AB_PCIE_DEQ3_WIDTH 4
+#define	FRF_AB_PCIE_DEQ2_LBN 8
+#define	FRF_AB_PCIE_DEQ2_WIDTH 4
+#define	FRF_AB_PCIE_DEQ1_LBN 4
+#define	FRF_AB_PCIE_DEQ1_WIDTH 4
+#define	FRF_AB_PCIE_DEQ0_LBN 0
+#define	FRF_AB_PCIE_DEQ0_WIDTH 4
+
+/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
+#define	FR_AB_PCIE_PCS_CTL_STAT 0x00000340
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define	FRF_AB_PCIE_PRBSERR_LBN 40
+#define	FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define	FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define	FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define	FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define	FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define	FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define	FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define	FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define	FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define	FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define	FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define	FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define	FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define	FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define	FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define	FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define	FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define	FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define	FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define	FRF_AB_PCIE_PRBSSEL_LBN 0
+#define	FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
+#define	FR_BB_DEBUG_DATA_OUT 0x00000350
+#define	FRF_BB_DEBUG2_PORT_LBN 25
+#define	FRF_BB_DEBUG2_PORT_WIDTH 15
+#define	FRF_BB_DEBUG1_PORT_LBN 0
+#define	FRF_BB_DEBUG1_PORT_WIDTH 25
+
+/* EVQ_RPTR_REGP0: Event queue read pointer register */
+#define	FR_BZ_EVQ_RPTR_P0 0x00000400
+#define	FR_BZ_EVQ_RPTR_P0_STEP 8192
+#define	FR_BZ_EVQ_RPTR_P0_ROWS 1024
+/* EVQ_RPTR_REG_KER: Event queue read pointer register */
+#define	FR_AA_EVQ_RPTR_KER 0x00011b00
+#define	FR_AA_EVQ_RPTR_KER_STEP 4
+#define	FR_AA_EVQ_RPTR_KER_ROWS 4
+/* EVQ_RPTR_REG: Event queue read pointer register */
+#define	FR_BZ_EVQ_RPTR 0x00fa0000
+#define	FR_BZ_EVQ_RPTR_STEP 16
+#define	FR_BB_EVQ_RPTR_ROWS 4096
+#define	FR_CZ_EVQ_RPTR_ROWS 1024
+/* EVQ_RPTR_REGP123: Event queue read pointer register */
+#define	FR_BB_EVQ_RPTR_P123 0x01000400
+#define	FR_BB_EVQ_RPTR_P123_STEP 8192
+#define	FR_BB_EVQ_RPTR_P123_ROWS 3072
+#define	FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define	FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define	FRF_AZ_EVQ_RPTR_LBN 0
+#define	FRF_AZ_EVQ_RPTR_WIDTH 15
+
+/* TIMER_COMMAND_REGP0: Timer Command Registers */
+#define	FR_BZ_TIMER_COMMAND_P0 0x00000420
+#define	FR_BZ_TIMER_COMMAND_P0_STEP 8192
+#define	FR_BZ_TIMER_COMMAND_P0_ROWS 1024
+/* TIMER_COMMAND_REG_KER: Timer Command Registers */
+#define	FR_AA_TIMER_COMMAND_KER 0x00000420
+#define	FR_AA_TIMER_COMMAND_KER_STEP 8192
+#define	FR_AA_TIMER_COMMAND_KER_ROWS 4
+/* TIMER_COMMAND_REGP123: Timer Command Registers */
+#define	FR_BB_TIMER_COMMAND_P123 0x01000420
+#define	FR_BB_TIMER_COMMAND_P123_STEP 8192
+#define	FR_BB_TIMER_COMMAND_P123_ROWS 3072
+#define	FRF_CZ_TC_TIMER_MODE_LBN 14
+#define	FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define	FRF_AB_TC_TIMER_MODE_LBN 12
+#define	FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define	FRF_CZ_TC_TIMER_VAL_LBN 0
+#define	FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define	FRF_AB_TC_TIMER_VAL_LBN 0
+#define	FRF_AB_TC_TIMER_VAL_WIDTH 12
+
+/* DRV_EV_REG: Driver generated event register */
+#define	FR_AZ_DRV_EV 0x00000440
+#define	FRF_AZ_DRV_EV_QID_LBN 64
+#define	FRF_AZ_DRV_EV_QID_WIDTH 12
+#define	FRF_AZ_DRV_EV_DATA_LBN 0
+#define	FRF_AZ_DRV_EV_DATA_WIDTH 64
+
+/* EVQ_CTL_REG: Event queue control register */
+#define	FR_AZ_EVQ_CTL 0x00000450
+#define	FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define	FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define	FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define	FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define	FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define	FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define	FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define	FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define	FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define	FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+/* EVQ_CNT1_REG: Event counter 1 register */
+#define	FR_AZ_EVQ_CNT1 0x00000460
+#define	FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define	FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define	FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define	FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define	FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define	FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define	FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define	FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define	FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define	FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+/* EVQ_CNT2_REG: Event counter 2 register */
+#define	FR_AZ_EVQ_CNT2 0x00000470
+#define	FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define	FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define	FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define	FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define	FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define	FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define	FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define	FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define	FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+/* USR_EV_REG: Event mailbox register */
+#define	FR_CZ_USR_EV 0x00000540
+#define	FR_CZ_USR_EV_STEP 8192
+#define	FR_CZ_USR_EV_ROWS 1024
+#define	FRF_CZ_USR_EV_DATA_LBN 0
+#define	FRF_CZ_USR_EV_DATA_WIDTH 32
+
+/* BUF_TBL_CFG_REG: Buffer table configuration register */
+#define	FR_AZ_BUF_TBL_CFG 0x00000600
+#define	FRF_AZ_BUF_TBL_MODE_LBN 3
+#define	FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
+#define	FR_AZ_SRM_RX_DC_CFG 0x00000610
+#define	FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define	FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define	FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define	FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
+#define	FR_AZ_SRM_TX_DC_CFG 0x00000620
+#define	FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define	FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_CFG_REG: SRAM configuration register */
+#define	FR_AZ_SRM_CFG 0x00000630
+#define	FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define	FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define	FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define	FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define	FRF_AZ_SRM_INIT_EN_LBN 3
+#define	FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define	FRF_AZ_SRM_NUM_BANK_LBN 2
+#define	FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define	FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define	FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+/* BUF_TBL_UPD_REG: Buffer table update register */
+#define	FR_AZ_BUF_TBL_UPD 0x00000650
+#define	FRF_AZ_BUF_UPD_CMD_LBN 63
+#define	FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define	FRF_AZ_BUF_CLR_CMD_LBN 62
+#define	FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define	FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define	FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define	FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define	FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+/* SRM_UPD_EVQ_REG: Buffer table update event queue register */
+#define	FR_AZ_SRM_UPD_EVQ 0x00000660
+#define	FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define	FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+/* SRAM_PARITY_REG: SRAM parity register. */
+#define	FR_AZ_SRAM_PARITY 0x00000670
+#define	FRF_CZ_BYPASS_ECC_LBN 3
+#define	FRF_CZ_BYPASS_ECC_WIDTH 1
+#define	FRF_CZ_SEC_INT_LBN 2
+#define	FRF_CZ_SEC_INT_WIDTH 1
+#define	FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define	FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define	FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define	FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+#define	FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define	FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+
+/* RX_CFG_REG: Receive configuration register */
+#define	FR_AZ_RX_CFG 0x00000800
+#define	FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
+#define	FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
+#define	FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define	FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define	FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define	FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define	FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define	FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define	FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define	FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define	FRF_BZ_RX_TCP_SUP_LBN 48
+#define	FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define	FRF_BZ_RX_INGR_EN_LBN 47
+#define	FRF_BZ_RX_INGR_EN_WIDTH 1
+#define	FRF_BZ_RX_IP_HASH_LBN 46
+#define	FRF_BZ_RX_IP_HASH_WIDTH 1
+#define	FRF_BZ_RX_HASH_ALG_LBN 45
+#define	FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define	FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define	FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define	FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define	FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define	FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define	FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define	FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define	FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define	FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define	FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define	FRF_BZ_RX_XON_TX_TH_LBN 33
+#define	FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define	FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define	FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define	FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define	FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define	FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define	FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define	FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define	FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define	FRF_AA_RX_OWNERR_CTL_LBN 30
+#define	FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define	FRF_AA_RX_XON_TX_TH_LBN 25
+#define	FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define	FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define	FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define	FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define	FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define	FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define	FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define	FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define	FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define	FRF_AA_RX_XON_MAC_TH_LBN 6
+#define	FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define	FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define	FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define	FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define	FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define	FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define	FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+/* RX_FILTER_CTL_REG: Receive filter control registers */
+#define	FR_BZ_RX_FILTER_CTL 0x00000810
+#define	FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define	FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define	FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define	FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define	FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define	FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define	FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define	FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define	FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define	FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define	FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define	FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define	FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define	FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define	FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define	FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define	FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define	FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define	FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define	FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define	FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define	FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define	FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define	FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define	FRF_BZ_NUM_KER_LBN 24
+#define	FRF_BZ_NUM_KER_WIDTH 2
+#define	FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define	FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define	FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define	FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define	FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define	FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
+#define	FR_AZ_RX_FLUSH_DESCQ 0x00000820
+#define	FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define	FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define	FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define	FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+#define	FR_BZ_RX_DESC_UPD_P0 0x00000830
+#define	FR_BZ_RX_DESC_UPD_P0_STEP 8192
+#define	FR_BZ_RX_DESC_UPD_P0_ROWS 1024
+/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
+#define	FR_AA_RX_DESC_UPD_KER 0x00000830
+#define	FR_AA_RX_DESC_UPD_KER_STEP 8192
+#define	FR_AA_RX_DESC_UPD_KER_ROWS 4
+/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
+#define	FR_BB_RX_DESC_UPD_P123 0x01000830
+#define	FR_BB_RX_DESC_UPD_P123_STEP 8192
+#define	FR_BB_RX_DESC_UPD_P123_ROWS 3072
+#define	FRF_AZ_RX_DESC_WPTR_LBN 96
+#define	FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define	FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define	FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define	FRF_AZ_RX_DESC_LBN 0
+#define	FRF_AZ_RX_DESC_WIDTH 64
+
+/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
+#define	FR_AZ_RX_DC_CFG 0x00000840
+#define	FRF_AB_RX_MAX_PF_LBN 2
+#define	FRF_AB_RX_MAX_PF_WIDTH 2
+#define	FRF_AZ_RX_DC_SIZE_LBN 0
+#define	FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define	FFE_AZ_RX_DC_SIZE_64 3
+#define	FFE_AZ_RX_DC_SIZE_32 2
+#define	FFE_AZ_RX_DC_SIZE_16 1
+#define	FFE_AZ_RX_DC_SIZE_8 0
+
+/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
+#define	FR_AZ_RX_DC_PF_WM 0x00000850
+#define	FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define	FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define	FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define	FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
+#define	FR_BZ_RX_RSS_TKEY 0x00000860
+#define	FRF_BZ_RX_RSS_TKEY_HI_LBN 64
+#define	FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
+#define	FRF_BZ_RX_RSS_TKEY_LO_LBN 0
+#define	FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64
+
+/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
+#define	FR_AZ_RX_NODESC_DROP 0x00000880
+#define	FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
+#define	FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
+#define	FRF_AB_RX_NODESC_DROP_CNT_LBN 0
+#define	FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16
+
+/* RX_SELF_RST_REG: Receive self reset register */
+#define	FR_AA_RX_SELF_RST 0x00000890
+#define	FRF_AA_RX_ISCSI_DIS_LBN 17
+#define	FRF_AA_RX_ISCSI_DIS_WIDTH 1
+#define	FRF_AA_RX_SW_RST_REG_LBN 16
+#define	FRF_AA_RX_SW_RST_REG_WIDTH 1
+#define	FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
+#define	FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
+#define	FRF_AA_RX_SELF_RST_EN_LBN 8
+#define	FRF_AA_RX_SELF_RST_EN_WIDTH 1
+#define	FRF_AA_RX_MAX_PF_LAT_LBN 4
+#define	FRF_AA_RX_MAX_PF_LAT_WIDTH 4
+#define	FRF_AA_RX_MAX_LU_LAT_LBN 0
+#define	FRF_AA_RX_MAX_LU_LAT_WIDTH 4
+
+/* RX_DEBUG_REG: undocumented register */
+#define	FR_AZ_RX_DEBUG 0x000008a0
+#define	FRF_AZ_RX_DEBUG_LBN 0
+#define	FRF_AZ_RX_DEBUG_WIDTH 64
+
+/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
+#define	FR_AZ_RX_PUSH_DROP 0x000008b0
+#define	FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define	FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
+#define	FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+
+/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
+#define	FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+
+/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
+#define	FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
+#define	FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define	FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define	FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define	FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define	FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define	FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+
+/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
+#define	FR_AZ_TX_FLUSH_DESCQ 0x00000a00
+#define	FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define	FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define	FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define	FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define	FR_BZ_TX_DESC_UPD_P0 0x00000a10
+#define	FR_BZ_TX_DESC_UPD_P0_STEP 8192
+#define	FR_BZ_TX_DESC_UPD_P0_ROWS 1024
+/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
+#define	FR_AA_TX_DESC_UPD_KER 0x00000a10
+#define	FR_AA_TX_DESC_UPD_KER_STEP 8192
+#define	FR_AA_TX_DESC_UPD_KER_ROWS 8
+/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
+#define	FR_BB_TX_DESC_UPD_P123 0x01000a10
+#define	FR_BB_TX_DESC_UPD_P123_STEP 8192
+#define	FR_BB_TX_DESC_UPD_P123_ROWS 3072
+#define	FRF_AZ_TX_DESC_WPTR_LBN 96
+#define	FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define	FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define	FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define	FRF_AZ_TX_DESC_LBN 0
+#define	FRF_AZ_TX_DESC_WIDTH 95
+
+/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
+#define	FR_AZ_TX_DC_CFG 0x00000a20
+#define	FRF_AZ_TX_DC_SIZE_LBN 0
+#define	FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define	FFE_AZ_TX_DC_SIZE_32 2
+#define	FFE_AZ_TX_DC_SIZE_16 1
+#define	FFE_AZ_TX_DC_SIZE_8 0
+
+/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
+#define	FR_AA_TX_CHKSM_CFG 0x00000a30
+#define	FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define	FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define	FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define	FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+/* TX_CFG_REG: Transmit configuration register */
+#define	FR_AZ_TX_CFG 0x00000a50
+#define	FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define	FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define	FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define	FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define	FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define	FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define	FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define	FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define	FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define	FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define	FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define	FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define	FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define	FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define	FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define	FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define	FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define	FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define	FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define	FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define	FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define	FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define	FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define	FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define	FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define	FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define	FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+/* TX_PUSH_DROP_REG: Transmit push dropped register */
+#define	FR_AZ_TX_PUSH_DROP 0x00000a60
+#define	FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define	FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+/* TX_RESERVED_REG: Transmit configuration register */
+#define	FR_AZ_TX_RESERVED 0x00000a80
+#define	FRF_AZ_TX_EVT_CNT_LBN 121
+#define	FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define	FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define	FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define	FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define	FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define	FRF_AZ_TX_PUSH_EN_LBN 89
+#define	FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define	FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define	FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define	FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define	FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define	FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define	FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define	FRF_AZ_TX_DMAQ_ST_LBN 78
+#define	FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define	FRF_AZ_TX_RX_SPACER_LBN 64
+#define	FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define	FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define	FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define	FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define	FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define	FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define	FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define	FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define	FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define	FRF_AZ_TX_XP_TIMER_LBN 52
+#define	FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define	FRF_AZ_TX_PREF_SPACER_LBN 44
+#define	FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define	FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define	FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define	FRF_AZ_TX_ONLY1TAG_LBN 21
+#define	FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define	FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define	FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define	FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define	FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define	FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define	FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define	FRF_AA_TX_DMA_FF_THR_LBN 16
+#define	FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define	FRF_AZ_TX_DMA_SPACER_LBN 8
+#define	FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define	FRF_AA_TX_TCP_DIS_LBN 7
+#define	FRF_AA_TX_TCP_DIS_WIDTH 1
+#define	FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define	FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define	FRF_AA_TX_IP_DIS_LBN 6
+#define	FRF_AA_TX_IP_DIS_WIDTH 1
+#define	FRF_AZ_TX_MAX_CPL_LBN 2
+#define	FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define	FFE_AZ_TX_MAX_CPL_16 3
+#define	FFE_AZ_TX_MAX_CPL_8 2
+#define	FFE_AZ_TX_MAX_CPL_4 1
+#define	FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define	FRF_AZ_TX_MAX_PREF_LBN 0
+#define	FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define	FFE_AZ_TX_MAX_PREF_32 3
+#define	FFE_AZ_TX_MAX_PREF_16 2
+#define	FFE_AZ_TX_MAX_PREF_8 1
+#define	FFE_AZ_TX_MAX_PREF_OFF 0
+
+/* TX_PACE_REG: Transmit pace control register */
+#define	FR_BZ_TX_PACE 0x00000a90
+#define	FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
+#define	FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define	FRF_BZ_TX_PACE_SB_AF_LBN 9
+#define	FRF_BZ_TX_PACE_SB_AF_WIDTH 10
+#define	FRF_BZ_TX_PACE_FB_BASE_LBN 5
+#define	FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
+#define	FRF_BZ_TX_PACE_BIN_TH_LBN 0
+#define	FRF_BZ_TX_PACE_BIN_TH_WIDTH 5
+
+/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
+#define	FR_BZ_TX_PACE_DROP_QID 0x00000aa0
+#define	FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define	FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+/* TX_VLAN_REG: Transmit VLAN tag register */
+#define	FR_BB_TX_VLAN 0x00000ae0
+#define	FRF_BB_TX_VLAN_EN_LBN 127
+#define	FRF_BB_TX_VLAN_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
+#define	FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
+#define	FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN7_LBN 112
+#define	FRF_BB_TX_VLAN7_WIDTH 12
+#define	FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
+#define	FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
+#define	FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN6_LBN 96
+#define	FRF_BB_TX_VLAN6_WIDTH 12
+#define	FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
+#define	FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
+#define	FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN5_LBN 80
+#define	FRF_BB_TX_VLAN5_WIDTH 12
+#define	FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
+#define	FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
+#define	FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN4_LBN 64
+#define	FRF_BB_TX_VLAN4_WIDTH 12
+#define	FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
+#define	FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
+#define	FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN3_LBN 48
+#define	FRF_BB_TX_VLAN3_WIDTH 12
+#define	FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
+#define	FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
+#define	FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN2_LBN 32
+#define	FRF_BB_TX_VLAN2_WIDTH 12
+#define	FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
+#define	FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
+#define	FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN1_LBN 16
+#define	FRF_BB_TX_VLAN1_WIDTH 12
+#define	FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
+#define	FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
+#define	FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN0_LBN 0
+#define	FRF_BB_TX_VLAN0_WIDTH 12
+
+/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
+#define	FR_BZ_TX_IPFIL_PORTEN 0x00000af0
+#define	FRF_BZ_TX_MADR0_FIL_EN_LBN 64
+#define	FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
+#define	FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
+#define	FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
+#define	FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
+#define	FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
+#define	FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
+#define	FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
+#define	FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
+#define	FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
+#define	FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
+#define	FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
+#define	FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
+#define	FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
+#define	FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
+#define	FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
+#define	FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
+#define	FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
+#define	FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
+#define	FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
+#define	FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
+#define	FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
+#define	FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
+#define	FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
+#define	FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
+#define	FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
+#define	FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
+#define	FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
+#define	FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
+#define	FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
+#define	FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
+#define	FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
+#define	FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
+#define	FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+/* TX_IPFIL_TBL: Transmit IP source address filter table */
+#define	FR_BB_TX_IPFIL_TBL 0x00000b00
+#define	FR_BB_TX_IPFIL_TBL_STEP 16
+#define	FR_BB_TX_IPFIL_TBL_ROWS 16
+#define	FRF_BB_TX_IPFIL_MASK_1_LBN 96
+#define	FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
+#define	FRF_BB_TX_IP_SRC_ADR_1_LBN 64
+#define	FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
+#define	FRF_BB_TX_IPFIL_MASK_0_LBN 32
+#define	FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
+#define	FRF_BB_TX_IP_SRC_ADR_0_LBN 0
+#define	FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
+
+/* MD_TXD_REG: PHY management transmit data register */
+#define	FR_AB_MD_TXD 0x00000c00
+#define	FRF_AB_MD_TXD_LBN 0
+#define	FRF_AB_MD_TXD_WIDTH 16
+
+/* MD_RXD_REG: PHY management receive data register */
+#define	FR_AB_MD_RXD 0x00000c10
+#define	FRF_AB_MD_RXD_LBN 0
+#define	FRF_AB_MD_RXD_WIDTH 16
+
+/* MD_CS_REG: PHY management configuration & status register */
+#define	FR_AB_MD_CS 0x00000c20
+#define	FRF_AB_MD_RD_EN_CMD_LBN 15
+#define	FRF_AB_MD_RD_EN_CMD_WIDTH 1
+#define	FRF_AB_MD_WR_EN_CMD_LBN 14
+#define	FRF_AB_MD_WR_EN_CMD_WIDTH 1
+#define	FRF_AB_MD_ADDR_CMD_LBN 13
+#define	FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define	FRF_AB_MD_PT_LBN 7
+#define	FRF_AB_MD_PT_WIDTH 3
+#define	FRF_AB_MD_PL_LBN 6
+#define	FRF_AB_MD_PL_WIDTH 1
+#define	FRF_AB_MD_INT_CLR_LBN 5
+#define	FRF_AB_MD_INT_CLR_WIDTH 1
+#define	FRF_AB_MD_GC_LBN 4
+#define	FRF_AB_MD_GC_WIDTH 1
+#define	FRF_AB_MD_PRSP_LBN 3
+#define	FRF_AB_MD_PRSP_WIDTH 1
+#define	FRF_AB_MD_RIC_LBN 2
+#define	FRF_AB_MD_RIC_WIDTH 1
+#define	FRF_AB_MD_RDC_LBN 1
+#define	FRF_AB_MD_RDC_WIDTH 1
+#define	FRF_AB_MD_WRC_LBN 0
+#define	FRF_AB_MD_WRC_WIDTH 1
+
+/* MD_PHY_ADR_REG: PHY management PHY address register */
+#define	FR_AB_MD_PHY_ADR 0x00000c30
+#define	FRF_AB_MD_PHY_ADR_LBN 0
+#define	FRF_AB_MD_PHY_ADR_WIDTH 16
+
+/* MD_ID_REG: PHY management ID register */
+#define	FR_AB_MD_ID 0x00000c40
+#define	FRF_AB_MD_PRT_ADR_LBN 11
+#define	FRF_AB_MD_PRT_ADR_WIDTH 5
+#define	FRF_AB_MD_DEV_ADR_LBN 6
+#define	FRF_AB_MD_DEV_ADR_WIDTH 5
+
+/* MD_STAT_REG: PHY management status & mask register */
+#define	FR_AB_MD_STAT 0x00000c50
+#define	FRF_AB_MD_PINT_LBN 4
+#define	FRF_AB_MD_PINT_WIDTH 1
+#define	FRF_AB_MD_DONE_LBN 3
+#define	FRF_AB_MD_DONE_WIDTH 1
+#define	FRF_AB_MD_BSERR_LBN 2
+#define	FRF_AB_MD_BSERR_WIDTH 1
+#define	FRF_AB_MD_LNFL_LBN 1
+#define	FRF_AB_MD_LNFL_WIDTH 1
+#define	FRF_AB_MD_BSY_LBN 0
+#define	FRF_AB_MD_BSY_WIDTH 1
+
+/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
+#define	FR_AB_MAC_STAT_DMA 0x00000c60
+#define	FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define	FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define	FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define	FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+
+/* MAC_CTRL_REG: Port MAC control register */
+#define	FR_AB_MAC_CTRL 0x00000c80
+#define	FRF_AB_MAC_XOFF_VAL_LBN 16
+#define	FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define	FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define	FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define	FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define	FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define	FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define	FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define	FRF_AB_MAC_UC_PROM_LBN 3
+#define	FRF_AB_MAC_UC_PROM_WIDTH 1
+#define	FRF_AB_MAC_LINK_STATUS_LBN 2
+#define	FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define	FRF_AB_MAC_SPEED_LBN 0
+#define	FRF_AB_MAC_SPEED_WIDTH 2
+#define	FFE_AB_MAC_SPEED_10G 3
+#define	FFE_AB_MAC_SPEED_1G 2
+#define	FFE_AB_MAC_SPEED_100M 1
+#define	FFE_AB_MAC_SPEED_10M 0
+
+/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
+#define	FR_BB_GEN_MODE 0x00000c90
+#define	FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define	FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define	FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define	FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define	FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define	FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define	FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define	FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+/* MAC_MC_HASH_REG0: Multicast address hash table */
+#define	FR_AB_MAC_MC_HASH_REG0 0x00000ca0
+#define	FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define	FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+
+/* MAC_MC_HASH_REG1: Multicast address hash table */
+#define	FR_AB_MAC_MC_HASH_REG1 0x00000cb0
+#define	FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define	FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+
+/* GM_CFG1_REG: GMAC configuration register 1 */
+#define	FR_AB_GM_CFG1 0x00000e00
+#define	FRF_AB_GM_SW_RST_LBN 31
+#define	FRF_AB_GM_SW_RST_WIDTH 1
+#define	FRF_AB_GM_SIM_RST_LBN 30
+#define	FRF_AB_GM_SIM_RST_WIDTH 1
+#define	FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define	FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define	FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define	FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define	FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define	FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define	FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define	FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define	FRF_AB_GM_LOOP_LBN 8
+#define	FRF_AB_GM_LOOP_WIDTH 1
+#define	FRF_AB_GM_RX_FC_EN_LBN 5
+#define	FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define	FRF_AB_GM_TX_FC_EN_LBN 4
+#define	FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define	FRF_AB_GM_SYNC_RXEN_LBN 3
+#define	FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define	FRF_AB_GM_RX_EN_LBN 2
+#define	FRF_AB_GM_RX_EN_WIDTH 1
+#define	FRF_AB_GM_SYNC_TXEN_LBN 1
+#define	FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define	FRF_AB_GM_TX_EN_LBN 0
+#define	FRF_AB_GM_TX_EN_WIDTH 1
+
+/* GM_CFG2_REG: GMAC configuration register 2 */
+#define	FR_AB_GM_CFG2 0x00000e10
+#define	FRF_AB_GM_PAMBL_LEN_LBN 12
+#define	FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define	FRF_AB_GM_IF_MODE_LBN 8
+#define	FRF_AB_GM_IF_MODE_WIDTH 2
+#define	FFE_AB_IF_MODE_BYTE_MODE 2
+#define	FFE_AB_IF_MODE_NIBBLE_MODE 1
+#define	FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define	FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define	FRF_AB_GM_LEN_CHK_LBN 4
+#define	FRF_AB_GM_LEN_CHK_WIDTH 1
+#define	FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define	FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define	FRF_AB_GM_CRC_EN_LBN 1
+#define	FRF_AB_GM_CRC_EN_WIDTH 1
+#define	FRF_AB_GM_FD_LBN 0
+#define	FRF_AB_GM_FD_WIDTH 1
+
+/* GM_IPG_REG: GMAC IPG register */
+#define	FR_AB_GM_IPG 0x00000e20
+#define	FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define	FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define	FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define	FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define	FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define	FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define	FRF_AB_GM_B2B_IPG_LBN 0
+#define	FRF_AB_GM_B2B_IPG_WIDTH 7
+
+/* GM_HD_REG: GMAC half duplex register */
+#define	FR_AB_GM_HD 0x00000e30
+#define	FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define	FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define	FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define	FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define	FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define	FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define	FRF_AB_GM_DIS_BOFF_LBN 17
+#define	FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define	FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define	FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define	FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define	FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define	FRF_AB_GM_COL_WIN_LBN 0
+#define	FRF_AB_GM_COL_WIN_WIDTH 10
+
+/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
+#define	FR_AB_GM_MAX_FLEN 0x00000e40
+#define	FRF_AB_GM_MAX_FLEN_LBN 0
+#define	FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+/* GM_TEST_REG: GMAC test register */
+#define	FR_AB_GM_TEST 0x00000e70
+#define	FRF_AB_GM_MAX_BOFF_LBN 3
+#define	FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define	FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define	FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define	FRF_AB_GM_TEST_PAUSE_LBN 1
+#define	FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define	FRF_AB_GM_SHORT_SLOT_LBN 0
+#define	FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+/* GM_ADR1_REG: GMAC station address register 1 */
+#define	FR_AB_GM_ADR1 0x00000f00
+#define	FRF_AB_GM_ADR_B0_LBN 24
+#define	FRF_AB_GM_ADR_B0_WIDTH 8
+#define	FRF_AB_GM_ADR_B1_LBN 16
+#define	FRF_AB_GM_ADR_B1_WIDTH 8
+#define	FRF_AB_GM_ADR_B2_LBN 8
+#define	FRF_AB_GM_ADR_B2_WIDTH 8
+#define	FRF_AB_GM_ADR_B3_LBN 0
+#define	FRF_AB_GM_ADR_B3_WIDTH 8
+
+/* GM_ADR2_REG: GMAC station address register 2 */
+#define	FR_AB_GM_ADR2 0x00000f10
+#define	FRF_AB_GM_ADR_B4_LBN 24
+#define	FRF_AB_GM_ADR_B4_WIDTH 8
+#define	FRF_AB_GM_ADR_B5_LBN 16
+#define	FRF_AB_GM_ADR_B5_WIDTH 8
+
+/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
+#define	FR_AB_GMF_CFG0 0x00000f20
+#define	FRF_AB_GMF_FTFENRPLY_LBN 20
+#define	FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_STFENRPLY_LBN 19
+#define	FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_FRFENRPLY_LBN 18
+#define	FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_SRFENRPLY_LBN 17
+#define	FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_WTMENRPLY_LBN 16
+#define	FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define	FRF_AB_GMF_FTFENREQ_LBN 12
+#define	FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define	FRF_AB_GMF_STFENREQ_LBN 11
+#define	FRF_AB_GMF_STFENREQ_WIDTH 1
+#define	FRF_AB_GMF_FRFENREQ_LBN 10
+#define	FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define	FRF_AB_GMF_SRFENREQ_LBN 9
+#define	FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define	FRF_AB_GMF_WTMENREQ_LBN 8
+#define	FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTFT_LBN 4
+#define	FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTST_LBN 3
+#define	FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTFR_LBN 2
+#define	FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTSR_LBN 1
+#define	FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTWT_LBN 0
+#define	FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
+#define	FR_AB_GMF_CFG1 0x00000f30
+#define	FRF_AB_GMF_CFGFRTH_LBN 16
+#define	FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define	FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define	FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
+#define	FR_AB_GMF_CFG2 0x00000f40
+#define	FRF_AB_GMF_CFGHWM_LBN 16
+#define	FRF_AB_GMF_CFGHWM_WIDTH 6
+#define	FRF_AB_GMF_CFGLWM_LBN 0
+#define	FRF_AB_GMF_CFGLWM_WIDTH 6
+
+/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
+#define	FR_AB_GMF_CFG3 0x00000f50
+#define	FRF_AB_GMF_CFGHWMFT_LBN 16
+#define	FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define	FRF_AB_GMF_CFGFTTH_LBN 0
+#define	FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define	FR_AB_GMF_CFG4 0x00000f60
+#define	FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define	FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define	FR_AB_GMF_CFG5 0x00000f70
+#define	FRF_AB_GMF_CFGHDPLX_LBN 22
+#define	FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define	FRF_AB_GMF_SRFULL_LBN 21
+#define	FRF_AB_GMF_SRFULL_WIDTH 1
+#define	FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define	FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define	FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define	FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define	FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define	FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define	FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define	FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+/* TX_SRC_MAC_TBL: Transmit IP source address filter table */
+#define	FR_BB_TX_SRC_MAC_TBL 0x00001000
+#define	FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define	FR_BB_TX_SRC_MAC_TBL_ROWS 16
+#define	FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define	FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define	FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define	FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+
+/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
+#define	FR_BB_TX_SRC_MAC_CTL 0x00001100
+#define	FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define	FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define	FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define	FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define	FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define	FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define	FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define	FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+/* XM_ADR_LO_REG: XGMAC address register low */
+#define	FR_AB_XM_ADR_LO 0x00001200
+#define	FRF_AB_XM_ADR_LO_LBN 0
+#define	FRF_AB_XM_ADR_LO_WIDTH 32
+
+/* XM_ADR_HI_REG: XGMAC address register high */
+#define	FR_AB_XM_ADR_HI 0x00001210
+#define	FRF_AB_XM_ADR_HI_LBN 0
+#define	FRF_AB_XM_ADR_HI_WIDTH 16
+
+/* XM_GLB_CFG_REG: XGMAC global configuration */
+#define	FR_AB_XM_GLB_CFG 0x00001220
+#define	FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define	FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define	FRF_AB_XM_DEBUG_MODE_LBN 16
+#define	FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define	FRF_AB_XM_RX_STAT_EN_LBN 11
+#define	FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define	FRF_AB_XM_TX_STAT_EN_LBN 10
+#define	FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define	FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define	FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define	FRF_AB_XM_WAN_MODE_LBN 5
+#define	FRF_AB_XM_WAN_MODE_WIDTH 1
+#define	FRF_AB_XM_INTCLR_MODE_LBN 3
+#define	FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define	FRF_AB_XM_CORE_RST_LBN 0
+#define	FRF_AB_XM_CORE_RST_WIDTH 1
+
+/* XM_TX_CFG_REG: XGMAC transmit configuration */
+#define	FR_AB_XM_TX_CFG 0x00001230
+#define	FRF_AB_XM_TX_PROG_LBN 24
+#define	FRF_AB_XM_TX_PROG_WIDTH 1
+#define	FRF_AB_XM_IPG_LBN 16
+#define	FRF_AB_XM_IPG_WIDTH 4
+#define	FRF_AB_XM_FCNTL_LBN 10
+#define	FRF_AB_XM_FCNTL_WIDTH 1
+#define	FRF_AB_XM_TXCRC_LBN 8
+#define	FRF_AB_XM_TXCRC_WIDTH 1
+#define	FRF_AB_XM_EDRC_LBN 6
+#define	FRF_AB_XM_EDRC_WIDTH 1
+#define	FRF_AB_XM_AUTO_PAD_LBN 5
+#define	FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define	FRF_AB_XM_TX_PRMBL_LBN 2
+#define	FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define	FRF_AB_XM_TXEN_LBN 1
+#define	FRF_AB_XM_TXEN_WIDTH 1
+#define	FRF_AB_XM_TX_RST_LBN 0
+#define	FRF_AB_XM_TX_RST_WIDTH 1
+
+/* XM_RX_CFG_REG: XGMAC receive configuration */
+#define	FR_AB_XM_RX_CFG 0x00001240
+#define	FRF_AB_XM_PASS_LENERR_LBN 26
+#define	FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define	FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define	FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define	FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define	FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define	FRF_AB_XM_REJ_BCAST_LBN 20
+#define	FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define	FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define	FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define	FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define	FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define	FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define	FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define	FRF_AB_XM_RXCRC_LBN 3
+#define	FRF_AB_XM_RXCRC_WIDTH 1
+#define	FRF_AB_XM_RX_PRMBL_LBN 2
+#define	FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define	FRF_AB_XM_RXEN_LBN 1
+#define	FRF_AB_XM_RXEN_WIDTH 1
+#define	FRF_AB_XM_RX_RST_LBN 0
+#define	FRF_AB_XM_RX_RST_WIDTH 1
+
+/* XM_MGT_INT_MASK: XGMAC management interrupt mask register */
+#define	FR_AB_XM_MGT_INT_MASK 0x00001250
+#define	FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define	FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define	FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define	FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define	FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define	FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define	FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define	FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define	FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define	FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define	FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define	FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+/* XM_FC_REG: XGMAC flow control register */
+#define	FR_AB_XM_FC 0x00001270
+#define	FRF_AB_XM_PAUSE_TIME_LBN 16
+#define	FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define	FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define	FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define	FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define	FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define	FRF_AB_XM_MCNTL_PASS_LBN 8
+#define	FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define	FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define	FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define	FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define	FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define	FRF_AB_XM_ZPAUSE_LBN 2
+#define	FRF_AB_XM_ZPAUSE_WIDTH 1
+#define	FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define	FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define	FRF_AB_XM_DIS_FCNTL_LBN 0
+#define	FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+/* XM_PAUSE_TIME_REG: XGMAC pause time register */
+#define	FR_AB_XM_PAUSE_TIME 0x00001290
+#define	FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define	FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define	FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define	FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define	FR_AB_XM_TX_PARAM 0x000012d0
+#define	FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define	FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define	FRF_AB_XM_PAD_CHAR_LBN 0
+#define	FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define	FR_AB_XM_RX_PARAM 0x000012e0
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
+#define	FR_AB_XM_MGT_INT_MSK 0x000012f0
+#define	FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define	FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define	FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define	FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define	FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define	FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define	FRF_AB_XM_RMTFLT_LBN 1
+#define	FRF_AB_XM_RMTFLT_WIDTH 1
+#define	FRF_AB_XM_LCLFLT_LBN 0
+#define	FRF_AB_XM_LCLFLT_WIDTH 1
+
+/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
+#define	FR_AB_XX_PWR_RST 0x00001300
+#define	FRF_AB_XX_PWRDND_SIG_LBN 31
+#define	FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define	FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define	FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define	FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define	FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define	FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define	FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define	FRF_AB_XX_SIM_MODE_LBN 27
+#define	FRF_AB_XX_SIM_MODE_WIDTH 1
+#define	FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define	FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define	FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define	FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETD_SIG_LBN 23
+#define	FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETC_SIG_LBN 22
+#define	FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETB_SIG_LBN 21
+#define	FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETA_SIG_LBN 20
+#define	FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define	FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define	FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define	FRF_AB_XX_SD_RST_ACT_LBN 16
+#define	FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define	FRF_AB_XX_PWRDND_EN_LBN 15
+#define	FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define	FRF_AB_XX_PWRDNC_EN_LBN 14
+#define	FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define	FRF_AB_XX_PWRDNB_EN_LBN 13
+#define	FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define	FRF_AB_XX_PWRDNA_EN_LBN 12
+#define	FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define	FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define	FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define	FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define	FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define	FRF_AB_XX_RESETD_EN_LBN 7
+#define	FRF_AB_XX_RESETD_EN_WIDTH 1
+#define	FRF_AB_XX_RESETC_EN_LBN 6
+#define	FRF_AB_XX_RESETC_EN_WIDTH 1
+#define	FRF_AB_XX_RESETB_EN_LBN 5
+#define	FRF_AB_XX_RESETB_EN_WIDTH 1
+#define	FRF_AB_XX_RESETA_EN_LBN 4
+#define	FRF_AB_XX_RESETA_EN_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define	FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define	FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define	FRF_AB_XX_RST_XX_EN_LBN 0
+#define	FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
+#define	FR_AB_XX_SD_CTL 0x00001310
+#define	FRF_AB_XX_TERMADJ1_LBN 17
+#define	FRF_AB_XX_TERMADJ1_WIDTH 1
+#define	FRF_AB_XX_TERMADJ0_LBN 16
+#define	FRF_AB_XX_TERMADJ0_WIDTH 1
+#define	FRF_AB_XX_HIDRVD_LBN 15
+#define	FRF_AB_XX_HIDRVD_WIDTH 1
+#define	FRF_AB_XX_LODRVD_LBN 14
+#define	FRF_AB_XX_LODRVD_WIDTH 1
+#define	FRF_AB_XX_HIDRVC_LBN 13
+#define	FRF_AB_XX_HIDRVC_WIDTH 1
+#define	FRF_AB_XX_LODRVC_LBN 12
+#define	FRF_AB_XX_LODRVC_WIDTH 1
+#define	FRF_AB_XX_HIDRVB_LBN 11
+#define	FRF_AB_XX_HIDRVB_WIDTH 1
+#define	FRF_AB_XX_LODRVB_LBN 10
+#define	FRF_AB_XX_LODRVB_WIDTH 1
+#define	FRF_AB_XX_HIDRVA_LBN 9
+#define	FRF_AB_XX_HIDRVA_WIDTH 1
+#define	FRF_AB_XX_LODRVA_LBN 8
+#define	FRF_AB_XX_LODRVA_WIDTH 1
+#define	FRF_AB_XX_LPBKD_LBN 3
+#define	FRF_AB_XX_LPBKD_WIDTH 1
+#define	FRF_AB_XX_LPBKC_LBN 2
+#define	FRF_AB_XX_LPBKC_WIDTH 1
+#define	FRF_AB_XX_LPBKB_LBN 1
+#define	FRF_AB_XX_LPBKB_WIDTH 1
+#define	FRF_AB_XX_LPBKA_LBN 0
+#define	FRF_AB_XX_LPBKA_WIDTH 1
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+#define	FR_AB_XX_TXDRV_CTL 0x00001320
+#define	FRF_AB_XX_DEQD_LBN 28
+#define	FRF_AB_XX_DEQD_WIDTH 4
+#define	FRF_AB_XX_DEQC_LBN 24
+#define	FRF_AB_XX_DEQC_WIDTH 4
+#define	FRF_AB_XX_DEQB_LBN 20
+#define	FRF_AB_XX_DEQB_WIDTH 4
+#define	FRF_AB_XX_DEQA_LBN 16
+#define	FRF_AB_XX_DEQA_WIDTH 4
+#define	FRF_AB_XX_DTXD_LBN 12
+#define	FRF_AB_XX_DTXD_WIDTH 4
+#define	FRF_AB_XX_DTXC_LBN 8
+#define	FRF_AB_XX_DTXC_WIDTH 4
+#define	FRF_AB_XX_DTXB_LBN 4
+#define	FRF_AB_XX_DTXB_WIDTH 4
+#define	FRF_AB_XX_DTXA_LBN 0
+#define	FRF_AB_XX_DTXA_WIDTH 4
+
+/* XX_PRBS_CTL_REG: XAUI PRBS control register */
+#define	FR_AB_XX_PRBS_CTL 0x00001330
+#define	FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define	FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define	FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define	FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define	FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define	FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define	FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define	FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define	FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define	FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define	FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define	FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define	FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define	FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define	FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define	FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define	FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define	FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define	FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define	FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define	FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define	FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define	FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define	FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define	FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+/* XX_PRBS_CHK_REG: XAUI PRBS checker register */
+#define	FR_AB_XX_PRBS_CHK 0x00001340
+#define	FRF_AB_XX_REV_LB_EN_LBN 16
+#define	FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define	FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define	FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define	FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define	FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define	FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define	FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define	FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define	FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define	FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define	FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define	FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define	FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define	FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define	FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define	FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define	FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define	FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define	FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define	FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define	FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+/* XX_PRBS_ERR_REG: XAUI PRBS error count register */
+#define	FR_AB_XX_PRBS_ERR 0x00001350
+#define	FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define	FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define	FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define	FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define	FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define	FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define	FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define	FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+#define	FR_AB_XX_CORE_STAT 0x00001360
+#define	FRF_AB_XX_FORCE_SIG3_LBN 31
+#define	FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define	FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG2_LBN 29
+#define	FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define	FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG1_LBN 27
+#define	FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define	FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG0_LBN 25
+#define	FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define	FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define	FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define	FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define	FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define	FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define	FRF_AB_XX_MATCH_FAULT_LBN 21
+#define	FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define	FRF_AB_XX_ALIGN_DONE_LBN 20
+#define	FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT3_LBN 19
+#define	FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT2_LBN 18
+#define	FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT1_LBN 17
+#define	FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT0_LBN 16
+#define	FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define	FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define	FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define	FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define	FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define	FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define	FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define	FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define	FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define	FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define	FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define	FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define	FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH3_LBN 3
+#define	FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH2_LBN 2
+#define	FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH1_LBN 1
+#define	FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH0_LBN 0
+#define	FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
+#define	FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
+#define	FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define	FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
+#define	FR_BZ_RX_DESC_PTR_TBL 0x00f40000
+#define	FR_BZ_RX_DESC_PTR_TBL_STEP 16
+#define	FR_BB_RX_DESC_PTR_TBL_ROWS 4096
+#define	FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define	FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define	FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define	FRF_AA_RX_RESET_LBN 89
+#define	FRF_AA_RX_RESET_WIDTH 1
+#define	FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define	FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define	FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define	FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define	FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define	FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define	FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define	FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define	FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define	FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define	FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define	FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define	FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define	FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define	FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define	FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define	FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define	FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define	FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define	FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define	FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define	FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define	FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define	FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define	FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define	FFE_AZ_RX_DESCQ_SIZE_512 0
+#define	FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define	FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define	FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define	FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define	FRF_AZ_RX_DESCQ_EN_LBN 0
+#define	FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
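+/* Illustrative sketch, not part of the original patch: registers declared
+ * with _STEP/_ROWS constants form a table, so entry N lives at
+ * base + N * step.  A bounds-checked lookup for a Falcon B RX descriptor
+ * pointer entry might look like this (hypothetical helper name).
+ */
+static inline unsigned int example_rx_desc_ptr_addr(unsigned int queue)
+{
+	BUG_ON(queue >= FR_BB_RX_DESC_PTR_TBL_ROWS);
+	return FR_BZ_RX_DESC_PTR_TBL + queue * FR_BZ_RX_DESC_PTR_TBL_STEP;
+}
+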
+/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */
+#define	FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
+#define	FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define	FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/* TX_DESC_PTR_TBL: Transmit descriptor pointer */
+#define	FR_BZ_TX_DESC_PTR_TBL 0x00f50000
+#define	FR_BZ_TX_DESC_PTR_TBL_STEP 16
+#define	FR_BB_TX_DESC_PTR_TBL_ROWS 4096
+#define	FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+#define	FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define	FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define	FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define	FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define	FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define	FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define	FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define	FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define	FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define	FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define	FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define	FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define	FRF_AZ_TX_DESCQ_EN_LBN 88
+#define	FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define	FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define	FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define	FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define	FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define	FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define	FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define	FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define	FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define	FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define	FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define	FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define	FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define	FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define	FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define	FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define	FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define	FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define	FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define	FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define	FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define	FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define	FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define	FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define	FFE_AZ_TX_DESCQ_SIZE_512 0
+#define	FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define	FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define	FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define	FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+/* EVQ_PTR_TBL_KER: Event queue pointer table */
+#define	FR_AA_EVQ_PTR_TBL_KER 0x00011a00
+#define	FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define	FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/* EVQ_PTR_TBL: Event queue pointer table */
+#define	FR_BZ_EVQ_PTR_TBL 0x00f60000
+#define	FR_BZ_EVQ_PTR_TBL_STEP 16
+#define	FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define	FR_BB_EVQ_PTR_TBL_ROWS 4096
+#define	FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define	FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define	FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
+#define	FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define	FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
+#define	FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
+#define	FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define	FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define	FRF_AZ_EVQ_EN_LBN 23
+#define	FRF_AZ_EVQ_EN_WIDTH 1
+#define	FRF_AZ_EVQ_SIZE_LBN 20
+#define	FRF_AZ_EVQ_SIZE_WIDTH 3
+#define	FFE_AZ_EVQ_SIZE_32K 6
+#define	FFE_AZ_EVQ_SIZE_16K 5
+#define	FFE_AZ_EVQ_SIZE_8K 4
+#define	FFE_AZ_EVQ_SIZE_4K 3
+#define	FFE_AZ_EVQ_SIZE_2K 2
+#define	FFE_AZ_EVQ_SIZE_1K 1
+#define	FFE_AZ_EVQ_SIZE_512 0
+#define	FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define	FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
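+/* Illustrative sketch, not part of the original patch: the EVQ_SIZE field
+ * (like the DESCQ_SIZE fields above) appears to encode a power-of-two queue
+ * length as 512 << value.  A hypothetical helper mapping an entry count to
+ * the encoding:
+ */
+static inline unsigned int example_evq_size_encoding(unsigned int entries)
+{
+	/* entries is assumed to be a power of two between 512 and 32768 */
+	return ffs(entries / 512) - 1;
+}
+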
+/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */
+#define	FR_AA_BUF_HALF_TBL_KER 0x00018000
+#define	FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define	FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */
+#define	FR_BZ_BUF_HALF_TBL 0x00800000
+#define	FR_BZ_BUF_HALF_TBL_STEP 8
+#define	FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define	FR_BB_BUF_HALF_TBL_ROWS 524288
+#define	FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define	FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define	FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define	FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */
+#define	FR_AA_BUF_FULL_TBL_KER 0x00018000
+#define	FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define	FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */
+#define	FR_BZ_BUF_FULL_TBL 0x00800000
+#define	FR_BZ_BUF_FULL_TBL_STEP 8
+#define	FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define	FR_BB_BUF_FULL_TBL_ROWS 917504
+#define	FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define	FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define	FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define	FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define	FRF_AZ_BUF_ADR_REGION_LBN 48
+#define	FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define	FFE_AZ_BUF_ADR_REGN3 3
+#define	FFE_AZ_BUF_ADR_REGN2 2
+#define	FFE_AZ_BUF_ADR_REGN1 1
+#define	FFE_AZ_BUF_ADR_REGN0 0
+#define	FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define	FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define	FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define	FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
+#define	FR_BZ_RX_FILTER_TBL0 0x00f00000
+#define	FR_BZ_RX_FILTER_TBL0_STEP 32
+#define	FR_BZ_RX_FILTER_TBL0_ROWS 8192
+/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
+#define	FR_BB_RX_FILTER_TBL1 0x00f00010
+#define	FR_BB_RX_FILTER_TBL1_STEP 32
+#define	FR_BB_RX_FILTER_TBL1_ROWS 8192
+#define	FRF_BZ_RSS_EN_LBN 110
+#define	FRF_BZ_RSS_EN_WIDTH 1
+#define	FRF_BZ_SCATTER_EN_LBN 109
+#define	FRF_BZ_SCATTER_EN_WIDTH 1
+#define	FRF_BZ_TCP_UDP_LBN 108
+#define	FRF_BZ_TCP_UDP_WIDTH 1
+#define	FRF_BZ_RXQ_ID_LBN 96
+#define	FRF_BZ_RXQ_ID_WIDTH 12
+#define	FRF_BZ_DEST_IP_LBN 64
+#define	FRF_BZ_DEST_IP_WIDTH 32
+#define	FRF_BZ_DEST_PORT_TCP_LBN 48
+#define	FRF_BZ_DEST_PORT_TCP_WIDTH 16
+#define	FRF_BZ_SRC_IP_LBN 16
+#define	FRF_BZ_SRC_IP_WIDTH 32
+#define	FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
+#define	FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
+#define	FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
+#define	FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define	FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+#define	FRF_CZ_RMFT_RSS_EN_LBN 75
+#define	FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define	FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define	FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define	FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define	FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define	FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define	FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define	FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define	FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define	FRF_CZ_RMFT_DEST_MAC_LBN 16
+#define	FRF_CZ_RMFT_DEST_MAC_WIDTH 44
+#define	FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define	FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+/* TIMER_TBL: Timer table */
+#define	FR_BZ_TIMER_TBL 0x00f70000
+#define	FR_BZ_TIMER_TBL_STEP 16
+#define	FR_CZ_TIMER_TBL_ROWS 1024
+#define	FR_BB_TIMER_TBL_ROWS 4096
+#define	FRF_CZ_TIMER_Q_EN_LBN 33
+#define	FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define	FRF_CZ_INT_ARMD_LBN 32
+#define	FRF_CZ_INT_ARMD_WIDTH 1
+#define	FRF_CZ_INT_PEND_LBN 31
+#define	FRF_CZ_INT_PEND_WIDTH 1
+#define	FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define	FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define	FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define	FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define	FRF_CZ_TIMER_MODE_LBN 14
+#define	FRF_CZ_TIMER_MODE_WIDTH 2
+#define	FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define	FFE_CZ_TIMER_MODE_TRIG_START 2
+#define	FFE_CZ_TIMER_MODE_IMMED_START 1
+#define	FFE_CZ_TIMER_MODE_DIS 0
+#define	FRF_BB_TIMER_MODE_LBN 12
+#define	FRF_BB_TIMER_MODE_WIDTH 2
+#define	FFE_BB_TIMER_MODE_INT_HLDOFF 2
+#define	FFE_BB_TIMER_MODE_TRIG_START 2
+#define	FFE_BB_TIMER_MODE_IMMED_START 1
+#define	FFE_BB_TIMER_MODE_DIS 0
+#define	FRF_CZ_TIMER_VAL_LBN 0
+#define	FRF_CZ_TIMER_VAL_WIDTH 14
+#define	FRF_BB_TIMER_VAL_LBN 0
+#define	FRF_BB_TIMER_VAL_WIDTH 12
+
+/* TX_PACE_TBL: Transmit pacing table */
+#define	FR_BZ_TX_PACE_TBL 0x00f80000
+#define	FR_BZ_TX_PACE_TBL_STEP 16
+#define	FR_CZ_TX_PACE_TBL_ROWS 1024
+#define	FR_BB_TX_PACE_TBL_ROWS 4096
+#define	FRF_BZ_TX_PACE_LBN 0
+#define	FRF_BZ_TX_PACE_WIDTH 5
+
+/* RX_INDIRECTION_TBL: RX Indirection Table */
+#define	FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
+#define	FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define	FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+#define	FRF_BZ_IT_QUEUE_LBN 0
+#define	FRF_BZ_IT_QUEUE_WIDTH 6
+
+/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
+#define	FR_CZ_TX_FILTER_TBL0 0x00fc0000
+#define	FR_CZ_TX_FILTER_TBL0_STEP 16
+#define	FR_CZ_TX_FILTER_TBL0_ROWS 8192
+#define	FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define	FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define	FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define	FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define	FRF_CZ_TIFT_DEST_IP_LBN 64
+#define	FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define	FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define	FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define	FRF_CZ_TIFT_SRC_IP_LBN 16
+#define	FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define	FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define	FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
+#define	FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
+#define	FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define	FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+#define	FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define	FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define	FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define	FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define	FRF_CZ_TMFT_SRC_MAC_LBN 16
+#define	FRF_CZ_TMFT_SRC_MAC_WIDTH 44
+#define	FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define	FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+/* MC_TREG_SMEM: MC Shared Memory */
+#define	FR_CZ_MC_TREG_SMEM 0x00ff0000
+#define	FR_CZ_MC_TREG_SMEM_STEP 4
+#define	FR_CZ_MC_TREG_SMEM_ROWS 512
+#define	FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define	FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define	FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
+#define	FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define	FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define	FR_CZ_MSIX_VECTOR_TABLE 0x00000000
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define	FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+#define	FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define	FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define	FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define	FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define	FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define	FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define	FR_BB_MSIX_PBA_TABLE 0x00ff2000
+#define	FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define	FR_BB_MSIX_PBA_TABLE_ROWS 2
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define	FR_CZ_MSIX_PBA_TABLE 0x00008000
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define	FR_CZ_MSIX_PBA_TABLE_ROWS 32
+#define	FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define	FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* SRM_DBG_REG: SRAM debug access */
+#define	FR_BZ_SRM_DBG 0x03000000
+#define	FR_BZ_SRM_DBG_STEP 8
+#define	FR_CZ_SRM_DBG_ROWS 262144
+#define	FR_BB_SRM_DBG_ROWS 2097152
+#define	FRF_BZ_SRM_DBG_LBN 0
+#define	FRF_BZ_SRM_DBG_WIDTH 64
+
+/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define	FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
+#define	FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
+#define	FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
+#define	FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
+#define	FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define	FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define	FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define	FSE_BZ_TX_DSC_ERROR_EV 15
+#define	FSE_BZ_RX_DSC_ERROR_EV 14
+#define	FSE_AA_RX_RECOVER_EV 11
+#define	FSE_AZ_TIMER_EV 10
+#define	FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define	FSE_AZ_WAKE_UP_EV 6
+#define	FSE_AZ_SRM_UPD_DONE_EV 5
+#define	FSE_AB_EVQ_NOT_EN_EV 3
+#define	FSE_AZ_EVQ_INIT_DONE_EV 2
+#define	FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define	FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define	FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define	FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+/* EVENT_ENTRY */
+#define	FSF_AZ_EV_CODE_LBN 60
+#define	FSF_AZ_EV_CODE_WIDTH 4
+#define	FSE_CZ_EV_CODE_MCDI_EV 12
+#define	FSE_CZ_EV_CODE_USER_EV 8
+#define	FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define	FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define	FSE_AZ_EV_CODE_DRIVER_EV 5
+#define	FSE_AZ_EV_CODE_TX_EV 2
+#define	FSE_AZ_EV_CODE_RX_EV 0
+#define	FSF_AZ_EV_DATA_LBN 0
+#define	FSF_AZ_EV_DATA_WIDTH 60
+
+/* GLOBAL_EV */
+#define	FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
+#define	FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
+#define	FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
+#define	FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define	FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
+#define	FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
+#define	FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define	FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define	FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
+#define	FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define	FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
+#define	FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+/* LEGACY_INT_VEC */
+#define	FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define	FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define	FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define	FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define	FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define	FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+/* MC_XGMAC_FLTR_RULE_DEF */
+#define	FSF_CZ_MC_XFRC_MODE_LBN 416
+#define	FSF_CZ_MC_XFRC_MODE_WIDTH 1
+#define	FSE_CZ_MC_XFRC_MODE_LAYERED 1
+#define	FSE_CZ_MC_XFRC_MODE_SIMPLE 0
+#define	FSF_CZ_MC_XFRC_HASH_LBN 384
+#define	FSF_CZ_MC_XFRC_HASH_WIDTH 32
+#define	FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
+#define	FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
+#define	FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
+#define	FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
+#define	FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
+#define	FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
+
+/* RX_EV */
+#define	FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define	FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define	FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define	FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define	FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define	FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define	FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define	FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define	FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define	FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define	FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define	FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define	FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define	FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define	FSF_AA_RX_EV_DRIB_NIB_LBN 49
+#define	FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
+#define	FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define	FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define	FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define	FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define	FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define	FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define	FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define	FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define	FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define	FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define	FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define	FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define	FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define	FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define	FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define	FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define	FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define	FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define	FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define	FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define	FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define	FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define	FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define	FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define	FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define	FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define	FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define	FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define	FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define	FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define	FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define	FSF_AZ_RX_EV_PORT_LBN 30
+#define	FSF_AZ_RX_EV_PORT_WIDTH 1
+#define	FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define	FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define	FSF_AZ_RX_EV_SOP_LBN 15
+#define	FSF_AZ_RX_EV_SOP_WIDTH 1
+#define	FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define	FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define	FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define	FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define	FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define	FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+/* RX_KER_DESC */
+#define	FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define	FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define	FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define	FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define	FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define	FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+
+/* RX_USER_DESC */
+#define	FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define	FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define	FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define	FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+/* TX_EV */
+#define	FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define	FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define	FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define	FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define	FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define	FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define	FSF_AZ_TX_EV_PORT_LBN 16
+#define	FSF_AZ_TX_EV_PORT_WIDTH 1
+#define	FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define	FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define	FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define	FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define	FSF_AZ_TX_EV_COMP_LBN 12
+#define	FSF_AZ_TX_EV_COMP_WIDTH 1
+#define	FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define	FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+/* TX_KER_DESC */
+#define	FSF_AZ_TX_KER_CONT_LBN 62
+#define	FSF_AZ_TX_KER_CONT_WIDTH 1
+#define	FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define	FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define	FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define	FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define	FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define	FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+
+/* TX_USER_DESC */
+#define	FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define	FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define	FSF_AZ_TX_USER_CONT_LBN 46
+#define	FSF_AZ_TX_USER_CONT_WIDTH 1
+#define	FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define	FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define	FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define	FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define	FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define	FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+/* USER_EV */
+#define	FSF_CZ_USER_QID_LBN 32
+#define	FSF_CZ_USER_QID_WIDTH 10
+#define	FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define	FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon B0 PCIe core indirect registers
+ *
+ **************************************************************************
+ */
+
+#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
+
+#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
+
+#define FPCR_BB_ACK_RPL_TIMER 0x700
+#define FPCRF_BB_ACK_TL_LBN 0
+#define FPCRF_BB_ACK_TL_WIDTH 16
+#define FPCRF_BB_RPL_TL_LBN 16
+#define FPCRF_BB_RPL_TL_WIDTH 16
+
+#define FPCR_BB_ACK_FREQ 0x70C
+#define FPCRF_BB_ACK_FREQ_LBN 0
+#define FPCRF_BB_ACK_FREQ_WIDTH 7
+
+/**************************************************************************
+ *
+ * Pseudo-registers and fields
+ *
+ **************************************************************************
+ */
+
+/* Interrupt acknowledge work-around register (A0/A1 only) */
+#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+/* Values for the EE_SPI_HCMD_SF_SEL register field */
+#define FFE_AB_SPI_DEVICE_EEPROM 0
+#define FFE_AB_SPI_DEVICE_FLASH 1
+
+/* NIC_STAT_REG: NIC status register */
+#define FRF_AB_STRAP_10G_LBN 2
+#define FRF_AB_STRAP_10G_WIDTH 1
+#define FRF_AA_STRAP_PCIE_LBN 0
+#define FRF_AA_STRAP_PCIE_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FRF_AZ_FATAL_INTR_LBN 0
+#define FRF_AZ_FATAL_INTR_WIDTH 12
+
+/* SRM_CFG_REG: SRAM configuration register */
+/* We treat the number of SRAM banks and bank size as a single field */
+#define	FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
+#define	FRF_AZ_SRM_NB_SZ_WIDTH \
+	(FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
+#define FFE_AB_SRM_NB1_SZ2M 0
+#define FFE_AB_SRM_NB1_SZ4M 1
+#define FFE_AB_SRM_NB1_SZ8M 2
+#define FFE_AB_SRM_NB_SZ_DEF 3
+#define FFE_AB_SRM_NB2_SZ4M 4
+#define FFE_AB_SRM_NB2_SZ8M 5
+#define FFE_AB_SRM_NB2_SZ16M 6
+#define FFE_AB_SRM_NB_SZ_RES 7
+
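+/* Illustrative sketch, not part of the original patch: reading the combined
+ * bank-count/bank-size field defined above.  Assumes EFX_OWORD_FIELD() from
+ * the driver's bitfield.h, efx_reado() from io.h, and that the SRAM
+ * configuration register is named FR_AZ_SRM_CFG.
+ */
+static inline bool example_srm_cfg_is_default(struct efx_nic *efx)
+{
+	efx_oword_t reg;
+
+	efx_reado(efx, &reg, FR_AZ_SRM_CFG);
+	return EFX_OWORD_FIELD(reg, FRF_AZ_SRM_NB_SZ) == FFE_AB_SRM_NB_SZ_DEF;
+}
+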
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+/* We write just the last dword of these registers */
+#define	FR_AZ_RX_DESC_UPD_DWORD_P0 \
+	(BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
+	 FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
+#define	FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
+#define	FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
+	(BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
+	 FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
+#define	FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
+#define	FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
+
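+/* Illustrative sketch, not part of the original patch: pushing a new RX
+ * write pointer by writing only the final dword of the paged update
+ * register, as the comments above describe.  EFX_POPULATE_DWORD_1() and
+ * efx_writed_page() are assumed from the driver's bitfield.h/io.h.
+ */
+static inline void example_notify_rx_desc(struct efx_nic *efx,
+					  unsigned int queue, unsigned int wptr)
+{
+	efx_dword_t reg;
+
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, wptr);
+	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0, queue);
+}
+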
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
+					 FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
+					 FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+/* Default values */
+#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
+#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
+#define FFE_AB_XX_SD_CTL_DRV_DEF 0  /* 20mA */
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+/* XGXS all-lanes status fields */
+#define	FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
+#define	FRF_AB_XX_SYNC_STAT_WIDTH 4
+#define	FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
+#define	FRF_AB_XX_COMMA_DET_WIDTH 4
+#define	FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
+#define	FRF_AB_XX_CHAR_ERR_WIDTH 4
+#define	FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
+#define	FRF_AB_XX_DISPERR_WIDTH 4
+#define	FFE_AB_XX_STAT_ALL_LANES 0xf
+#define	FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
+#define	FRF_AB_XX_FORCE_SIG_WIDTH 8
+#define	FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
+
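+/* Illustrative sketch, not part of the original patch: using the all-lanes
+ * pseudo-fields to test whether every XAUI lane is synchronised and the
+ * lanes are aligned.  Assumes efx_reado() and EFX_OWORD_FIELD() from the
+ * driver's headers.
+ */
+static inline bool example_xaui_lanes_ok(struct efx_nic *efx)
+{
+	efx_oword_t reg;
+
+	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+	return EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT) ==
+			FFE_AB_XX_STAT_ALL_LANES &&
+	       EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+}
+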
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+/* EVENT_ENTRY */
+/* Magic number field for event test */
+#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
+#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon MAC stats
+ *
+ **************************************************************************
+ *
+ */
+
+#define GRxGoodOct_offset 0x0
+#define GRxGoodOct_WIDTH 48
+#define GRxBadOct_offset 0x8
+#define GRxBadOct_WIDTH 48
+#define GRxMissPkt_offset 0x10
+#define GRxMissPkt_WIDTH 32
+#define GRxFalseCRS_offset 0x14
+#define GRxFalseCRS_WIDTH 32
+#define GRxPausePkt_offset 0x18
+#define GRxPausePkt_WIDTH 32
+#define GRxBadPkt_offset 0x1C
+#define GRxBadPkt_WIDTH 32
+#define GRxUcastPkt_offset 0x20
+#define GRxUcastPkt_WIDTH 32
+#define GRxMcastPkt_offset 0x24
+#define GRxMcastPkt_WIDTH 32
+#define GRxBcastPkt_offset 0x28
+#define GRxBcastPkt_WIDTH 32
+#define GRxGoodLt64Pkt_offset 0x2C
+#define GRxGoodLt64Pkt_WIDTH 32
+#define GRxBadLt64Pkt_offset 0x30
+#define GRxBadLt64Pkt_WIDTH 32
+#define GRx64Pkt_offset 0x34
+#define GRx64Pkt_WIDTH 32
+#define GRx65to127Pkt_offset 0x38
+#define GRx65to127Pkt_WIDTH 32
+#define GRx128to255Pkt_offset 0x3C
+#define GRx128to255Pkt_WIDTH 32
+#define GRx256to511Pkt_offset 0x40
+#define GRx256to511Pkt_WIDTH 32
+#define GRx512to1023Pkt_offset 0x44
+#define GRx512to1023Pkt_WIDTH 32
+#define GRx1024to15xxPkt_offset 0x48
+#define GRx1024to15xxPkt_WIDTH 32
+#define GRx15xxtoJumboPkt_offset 0x4C
+#define GRx15xxtoJumboPkt_WIDTH 32
+#define GRxGtJumboPkt_offset 0x50
+#define GRxGtJumboPkt_WIDTH 32
+#define GRxFcsErr64to15xxPkt_offset 0x54
+#define GRxFcsErr64to15xxPkt_WIDTH 32
+#define GRxFcsErr15xxtoJumboPkt_offset 0x58
+#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
+#define GRxFcsErrGtJumboPkt_offset 0x5C
+#define GRxFcsErrGtJumboPkt_WIDTH 32
+#define GTxGoodBadOct_offset 0x80
+#define GTxGoodBadOct_WIDTH 48
+#define GTxGoodOct_offset 0x88
+#define GTxGoodOct_WIDTH 48
+#define GTxSglColPkt_offset 0x90
+#define GTxSglColPkt_WIDTH 32
+#define GTxMultColPkt_offset 0x94
+#define GTxMultColPkt_WIDTH 32
+#define GTxExColPkt_offset 0x98
+#define GTxExColPkt_WIDTH 32
+#define GTxDefPkt_offset 0x9C
+#define GTxDefPkt_WIDTH 32
+#define GTxLateCol_offset 0xA0
+#define GTxLateCol_WIDTH 32
+#define GTxExDefPkt_offset 0xA4
+#define GTxExDefPkt_WIDTH 32
+#define GTxPausePkt_offset 0xA8
+#define GTxPausePkt_WIDTH 32
+#define GTxBadPkt_offset 0xAC
+#define GTxBadPkt_WIDTH 32
+#define GTxUcastPkt_offset 0xB0
+#define GTxUcastPkt_WIDTH 32
+#define GTxMcastPkt_offset 0xB4
+#define GTxMcastPkt_WIDTH 32
+#define GTxBcastPkt_offset 0xB8
+#define GTxBcastPkt_WIDTH 32
+#define GTxLt64Pkt_offset 0xBC
+#define GTxLt64Pkt_WIDTH 32
+#define GTx64Pkt_offset 0xC0
+#define GTx64Pkt_WIDTH 32
+#define GTx65to127Pkt_offset 0xC4
+#define GTx65to127Pkt_WIDTH 32
+#define GTx128to255Pkt_offset 0xC8
+#define GTx128to255Pkt_WIDTH 32
+#define GTx256to511Pkt_offset 0xCC
+#define GTx256to511Pkt_WIDTH 32
+#define GTx512to1023Pkt_offset 0xD0
+#define GTx512to1023Pkt_WIDTH 32
+#define GTx1024to15xxPkt_offset 0xD4
+#define GTx1024to15xxPkt_WIDTH 32
+#define GTx15xxtoJumboPkt_offset 0xD8
+#define GTx15xxtoJumboPkt_WIDTH 32
+#define GTxGtJumboPkt_offset 0xDC
+#define GTxGtJumboPkt_WIDTH 32
+#define GTxNonTcpUdpPkt_offset 0xE0
+#define GTxNonTcpUdpPkt_WIDTH 16
+#define GTxMacSrcErrPkt_offset 0xE4
+#define GTxMacSrcErrPkt_WIDTH 16
+#define GTxIpSrcErrPkt_offset 0xE8
+#define GTxIpSrcErrPkt_WIDTH 16
+#define GDmaDone_offset 0xEC
+#define GDmaDone_WIDTH 32
+
+#define XgRxOctets_offset 0x0
+#define XgRxOctets_WIDTH 48
+#define XgRxOctetsOK_offset 0x8
+#define XgRxOctetsOK_WIDTH 48
+#define XgRxPkts_offset 0x10
+#define XgRxPkts_WIDTH 32
+#define XgRxPktsOK_offset 0x14
+#define XgRxPktsOK_WIDTH 32
+#define XgRxBroadcastPkts_offset 0x18
+#define XgRxBroadcastPkts_WIDTH 32
+#define XgRxMulticastPkts_offset 0x1C
+#define XgRxMulticastPkts_WIDTH 32
+#define XgRxUnicastPkts_offset 0x20
+#define XgRxUnicastPkts_WIDTH 32
+#define XgRxUndersizePkts_offset 0x24
+#define XgRxUndersizePkts_WIDTH 32
+#define XgRxOversizePkts_offset 0x28
+#define XgRxOversizePkts_WIDTH 32
+#define XgRxJabberPkts_offset 0x2C
+#define XgRxJabberPkts_WIDTH 32
+#define XgRxUndersizeFCSerrorPkts_offset 0x30
+#define XgRxUndersizeFCSerrorPkts_WIDTH 32
+#define XgRxDropEvents_offset 0x34
+#define XgRxDropEvents_WIDTH 32
+#define XgRxFCSerrorPkts_offset 0x38
+#define XgRxFCSerrorPkts_WIDTH 32
+#define XgRxAlignError_offset 0x3C
+#define XgRxAlignError_WIDTH 32
+#define XgRxSymbolError_offset 0x40
+#define XgRxSymbolError_WIDTH 32
+#define XgRxInternalMACError_offset 0x44
+#define XgRxInternalMACError_WIDTH 32
+#define XgRxControlPkts_offset 0x48
+#define XgRxControlPkts_WIDTH 32
+#define XgRxPausePkts_offset 0x4C
+#define XgRxPausePkts_WIDTH 32
+#define XgRxPkts64Octets_offset 0x50
+#define XgRxPkts64Octets_WIDTH 32
+#define XgRxPkts65to127Octets_offset 0x54
+#define XgRxPkts65to127Octets_WIDTH 32
+#define XgRxPkts128to255Octets_offset 0x58
+#define XgRxPkts128to255Octets_WIDTH 32
+#define XgRxPkts256to511Octets_offset 0x5C
+#define XgRxPkts256to511Octets_WIDTH 32
+#define XgRxPkts512to1023Octets_offset 0x60
+#define XgRxPkts512to1023Octets_WIDTH 32
+#define XgRxPkts1024to15xxOctets_offset 0x64
+#define XgRxPkts1024to15xxOctets_WIDTH 32
+#define XgRxPkts15xxtoMaxOctets_offset 0x68
+#define XgRxPkts15xxtoMaxOctets_WIDTH 32
+#define XgRxLengthError_offset 0x6C
+#define XgRxLengthError_WIDTH 32
+#define XgTxPkts_offset 0x80
+#define XgTxPkts_WIDTH 32
+#define XgTxOctets_offset 0x88
+#define XgTxOctets_WIDTH 48
+#define XgTxMulticastPkts_offset 0x90
+#define XgTxMulticastPkts_WIDTH 32
+#define XgTxBroadcastPkts_offset 0x94
+#define XgTxBroadcastPkts_WIDTH 32
+#define XgTxUnicastPkts_offset 0x98
+#define XgTxUnicastPkts_WIDTH 32
+#define XgTxControlPkts_offset 0x9C
+#define XgTxControlPkts_WIDTH 32
+#define XgTxPausePkts_offset 0xA0
+#define XgTxPausePkts_WIDTH 32
+#define XgTxPkts64Octets_offset 0xA4
+#define XgTxPkts64Octets_WIDTH 32
+#define XgTxPkts65to127Octets_offset 0xA8
+#define XgTxPkts65to127Octets_WIDTH 32
+#define XgTxPkts128to255Octets_offset 0xAC
+#define XgTxPkts128to255Octets_WIDTH 32
+#define XgTxPkts256to511Octets_offset 0xB0
+#define XgTxPkts256to511Octets_WIDTH 32
+#define XgTxPkts512to1023Octets_offset 0xB4
+#define XgTxPkts512to1023Octets_WIDTH 32
+#define XgTxPkts1024to15xxOctets_offset 0xB8
+#define XgTxPkts1024to15xxOctets_WIDTH 32
+#define XgTxPkts1519toMaxOctets_offset 0xBC
+#define XgTxPkts1519toMaxOctets_WIDTH 32
+#define XgTxUndersizePkts_offset 0xC0
+#define XgTxUndersizePkts_WIDTH 32
+#define XgTxOversizePkts_offset 0xC4
+#define XgTxOversizePkts_WIDTH 32
+#define XgTxNonTcpUdpPkt_offset 0xC8
+#define XgTxNonTcpUdpPkt_WIDTH 16
+#define XgTxMacSrcErrPkt_offset 0xCC
+#define XgTxMacSrcErrPkt_WIDTH 16
+#define XgTxIpSrcErrPkt_offset 0xD0
+#define XgTxIpSrcErrPkt_WIDTH 16
+#define XgDmaDone_offset 0xD4
+#define XgDmaDone_WIDTH 32
+
+#define FALCON_STATS_NOT_DONE 0x00000000
+#define FALCON_STATS_DONE 0xffffffff
+
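+/* Illustrative sketch, not part of the original patch: the driver is assumed
+ * to seed the completion word in the statistics DMA buffer with
+ * FALCON_STATS_NOT_DONE before starting a transfer, then poll for the
+ * hardware to overwrite it.  Both marker values are endian-invariant, so a
+ * plain comparison suffices.
+ */
+static inline bool example_stats_dma_complete(const volatile u32 *dma_done)
+{
+	return *dma_done == FALCON_STATS_DONE;
+}
+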
+/* Interrupt status register bits */
+#define FATAL_INT_LBN 64
+#define FATAL_INT_WIDTH 1
+#define INT_EVQS_LBN 40
+#define INT_EVQS_WIDTH 4
+#define INT_FLAG_LBN 32
+#define INT_FLAG_WIDTH 1
+#define EVQ_FIFO_HF_LBN 1
+#define EVQ_FIFO_HF_WIDTH 1
+#define EVQ_FIFO_AF_LBN 0
+#define EVQ_FIFO_AF_WIDTH 1
+
+/**************************************************************************
+ *
+ * Falcon non-volatile configuration
+ *
+ **************************************************************************
+ */
+
+/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
+struct falcon_nvconfig_board_v2 {
+	__le16 nports;
+	u8 port0_phy_addr;
+	u8 port0_phy_type;
+	u8 port1_phy_addr;
+	u8 port1_phy_type;
+	__le16 asic_sub_revision;
+	__le16 board_revision;
+} __packed;
+
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+	__le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field)					\
+	(((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
+
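+/* Illustrative sketch, not part of the original patch: unpacking one of the
+ * fields above from a spi_device_type word (the __le32 values carried in
+ * struct falcon_nvconfig_board_v3 below).
+ */
+static inline unsigned int example_spi_erase_command(__le32 device_type)
+{
+	u32 type = le32_to_cpu(device_type);
+
+	return SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_ERASE_CMD);
+}
+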
+#define FALCON_NVCONFIG_OFFSET 0x300
+
+#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
+struct falcon_nvconfig {
+	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
+	u8 mac_address[2][8];			/* 0x310 */
+	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
+	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
+	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
+	efx_oword_t hw_init_reg;			/* 0x350 */
+	efx_oword_t nic_stat_reg;			/* 0x360 */
+	efx_oword_t glb_ctl_reg;			/* 0x370 */
+	efx_oword_t srm_cfg_reg;			/* 0x380 */
+	efx_oword_t spare_reg;				/* 0x390 */
+	__le16 board_magic_num;			/* 0x3A0 */
+	__le16 board_struct_ver;
+	__le16 board_checksum;
+	struct falcon_nvconfig_board_v2 board_v2;
+	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
+	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
+} __packed;
+
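+/* Illustrative sketch, not part of the original patch: a minimal sanity
+ * check on a configuration block read from flash/EEPROM.  Whether and how
+ * the driver also verifies board_struct_ver and board_checksum is not shown
+ * here.
+ */
+static inline bool example_nvconfig_magic_ok(const struct falcon_nvconfig *cfg)
+{
+	return le16_to_cpu(cfg->board_magic_num) ==
+			FALCON_NVCONFIG_BOARD_MAGIC_NUM;
+}
+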
+#endif /* EFX_REGS_H */
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 01f9432..a60c718 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -61,7 +61,7 @@
  *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
  *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
  */
-static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;
+static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
 
 #define RX_ALLOC_LEVEL_LRO 0x2000
 #define RX_ALLOC_LEVEL_MAX 0x3000
@@ -293,8 +293,7 @@
 	 * fill anyway.
 	 */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level >
-			    rx_queue->efx->type->rxd_ring_mask + 1);
+	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 
 	/* Don't fill if we don't need to */
 	if (fill_level >= rx_queue->fast_fill_trigger)
@@ -316,8 +315,7 @@
  retry:
 	/* Recalculate current fill level now that we have the lock */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level >
-			    rx_queue->efx->type->rxd_ring_mask + 1);
+	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 	space = rx_queue->fast_fill_limit - fill_level;
 	if (space < EFX_RX_BATCH)
 		goto out_unlock;
@@ -329,8 +327,7 @@
 
 	do {
 		for (i = 0; i < EFX_RX_BATCH; ++i) {
-			index = (rx_queue->added_count &
-				 rx_queue->efx->type->rxd_ring_mask);
+			index = rx_queue->added_count & EFX_RXQ_MASK;
 			rx_buf = efx_rx_buffer(rx_queue, index);
 			rc = efx_init_rx_buffer(rx_queue, rx_buf);
 			if (unlikely(rc))
@@ -444,9 +441,11 @@
  * the appropriate LRO method
  */
 static void efx_rx_packet_lro(struct efx_channel *channel,
-			      struct efx_rx_buffer *rx_buf)
+			      struct efx_rx_buffer *rx_buf,
+			      bool checksummed)
 {
 	struct napi_struct *napi = &channel->napi_str;
+	gro_result_t gro_result;
 
 	/* Pass the skb/page into the LRO engine */
 	if (rx_buf->page) {
@@ -454,6 +453,7 @@
 
 		if (!skb) {
 			put_page(rx_buf->page);
+			gro_result = GRO_DROP;
 			goto out;
 		}
 
@@ -466,19 +466,28 @@
 		skb->len = rx_buf->len;
 		skb->data_len = rx_buf->len;
 		skb->truesize += rx_buf->len;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->ip_summed =
+			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
 
-		napi_gro_frags(napi);
+		gro_result = napi_gro_frags(napi);
 
 out:
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
 		rx_buf->page = NULL;
 	} else {
 		EFX_BUG_ON_PARANOID(!rx_buf->skb);
+		EFX_BUG_ON_PARANOID(!checksummed);
 
-		napi_gro_receive(napi, rx_buf->skb);
+		gro_result = napi_gro_receive(napi, rx_buf->skb);
 		rx_buf->skb = NULL;
 	}
+
+	if (gro_result == GRO_NORMAL) {
+		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+	} else if (gro_result != GRO_DROP) {
+		channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+		channel->irq_mod_score += 2;
+	}
 }
 
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
@@ -570,7 +579,7 @@
 	}
 
 	if (likely(checksummed || rx_buf->page)) {
-		efx_rx_packet_lro(channel, rx_buf);
+		efx_rx_packet_lro(channel, rx_buf, checksummed);
 		goto done;
 	}
 
@@ -629,7 +638,7 @@
 	EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
 
 	/* Allocate RX buffers */
-	rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
+	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
 	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
 	if (!rx_queue->buffer)
 		return -ENOMEM;
@@ -644,7 +653,6 @@
 
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
-	struct efx_nic *efx = rx_queue->efx;
 	unsigned int max_fill, trigger, limit;
 
 	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
@@ -657,7 +665,7 @@
 	rx_queue->min_overfill = -1U;
 
 	/* Initialise limit fields */
-	max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
+	max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
 	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
 	limit = max_fill * min(rx_refill_limit, 100U) / 100U;
 
@@ -680,7 +688,7 @@
 
 	/* Release RX buffers NB start at index 0 not current HW ptr */
 	if (rx_queue->buffer) {
-		for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
+		for (i = 0; i <= EFX_RXQ_MASK; i++) {
 			rx_buf = efx_rx_buffer(rx_queue, i);
 			efx_fini_rx_buffer(rx_queue, rx_buf);
 		}
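
The hunks above replace the per-NIC rxd_ring_mask with the compile-time EFX_RXQ_SIZE/EFX_RXQ_MASK pair, and the free-running added_count/removed_count counters become ring indices by masking. A minimal sketch of that accounting, assuming (as the masking requires) a power-of-two ring size; the toy_* names are illustrative, not part of the driver:

	#define TOY_RXQ_SIZE 1024u			/* stands in for EFX_RXQ_SIZE */
	#define TOY_RXQ_MASK (TOY_RXQ_SIZE - 1)		/* stands in for EFX_RXQ_MASK */

	struct toy_rxq {
		unsigned added_count;			/* buffers ever pushed to hardware */
		unsigned removed_count;			/* buffers ever completed */
	};

	/* Fill level: unsigned subtraction stays correct across wraparound. */
	static unsigned toy_fill_level(const struct toy_rxq *q)
	{
		return q->added_count - q->removed_count;
	}

	/* Ring slot for the next buffer to add. */
	static unsigned toy_next_index(const struct toy_rxq *q)
	{
		return q->added_count & TOY_RXQ_MASK;
	}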
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 817c7ef..7a9386f 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -24,10 +24,9 @@
 #include "efx.h"
 #include "falcon.h"
 #include "selftest.h"
-#include "boards.h"
 #include "workarounds.h"
 #include "spi.h"
-#include "falcon_io.h"
+#include "io.h"
 #include "mdio_10g.h"
 
 /*
@@ -527,7 +526,7 @@
 
 	for (i = 0; i < 3; i++) {
 		/* Determine how many packets to send */
-		state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
+		state->packet_count = EFX_TXQ_SIZE / 3;
 		state->packet_count = min(1 << (i << 2), state->packet_count);
 		state->skbs = kzalloc(sizeof(state->skbs[0]) *
 				      state->packet_count, GFP_KERNEL);
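
The loopback self-test above ramps its burst size across the three iterations: min(1 << (i << 2), EFX_TXQ_SIZE / 3) works out to 1, 16 and 256 packets whenever the TX ring holds at least 3 * 256 entries. A small standalone check of that arithmetic; the ring size below is only an assumption:

	#include <stdio.h>

	int main(void)
	{
		unsigned txq_size = 1024;		/* assumed EFX_TXQ_SIZE */
		int i;

		for (i = 0; i < 3; i++) {
			unsigned n = txq_size / 3;	/* 341 */
			if ((1u << (i << 2)) < n)
				n = 1u << (i << 2);
			printf("iteration %d: %u packets\n", i, n);	/* 1, 16, 256 */
		}
		return 0;
	}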
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
deleted file mode 100644
index cee00ad..0000000
--- a/drivers/net/sfc/sfe4001.c
+++ /dev/null
@@ -1,435 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2008 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-/*****************************************************************************
- * Support for the SFE4001 and SFN4111T NICs.
- *
- * The SFE4001 does not power-up fully at reset due to its high power
- * consumption.  We control its power via a PCA9539 I/O expander.
- * Both boards have a MAX6647 temperature monitor which we expose to
- * the lm90 driver.
- *
- * This also provides minimal support for reflashing the PHY, which is
- * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
- * On SFE4001 rev A2 and later this is connected to the 3V3X output of
- * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3.
- * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
- * exclusive with the network device being open.
- */
-
-#include <linux/delay.h>
-#include <linux/rtnetlink.h>
-#include "net_driver.h"
-#include "efx.h"
-#include "phy.h"
-#include "boards.h"
-#include "falcon.h"
-#include "falcon_hwdefs.h"
-#include "falcon_io.h"
-#include "mac.h"
-#include "workarounds.h"
-
-/**************************************************************************
- *
- * I2C IO Expander device
- *
- **************************************************************************/
-#define	PCA9539 0x74
-
-#define	P0_IN 0x00
-#define	P0_OUT 0x02
-#define	P0_INVERT 0x04
-#define	P0_CONFIG 0x06
-
-#define	P0_EN_1V0X_LBN 0
-#define	P0_EN_1V0X_WIDTH 1
-#define	P0_EN_1V2_LBN 1
-#define	P0_EN_1V2_WIDTH 1
-#define	P0_EN_2V5_LBN 2
-#define	P0_EN_2V5_WIDTH 1
-#define	P0_EN_3V3X_LBN 3
-#define	P0_EN_3V3X_WIDTH 1
-#define	P0_EN_5V_LBN 4
-#define	P0_EN_5V_WIDTH 1
-#define	P0_SHORTEN_JTAG_LBN 5
-#define	P0_SHORTEN_JTAG_WIDTH 1
-#define	P0_X_TRST_LBN 6
-#define	P0_X_TRST_WIDTH 1
-#define	P0_DSP_RESET_LBN 7
-#define	P0_DSP_RESET_WIDTH 1
-
-#define	P1_IN 0x01
-#define	P1_OUT 0x03
-#define	P1_INVERT 0x05
-#define	P1_CONFIG 0x07
-
-#define	P1_AFE_PWD_LBN 0
-#define	P1_AFE_PWD_WIDTH 1
-#define	P1_DSP_PWD25_LBN 1
-#define	P1_DSP_PWD25_WIDTH 1
-#define	P1_RESERVED_LBN 2
-#define	P1_RESERVED_WIDTH 2
-#define	P1_SPARE_LBN 4
-#define	P1_SPARE_WIDTH 4
-
-/* Temperature Sensor */
-#define MAX664X_REG_RSL		0x02
-#define MAX664X_REG_WLHO	0x0B
-
-static void sfe4001_poweroff(struct efx_nic *efx)
-{
-	struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
-	struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
-
-	/* Turn off all power rails and disable outputs */
-	i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
-	i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
-	i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
-
-	/* Clear any over-temperature alert */
-	i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
-}
-
-static int sfe4001_poweron(struct efx_nic *efx)
-{
-	struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
-	struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
-	unsigned int i, j;
-	int rc;
-	u8 out;
-
-	/* Clear any previous over-temperature alert */
-	rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
-	if (rc < 0)
-		return rc;
-
-	/* Enable port 0 and port 1 outputs on IO expander */
-	rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
-	if (rc)
-		return rc;
-	rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
-				       0xff & ~(1 << P1_SPARE_LBN));
-	if (rc)
-		goto fail_on;
-
-	/* If PHY power is on, turn it all off and wait 1 second to
-	 * ensure a full reset.
-	 */
-	rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
-	if (rc < 0)
-		goto fail_on;
-	out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
-		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
-		       (0 << P0_EN_1V0X_LBN));
-	if (rc != out) {
-		EFX_INFO(efx, "power-cycling PHY\n");
-		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
-		if (rc)
-			goto fail_on;
-		schedule_timeout_uninterruptible(HZ);
-	}
-
-	for (i = 0; i < 20; ++i) {
-		/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
-		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
-			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
-			       (1 << P0_X_TRST_LBN));
-		if (efx->phy_mode & PHY_MODE_SPECIAL)
-			out |= 1 << P0_EN_3V3X_LBN;
-
-		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
-		if (rc)
-			goto fail_on;
-		msleep(10);
-
-		/* Turn on 1V power rail */
-		out &= ~(1 << P0_EN_1V0X_LBN);
-		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
-		if (rc)
-			goto fail_on;
-
-		EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
-
-		/* In flash config mode, DSP does not turn on AFE, so
-		 * just wait 1 second.
-		 */
-		if (efx->phy_mode & PHY_MODE_SPECIAL) {
-			schedule_timeout_uninterruptible(HZ);
-			return 0;
-		}
-
-		for (j = 0; j < 10; ++j) {
-			msleep(100);
-
-			/* Check DSP has asserted AFE power line */
-			rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
-			if (rc < 0)
-				goto fail_on;
-			if (rc & (1 << P1_AFE_PWD_LBN))
-				return 0;
-		}
-	}
-
-	EFX_INFO(efx, "timed out waiting for DSP boot\n");
-	rc = -ETIMEDOUT;
-fail_on:
-	sfe4001_poweroff(efx);
-	return rc;
-}
-
-static int sfn4111t_reset(struct efx_nic *efx)
-{
-	efx_oword_t reg;
-
-	/* GPIO 3 and the GPIO register are shared with I2C, so block that */
-	mutex_lock(&efx->i2c_adap.bus_lock);
-
-	/* Pull RST_N (GPIO 2) low then let it up again, setting the
-	 * FLASH_CFG_1 strap (GPIO 3) appropriately.  Only change the
-	 * output enables; the output levels should always be 0 (low)
-	 * and we rely on external pull-ups. */
-	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
-	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
-	msleep(1000);
-	EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false);
-	EFX_SET_OWORD_FIELD(reg, GPIO3_OEN,
-			    !!(efx->phy_mode & PHY_MODE_SPECIAL));
-	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
-	msleep(1);
-
-	mutex_unlock(&efx->i2c_adap.bus_lock);
-
-	ssleep(1);
-	return 0;
-}
-
-static ssize_t show_phy_flash_cfg(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
-	return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
-}
-
-static ssize_t set_phy_flash_cfg(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf, size_t count)
-{
-	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
-	enum efx_phy_mode old_mode, new_mode;
-	int err;
-
-	rtnl_lock();
-	old_mode = efx->phy_mode;
-	if (count == 0 || *buf == '0')
-		new_mode = old_mode & ~PHY_MODE_SPECIAL;
-	else
-		new_mode = PHY_MODE_SPECIAL;
-	if (old_mode == new_mode) {
-		err = 0;
-	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
-		err = -EBUSY;
-	} else {
-		/* Reset the PHY, reconfigure the MAC and enable/disable
-		 * MAC stats accordingly. */
-		efx->phy_mode = new_mode;
-		if (new_mode & PHY_MODE_SPECIAL)
-			efx_stats_disable(efx);
-		if (efx->board_info.type == EFX_BOARD_SFE4001)
-			err = sfe4001_poweron(efx);
-		else
-			err = sfn4111t_reset(efx);
-		efx_reconfigure_port(efx);
-		if (!(new_mode & PHY_MODE_SPECIAL))
-			efx_stats_enable(efx);
-	}
-	rtnl_unlock();
-
-	return err ? err : count;
-}
-
-static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
-
-static void sfe4001_fini(struct efx_nic *efx)
-{
-	EFX_INFO(efx, "%s\n", __func__);
-
-	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
-	sfe4001_poweroff(efx);
-	i2c_unregister_device(efx->board_info.ioexp_client);
-	i2c_unregister_device(efx->board_info.hwmon_client);
-}
-
-static int sfe4001_check_hw(struct efx_nic *efx)
-{
-	s32 status;
-
-	/* If XAUI link is up then do not monitor */
-	if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
-		return 0;
-
-	/* Check the powered status of the PHY. Lack of power implies that
-	 * the MAX6647 has shut down power to it, probably due to a temp.
-	 * alarm. Reading the power status rather than the MAX6647 status
-	 * directly because the later is read-to-clear and would thus
-	 * start to power up the PHY again when polled, causing us to blip
-	 * the power undesirably.
-	 * We know we can read from the IO expander because we did
-	 * it during power-on. Assume failure now is bad news. */
-	status = i2c_smbus_read_byte_data(efx->board_info.ioexp_client, P1_IN);
-	if (status >= 0 &&
-	    (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
-		return 0;
-
-	/* Use board power control, not PHY power control */
-	sfe4001_poweroff(efx);
-	efx->phy_mode = PHY_MODE_OFF;
-
-	return (status < 0) ? -EIO : -ERANGE;
-}
-
-static struct i2c_board_info sfe4001_hwmon_info = {
-	I2C_BOARD_INFO("max6647", 0x4e),
-};
-
-/* This board uses an I2C expander to provider power to the PHY, which needs to
- * be turned on before the PHY can be used.
- * Context: Process context, rtnl lock held
- */
-int sfe4001_init(struct efx_nic *efx)
-{
-	int rc;
-
-#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
-	efx->board_info.hwmon_client =
-		i2c_new_device(&efx->i2c_adap, &sfe4001_hwmon_info);
-#else
-	efx->board_info.hwmon_client =
-		i2c_new_dummy(&efx->i2c_adap, sfe4001_hwmon_info.addr);
-#endif
-	if (!efx->board_info.hwmon_client)
-		return -EIO;
-
-	/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
-	rc = i2c_smbus_write_byte_data(efx->board_info.hwmon_client,
-				       MAX664X_REG_WLHO, 90);
-	if (rc)
-		goto fail_hwmon;
-
-	efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
-	if (!efx->board_info.ioexp_client) {
-		rc = -EIO;
-		goto fail_hwmon;
-	}
-
-	/* 10Xpress has fixed-function LED pins, so there is no board-specific
-	 * blink code. */
-	efx->board_info.blink = tenxpress_phy_blink;
-
-	efx->board_info.monitor = sfe4001_check_hw;
-	efx->board_info.fini = sfe4001_fini;
-
-	if (efx->phy_mode & PHY_MODE_SPECIAL) {
-		/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
-		 * will fail. */
-		efx_stats_disable(efx);
-	}
-	rc = sfe4001_poweron(efx);
-	if (rc)
-		goto fail_ioexp;
-
-	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
-	if (rc)
-		goto fail_on;
-
-	EFX_INFO(efx, "PHY is powered on\n");
-	return 0;
-
-fail_on:
-	sfe4001_poweroff(efx);
-fail_ioexp:
-	i2c_unregister_device(efx->board_info.ioexp_client);
-fail_hwmon:
-	i2c_unregister_device(efx->board_info.hwmon_client);
-	return rc;
-}
-
-static int sfn4111t_check_hw(struct efx_nic *efx)
-{
-	s32 status;
-
-	/* If XAUI link is up then do not monitor */
-	if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
-		return 0;
-
-	/* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
-	status = i2c_smbus_read_byte_data(efx->board_info.hwmon_client,
-					  MAX664X_REG_RSL);
-	if (status < 0)
-		return -EIO;
-	if (status & 0x57)
-		return -ERANGE;
-	return 0;
-}
-
-static void sfn4111t_fini(struct efx_nic *efx)
-{
-	EFX_INFO(efx, "%s\n", __func__);
-
-	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
-	i2c_unregister_device(efx->board_info.hwmon_client);
-}
-
-static struct i2c_board_info sfn4111t_a0_hwmon_info = {
-	I2C_BOARD_INFO("max6647", 0x4e),
-};
-
-static struct i2c_board_info sfn4111t_r5_hwmon_info = {
-	I2C_BOARD_INFO("max6646", 0x4d),
-};
-
-int sfn4111t_init(struct efx_nic *efx)
-{
-	int i = 0;
-	int rc;
-
-	efx->board_info.hwmon_client =
-		i2c_new_device(&efx->i2c_adap,
-			       (efx->board_info.minor < 5) ?
-			       &sfn4111t_a0_hwmon_info :
-			       &sfn4111t_r5_hwmon_info);
-	if (!efx->board_info.hwmon_client)
-		return -EIO;
-
-	efx->board_info.blink = tenxpress_phy_blink;
-	efx->board_info.monitor = sfn4111t_check_hw;
-	efx->board_info.fini = sfn4111t_fini;
-
-	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
-	if (rc)
-		goto fail_hwmon;
-
-	do {
-		if (efx->phy_mode & PHY_MODE_SPECIAL) {
-			/* PHY may not generate a 156.25 MHz clock and MAC
-			 * stats fetch will fail. */
-			efx_stats_disable(efx);
-			sfn4111t_reset(efx);
-		}
-		rc = sft9001_wait_boot(efx);
-		if (rc == 0)
-			return 0;
-		efx->phy_mode = PHY_MODE_SPECIAL;
-	} while (rc == -EINVAL && ++i < 2);
-
-	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
-fail_hwmon:
-	i2c_unregister_device(efx->board_info.hwmon_client);
-	return rc;
-}
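
The board support removed above drives the PCA9539 rails through active-low bits in P0_OUT: the register is written with all ones to power everything off, and individual bits are cleared to switch rails on. A small sketch of that encoding; bit positions follow the P0_EN_*_LBN values in the deleted file, and the helper itself is illustrative, not part of the driver:

	/* Build a P0_OUT value with the requested rails enabled (bits cleared). */
	static unsigned char toy_p0_out(int want_1v2, int want_3v3x)
	{
		unsigned char out = 0xff;		/* all rails off */

		if (want_1v2)
			out &= ~(1 << 1);		/* P0_EN_1V2_LBN */
		if (want_3v3x)
			out &= ~(1 << 3);		/* P0_EN_3V3X_LBN */
		return out;				/* written via i2c_smbus_write_byte_data() */
	}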
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 1a3495c..390b27b 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -14,8 +14,7 @@
 #include "mdio_10g.h"
 #include "falcon.h"
 #include "phy.h"
-#include "falcon_hwdefs.h"
-#include "boards.h"
+#include "regs.h"
 #include "workarounds.h"
 #include "selftest.h"
 
@@ -752,6 +751,7 @@
 
 	mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
 
+	ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
 	if (efx->phy_type != PHY_TYPE_SFX7101) {
 		ecmd->supported |= (SUPPORTED_100baseT_Full |
 				    SUPPORTED_1000baseT_Full);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 489c4de..303919a 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -26,8 +26,7 @@
  * The tx_queue descriptor ring fill-level must fall below this value
  * before we restart the netif queue
  */
-#define EFX_NETDEV_TX_THRESHOLD(_tx_queue)	\
-	(_tx_queue->efx->type->txd_ring_mask / 2u)
+#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
 
 /* We want to be able to nest calls to netif_stop_queue(), since each
  * channel can have an individual stop on the queue.
@@ -125,6 +124,24 @@
 }
 
 
+static inline unsigned
+efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
+{
+	/* Depending on the NIC revision, we can use descriptor
+	 * lengths up to 8K or 8K-1.  However, since PCI Express
+	 * devices must split read requests at 4K boundaries, there is
+	 * little benefit from using descriptors that cross those
+	 * boundaries and we keep things simple by not doing so.
+	 */
+	unsigned len = (~dma_addr & 0xfff) + 1;
+
+	/* Work around hardware bug for unaligned buffers. */
+	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
+		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
+
+	return len;
+}
+
 /*
  * Add a socket buffer to a TX queue
  *
@@ -147,7 +164,7 @@
 	skb_frag_t *fragment;
 	struct page *page;
 	int page_offset;
-	unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
+	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
 	dma_addr_t dma_addr, unmap_addr = 0;
 	unsigned int dma_len;
 	bool unmap_single;
@@ -171,7 +188,7 @@
 	}
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-	q_space = efx->type->txd_ring_mask - 1 - fill_level;
+	q_space = EFX_TXQ_MASK - 1 - fill_level;
 
 	/* Map for DMA.  Use pci_map_single rather than pci_map_page
 	 * since this is more efficient on machines with sparse
@@ -208,16 +225,14 @@
 					&tx_queue->read_count;
 				fill_level = (tx_queue->insert_count
 					      - tx_queue->old_read_count);
-				q_space = (efx->type->txd_ring_mask - 1 -
-					   fill_level);
+				q_space = EFX_TXQ_MASK - 1 - fill_level;
 				if (unlikely(q_space-- <= 0))
 					goto stop;
 				smp_mb();
 				--tx_queue->stopped;
 			}
 
-			insert_ptr = (tx_queue->insert_count &
-				      efx->type->txd_ring_mask);
+			insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 			buffer = &tx_queue->buffer[insert_ptr];
 			efx_tsoh_free(tx_queue, buffer);
 			EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -226,14 +241,10 @@
 			EFX_BUG_ON_PARANOID(!buffer->continuation);
 			EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
-			dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
-			if (likely(dma_len > len))
+			dma_len = efx_max_tx_len(efx, dma_addr);
+			if (likely(dma_len >= len))
 				dma_len = len;
 
-			misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
-			if (misalign && dma_len + misalign > 512)
-				dma_len = 512 - misalign;
-
 			/* Fill out per descriptor fields */
 			buffer->len = dma_len;
 			buffer->dma_addr = dma_addr;
@@ -289,7 +300,7 @@
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
-		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[insert_ptr];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->len = 0;
@@ -318,10 +329,9 @@
 {
 	struct efx_nic *efx = tx_queue->efx;
 	unsigned int stop_index, read_ptr;
-	unsigned int mask = tx_queue->efx->type->txd_ring_mask;
 
-	stop_index = (index + 1) & mask;
-	read_ptr = tx_queue->read_count & mask;
+	stop_index = (index + 1) & EFX_TXQ_MASK;
+	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
 
 	while (read_ptr != stop_index) {
 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -338,7 +348,7 @@
 		buffer->len = 0;
 
 		++tx_queue->read_count;
-		read_ptr = tx_queue->read_count & mask;
+		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
 	}
 }
 
@@ -391,7 +401,7 @@
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
 
-	EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);
+	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
 
 	efx_dequeue_buffers(tx_queue, index);
 
@@ -401,7 +411,7 @@
 	smp_mb();
 	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
-		if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
+		if (fill_level < EFX_TXQ_THRESHOLD) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
 
 			/* Do this under netif_tx_lock(), to avoid racing
@@ -425,11 +435,11 @@
 	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
 
 	/* Allocate software ring */
-	txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
+	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
 	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
 	if (!tx_queue->buffer)
 		return -ENOMEM;
-	for (i = 0; i <= efx->type->txd_ring_mask; ++i)
+	for (i = 0; i <= EFX_TXQ_MASK; ++i)
 		tx_queue->buffer[i].continuation = true;
 
 	/* Allocate hardware ring */
@@ -468,8 +478,7 @@
 
 	/* Free any buffers left in the ring */
 	while (tx_queue->read_count != tx_queue->write_count) {
-		buffer = &tx_queue->buffer[tx_queue->read_count &
-					   tx_queue->efx->type->txd_ring_mask];
+		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
 		efx_dequeue_buffer(tx_queue, buffer);
 		buffer->continuation = true;
 		buffer->len = 0;
@@ -708,14 +717,14 @@
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned dma_len, fill_level, insert_ptr, misalign;
+	unsigned dma_len, fill_level, insert_ptr;
 	int q_space;
 
 	EFX_BUG_ON_PARANOID(len <= 0);
 
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
 	/* -1 as there is no way to represent all descriptors used */
-	q_space = efx->type->txd_ring_mask - 1 - fill_level;
+	q_space = EFX_TXQ_MASK - 1 - fill_level;
 
 	while (1) {
 		if (unlikely(q_space-- <= 0)) {
@@ -731,7 +740,7 @@
 				*(volatile unsigned *)&tx_queue->read_count;
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
-			q_space = efx->type->txd_ring_mask - 1 - fill_level;
+			q_space = EFX_TXQ_MASK - 1 - fill_level;
 			if (unlikely(q_space-- <= 0)) {
 				*final_buffer = NULL;
 				return 1;
@@ -740,13 +749,13 @@
 			--tx_queue->stopped;
 		}
 
-		insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
 		buffer = &tx_queue->buffer[insert_ptr];
 		++tx_queue->insert_count;
 
 		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
 				    tx_queue->read_count >
-				    efx->type->txd_ring_mask);
+				    EFX_TXQ_MASK);
 
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->len);
@@ -757,12 +766,7 @@
 
 		buffer->dma_addr = dma_addr;
 
-		/* Ensure we do not cross a boundary unsupported by H/W */
-		dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
-
-		misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
-		if (misalign && dma_len + misalign > 512)
-			dma_len = 512 - misalign;
+		dma_len = efx_max_tx_len(efx, dma_addr);
 
 		/* If there is enough space to send then do so */
 		if (dma_len >= len)
@@ -792,8 +796,7 @@
 {
 	struct efx_tx_buffer *buffer;
 
-	buffer = &tx_queue->buffer[tx_queue->insert_count &
-				   tx_queue->efx->type->txd_ring_mask];
+	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
 	efx_tsoh_free(tx_queue, buffer);
 	EFX_BUG_ON_PARANOID(buffer->len);
 	EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -818,7 +821,7 @@
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
 		buffer = &tx_queue->buffer[tx_queue->insert_count &
-					   tx_queue->efx->type->txd_ring_mask];
+					   EFX_TXQ_MASK];
 		efx_tsoh_free(tx_queue, buffer);
 		EFX_BUG_ON_PARANOID(buffer->skb);
 		buffer->len = 0;
@@ -1135,7 +1138,7 @@
 	unsigned i;
 
 	if (tx_queue->buffer) {
-		for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
+		for (i = 0; i <= EFX_TXQ_MASK; ++i)
 			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
 	}
 
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index c821c15..3250299 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -41,6 +41,8 @@
 
 /* Spurious parity errors in TSORT buffers */
 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
+/* Unaligned read request >512 bytes after aligning may break TSORT */
+#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
 /* iSCSI parsing errors */
 #define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
 /* RX events go missing */
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 161181a..5783f50 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -31,6 +31,8 @@
 #include <linux/cache.h>
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
+#include <asm/cacheflush.h>
+
 #include "sh_eth.h"
 
 /* There is CPU dependent code */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3a449d0..a3d9991 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.25"
+#define DRV_VERSION		"1.26"
 #define PFX			DRV_NAME " "
 
 /*
@@ -102,6 +102,7 @@
 static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, 	/* DGE-550SX */
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
@@ -139,6 +140,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
 	{ 0 }
 };
 
@@ -602,6 +604,16 @@
 		/* apply workaround for integrated resistors calibration */
 		gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
 		gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
+	} else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
+		/* apply fixes in PHY AFE */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
+
+		/* apply RDAC termination workaround */
+		gm_phy_write(hw, port, 24, 0x2800);
+		gm_phy_write(hw, port, 23, 0x2001);
+
+		/* set page register back to 0 */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
 	} else if (hw->chip_id != CHIP_ID_YUKON_EX &&
 		   hw->chip_id < CHIP_ID_YUKON_SUPR) {
 		/* no effect on Yukon-XL */
@@ -786,8 +798,7 @@
 
 	if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
 	      hw->chip_rev != CHIP_REV_YU_EX_A0) ||
-	     hw->chip_id == CHIP_ID_YUKON_FE_P ||
-	     hw->chip_id == CHIP_ID_YUKON_SUPR) {
+	     hw->chip_id >= CHIP_ID_YUKON_FE_P) {
 		/* Yukon-Extreme B0 and further Extreme devices */
 		/* enable Store & Forward mode for TX */
 
@@ -925,8 +936,14 @@
 
 	/* On chips without ram buffer, pause is controled by MAC level */
 	if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
-		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
-		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
+		/* Pause threshold is scaled by 8 in bytes */
+		if (hw->chip_id == CHIP_ID_YUKON_FE_P
+			&& hw->chip_rev == CHIP_REV_YU_FE2_A0)
+			reg = 1568 / 8;
+		else
+			reg = 1024 / 8;
+		sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
+		sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);
 
 		sky2_set_tx_stfwd(hw, port);
 	}
@@ -1397,6 +1414,31 @@
 
 	/* Tell chip about available buffers */
 	sky2_rx_update(sky2, rxq);
+
+	if (hw->chip_id == CHIP_ID_YUKON_EX ||
+	    hw->chip_id == CHIP_ID_YUKON_SUPR) {
+		/*
+		 * Disable flushing of non-ASF packets; this must be done
+		 * after initializing the BMUs.  Drivers without ASF support
+		 * should do this too, otherwise they may fail to run on
+		 * ASF-enabled devices; remember that the MAC FIFO is not
+		 * reset during initialization.
+		 */
+		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
+	}
+
+	if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
+		/* Enable RX Home Address & Routing Header checksum fix */
+		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
+			     RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
+
+		/* Enable TX Home Address & Routing Header checksum fix */
+		sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
+			     TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
+	}
+
 	return 0;
 nomem:
 	sky2_rx_clean(sky2);
@@ -2096,6 +2138,25 @@
 	spin_unlock(&sky2->phy_lock);
 }
 
+/* Special quick link interrupt (Yukon-2 Optima only) */
+static void sky2_qlink_intr(struct sky2_hw *hw)
+{
+	struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
+	u32 imask;
+	u16 phy;
+
+	/* disable irq */
+	imask = sky2_read32(hw, B0_IMSK);
+	imask &= ~Y2_IS_PHY_QLNK;
+	sky2_write32(hw, B0_IMSK, imask);
+
+	/* reset PHY Link Detect */
+	phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
+	sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
+
+	sky2_link_up(sky2);
+}
+
 /* Transmit timeout is only called if we are running, carrier is up
  * and tx queue is full (stopped).
  */
@@ -2765,6 +2826,9 @@
 	if (status & Y2_IS_IRQ_PHY2)
 		sky2_phy_intr(hw, 1);
 
+	if (status & Y2_IS_PHY_QLNK)
+		sky2_qlink_intr(hw);
+
 	while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
 		work_done += sky2_status_intr(hw, work_limit - work_done, idx);
 
@@ -2814,6 +2878,7 @@
 	case CHIP_ID_YUKON_EX:
 	case CHIP_ID_YUKON_SUPR:
 	case CHIP_ID_YUKON_UL_2:
+	case CHIP_ID_YUKON_OPT:
 		return 125;
 
 	case CHIP_ID_YUKON_FE:
@@ -2903,6 +2968,7 @@
 		break;
 
 	case CHIP_ID_YUKON_UL_2:
+	case CHIP_ID_YUKON_OPT:
 		hw->flags = SKY2_HW_GIGABIT
 			| SKY2_HW_ADV_POWER_CTL;
 		break;
@@ -2985,6 +3051,52 @@
 			sky2_write16(hw, SK_REG(i, GMAC_CTRL),
 				     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
 				     | GMC_BYP_RETR_ON);
+
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
+		/* enable MACSec clock gating */
+		sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_OPT) {
+		u16 reg;
+		u32 msk;
+
+		if (hw->chip_rev == 0) {
+			/* disable PCI-E PHY power down (set PHY reg 0x80, bit 7) */
+			sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
+
+			/* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
+			reg = 10;
+		} else {
+			/* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
+			reg = 3;
+		}
+
+		reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
+
+		/* reset PHY Link Detect */
+		sky2_pci_write16(hw, PSM_CONFIG_REG4,
+				 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
+		sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
+
+
+		/* enable PHY Quick Link */
+		msk = sky2_read32(hw, B0_IMSK);
+		msk |= Y2_IS_PHY_QLNK;
+		sky2_write32(hw, B0_IMSK, msk);
+
+		/* check if PSMv2 was running before */
+		reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
+		if (reg & PCI_EXP_LNKCTL_ASPMC) {
+			int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+			/* restore the PCIe Link Control register */
+			sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
+		}
+
+		/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
+		sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
 	}
 
 	/* Clear I2C IRQ noise */
@@ -4405,9 +4517,11 @@
 		"FE+",		/* 0xb8 */
 		"Supreme",	/* 0xb9 */
 		"UL 2",		/* 0xba */
+		"Unknown",	/* 0xbb */
+		"Optima",	/* 0xbc */
 	};
 
-	if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_UL_2)
+	if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_OPT)
 		strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
 	else
 		snprintf(buf, sz, "(chip %#x)", chipid);
@@ -4537,6 +4651,8 @@
 		goto err_out_free_netdev;
 	}
 
+	netif_carrier_off(dev);
+
 	netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
 
 	err = request_irq(pdev->irq, sky2_intr,
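
On Yukon Optima the new init code programs the PHY link detect timer field (starting at bit 4 of PSM_CONFIG_REG4) and pulses the reset bit around it; per the comments in the hunk the field appears to count in (value + 1) x 100 ms units, which is why rev 0 uses 10 (1.1 s) and later revisions use 3 (0.4 s). A small sketch of the two register values that sequence produces:

	#include <stdio.h>

	#define TIMER_BASE		4	 /* PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE */
	#define RST_PHY_LINK_DETECT	(1 << 0) /* PSM_CONFIG_REG4_RST_PHY_LINK_DETECT */

	int main(void)
	{
		unsigned reg = 10 << TIMER_BASE;	/* chip rev 0: 1.1 second */

		printf("first write : 0x%04x\n", reg | RST_PHY_LINK_DETECT);	/* 0x00a1 */
		printf("second write: 0x%04x\n", reg);				/* 0x00a0 */
		return 0;
	}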
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index ed54129..365d79c 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -16,6 +16,13 @@
 	PCI_DEV_REG5    = 0x88,
 	PCI_CFG_REG_0	= 0x90,
 	PCI_CFG_REG_1	= 0x94,
+
+	PSM_CONFIG_REG0  = 0x98,
+	PSM_CONFIG_REG1	 = 0x9C,
+	PSM_CONFIG_REG2  = 0x160,
+	PSM_CONFIG_REG3  = 0x164,
+	PSM_CONFIG_REG4  = 0x168,
+
 };
 
 /* Yukon-2 */
@@ -48,6 +55,37 @@
 	PCI_USEDATA64	= 1<<0,		/* Use 64Bit Data bus ext */
 };
 
+/*	PCI_OUR_REG_3		32 bit	Our Register 3 (Yukon-ECU only) */
+enum pci_dev_reg_3 {
+	P_CLK_ASF_REGS_DIS	= 1<<18,/* Disable Clock ASF (Yukon-Ext.) */
+	P_CLK_COR_REGS_D0_DIS	= 1<<17,/* Disable Clock Core Regs D0 */
+	P_CLK_MACSEC_DIS	= 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */
+	P_CLK_PCI_REGS_D0_DIS	= 1<<16,/* Disable Clock PCI  Regs D0 */
+	P_CLK_COR_YTB_ARB_DIS	= 1<<15,/* Disable Clock YTB  Arbiter */
+	P_CLK_MAC_LNK1_D3_DIS	= 1<<14,/* Disable Clock MAC  Link1 D3 */
+	P_CLK_COR_LNK1_D0_DIS	= 1<<13,/* Disable Clock Core Link1 D0 */
+	P_CLK_MAC_LNK1_D0_DIS	= 1<<12,/* Disable Clock MAC  Link1 D0 */
+	P_CLK_COR_LNK1_D3_DIS	= 1<<11,/* Disable Clock Core Link1 D3 */
+	P_CLK_PCI_MST_ARB_DIS	= 1<<10,/* Disable Clock PCI  Master Arb. */
+	P_CLK_COR_REGS_D3_DIS	= 1<<9,	/* Disable Clock Core Regs D3 */
+	P_CLK_PCI_REGS_D3_DIS	= 1<<8,	/* Disable Clock PCI  Regs D3 */
+	P_CLK_REF_LNK1_GM_DIS	= 1<<7,	/* Disable Clock Ref. Link1 GMAC */
+	P_CLK_COR_LNK1_GM_DIS	= 1<<6,	/* Disable Clock Core Link1 GMAC */
+	P_CLK_PCI_COMMON_DIS	= 1<<5,	/* Disable Clock PCI  Common */
+	P_CLK_COR_COMMON_DIS	= 1<<4,	/* Disable Clock Core Common */
+	P_CLK_PCI_LNK1_BMU_DIS	= 1<<3,	/* Disable Clock PCI  Link1 BMU */
+	P_CLK_COR_LNK1_BMU_DIS	= 1<<2,	/* Disable Clock Core Link1 BMU */
+	P_CLK_PCI_LNK1_BIU_DIS	= 1<<1,	/* Disable Clock PCI  Link1 BIU */
+	P_CLK_COR_LNK1_BIU_DIS	= 1<<0,	/* Disable Clock Core Link1 BIU */
+	PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS |
+				    P_CLK_COR_REGS_D0_DIS |
+				    P_CLK_COR_LNK1_D0_DIS |
+				    P_CLK_MAC_LNK1_D0_DIS |
+				    P_CLK_PCI_MST_ARB_DIS |
+				    P_CLK_COR_COMMON_DIS |
+				    P_CLK_COR_LNK1_BMU_DIS,
+};
+
 /*	PCI_OUR_REG_4		32 bit	Our Register 4 (Yukon-ECU only) */
 enum pci_dev_reg_4 {
 				/* (Link Training & Status State Machine) */
@@ -114,7 +152,7 @@
 				     P_GAT_PCIE_RX_EL_IDLE,
 };
 
-#/*	PCI_CFG_REG_1			32 bit	Config Register 1 (Yukon-Ext only) */
+/*	PCI_CFG_REG_1			32 bit	Config Register 1 (Yukon-Ext only) */
 enum pci_cfg_reg1 {
 	P_CF1_DIS_REL_EVT_RST	= 1<<24, /* Dis. Rel. Event during PCIE reset */
 										/* Bit 23..21: Release Clock on Event */
@@ -145,6 +183,72 @@
 					P_CF1_ENA_TXBMU_WR_IDLE,
 };
 
+/* Yukon-Optima */
+enum {
+	PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31,   /* AC Present Status */
+
+	PSM_CONFIG_REG1_PTP_CLK_SEL	  = 1<<29,   /* PTP Clock Select */
+	PSM_CONFIG_REG1_PTP_MODE	  = 1<<28,   /* PTP Mode */
+
+	PSM_CONFIG_REG1_MUX_PHY_LINK	  = 1<<27,   /* PHY Energy Detect Event */
+
+	PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26,  /* Enable LED_DUPLEX for ac_present */
+	PSM_CONFIG_REG1_EN_PCIE_TIMER	  = 1<<25,    /* Enable PCIe Timer */
+	PSM_CONFIG_REG1_EN_SPU_TIMER	  = 1<<24,    /* Enable SPU Timer */
+	PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23,  /* AC Present Polarity */
+
+	PSM_CONFIG_REG1_EN_AC_PRESENT	  = 1<<21,    /* Enable AC Present */
+
+	PSM_CONFIG_REG1_EN_GPHY_INT_PSM	= 1<<20,      /* Enable GPHY INT for PSM */
+	PSM_CONFIG_REG1_DIS_PSM_TIMER	= 1<<19,      /* Disable PSM Timer */
+};
+
+/* Yukon-Supreme */
+enum {
+	PSM_CONFIG_REG1_GPHY_ENERGY_STS	= 1<<31, /* GPHY Energy Detect Status */
+
+	PSM_CONFIG_REG1_UART_MODE_MSK	= 3<<29, /* UART_Mode */
+	PSM_CONFIG_REG1_CLK_RUN_ASF	= 1<<28, /* Enable Clock Free Running for ASF Subsystem */
+	PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */
+	PSM_CONFIG_REG1_VAUX_ONE	= 1<<26, /* Tie internal Vaux to 1'b1 */
+	PSM_CONFIG_REG1_UART_FC_RI_VAL	= 1<<25, /* Default value for UART_RI_n */
+	PSM_CONFIG_REG1_UART_FC_DCD_VAL	= 1<<24, /* Default value for UART_DCD_n */
+	PSM_CONFIG_REG1_UART_FC_DSR_VAL	= 1<<23, /* Default value for UART_DSR_n */
+	PSM_CONFIG_REG1_UART_FC_CTS_VAL	= 1<<22, /* Default value for UART_CTS_n */
+	PSM_CONFIG_REG1_LATCH_VAUX	= 1<<21, /* Enable Latch current Vaux_avlbl */
+	PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
+	PSM_CONFIG_REG1_UART_RST	= 1<<19, /* UART_RST */
+	PSM_CONFIG_REG1_PSM_PCIE_L1_POL	= 1<<18, /* PCIE L1 Event Polarity for PSM */
+	PSM_CONFIG_REG1_TIMER_STAT	= 1<<17, /* PSM Timer Status */
+	PSM_CONFIG_REG1_GPHY_INT	= 1<<16, /* GPHY INT Status */
+	PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
+	PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
+	PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ	= 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
+	PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK	= 1<<12, /* Disable CLK_GATE control snd_task */
+	PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA	= 1<<11, /* Disable flip-flop chain for sndmsg_inta */
+
+	PSM_CONFIG_REG1_DIS_LOADER	= 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_DO_PWDN		= 1<<8, /* Do Power Down, Start PSM Scheme */
+	PSM_CONFIG_REG1_DIS_PIG		= 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_DIS_PERST	= 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_EN_REG18_PD	= 1<<5, /* Enable REG18 Power Down for PSM */
+	PSM_CONFIG_REG1_EN_PSM_LOAD	= 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
+	PSM_CONFIG_REG1_EN_PSM_HOT_RST	= 1<<3, /* Enable PCIe Hot Reset for PSM */
+	PSM_CONFIG_REG1_EN_PSM_PERST	= 1<<2, /* Enable PCIe Reset Event for PSM */
+	PSM_CONFIG_REG1_EN_PSM_PCIE_L1	= 1<<1, /* Enable PCIe L1 Event for PSM */
+	PSM_CONFIG_REG1_EN_PSM		= 1<<0, /* Enable PSM Scheme */
+};
+
+/*	PSM_CONFIG_REG4				0x0168	PSM Config Register 4 */
+enum {
+						/* PHY Link Detect Timer */
+	PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
+	PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
+
+	PSM_CONFIG_REG4_DEBUG_TIMER	    = 1<<1, /* Debug Timer */
+	PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
+};
+
 
 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
 			       PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -197,6 +301,9 @@
 	B2_I2C_IRQ	= 0x0168,
 	B2_I2C_SW	= 0x016c,
 
+	Y2_PEX_PHY_DATA = 0x0170,
+	Y2_PEX_PHY_ADDR = 0x0172,
+
 	B3_RAM_ADDR	= 0x0180,
 	B3_RAM_DATA_LO	= 0x0184,
 	B3_RAM_DATA_HI	= 0x0188,
@@ -317,6 +424,10 @@
 	Y2_IS_CHK_TXS2	= 1<<9,		/* Descriptor error TXS 2 */
 	Y2_IS_CHK_TXA2	= 1<<8,		/* Descriptor error TXA 2 */
 
+	Y2_IS_PSM_ACK	= 1<<7,		/* PSM Acknowledge (Yukon-Optima only) */
+	Y2_IS_PTP_TIST	= 1<<6,		/* PTP Time Stamp (Yukon-Optima only) */
+	Y2_IS_PHY_QLNK	= 1<<5,		/* PHY Quick Link (Yukon-Optima only) */
+
 	Y2_IS_IRQ_PHY1	= 1<<4,		/* Interrupt from PHY 1 */
 	Y2_IS_IRQ_MAC1	= 1<<3,		/* Interrupt from MAC 1 */
 	Y2_IS_CHK_RX1	= 1<<2,		/* Descriptor error Rx 1 */
@@ -435,6 +546,7 @@
  	CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
 	CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
 	CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
+	CHIP_ID_YUKON_OPT  = 0xbc, /* YUKON-2 Optima */
 };
 enum yukon_ec_rev {
 	CHIP_REV_YU_EC_A1    = 0,  /* Chip Rev. for Yukon-EC A1/A0 */
@@ -459,6 +571,8 @@
 };
 enum yukon_supr_rev {
 	CHIP_REV_YU_SU_A0    = 0,
+	CHIP_REV_YU_SU_B0    = 1,
+	CHIP_REV_YU_SU_B1    = 3,
 };
 
 
@@ -513,6 +627,12 @@
 	TIM_T_STEP	= 1<<0,	/* Test step */
 };
 
+/*	Y2_PEX_PHY_ADDR/DATA		PEX PHY address and data reg  (Yukon-2 only) */
+enum {
+	PEX_RD_ACCESS	= 1<<31, /* Access Mode Read = 1, Write = 0 */
+	PEX_DB_ACCESS	= 1<<30, /* Access to debug register */
+};
+
 /*	B3_RAM_ADDR		32 bit	RAM Address, to read or write */
 					/* Bit 31..19:	reserved */
 #define RAM_ADR_RAN	0x0007ffffL	/* Bit 18.. 0:	RAM Address Range */
@@ -688,10 +808,11 @@
 	RX_GMF_AF_THR	= 0x0c44,/* 32 bit	Rx GMAC FIFO Almost Full Thresh. */
 	RX_GMF_CTRL_T	= 0x0c48,/* 32 bit	Rx GMAC FIFO Control/Test */
 	RX_GMF_FL_MSK	= 0x0c4c,/* 32 bit	Rx GMAC FIFO Flush Mask */
-	RX_GMF_FL_THR	= 0x0c50,/* 32 bit	Rx GMAC FIFO Flush Threshold */
+	RX_GMF_FL_THR	= 0x0c50,/* 16 bit	Rx GMAC FIFO Flush Threshold */
+	RX_GMF_FL_CTRL	= 0x0c52,/* 16 bit	Rx GMAC FIFO Flush Control */
 	RX_GMF_TR_THR	= 0x0c54,/* 32 bit	Rx Truncation Threshold (Yukon-2) */
-	RX_GMF_UP_THR	= 0x0c58,/*  8 bit	Rx Upper Pause Thr (Yukon-EC_U) */
-	RX_GMF_LP_THR	= 0x0c5a,/*  8 bit	Rx Lower Pause Thr (Yukon-EC_U) */
+	RX_GMF_UP_THR	= 0x0c58,/* 16 bit	Rx Upper Pause Thr (Yukon-EC_U) */
+	RX_GMF_LP_THR	= 0x0c5a,/* 16 bit	Rx Lower Pause Thr (Yukon-EC_U) */
 	RX_GMF_VLAN	= 0x0c5c,/* 32 bit	Rx VLAN Type Register (Yukon-2) */
 	RX_GMF_WP	= 0x0c60,/* 32 bit	Rx GMAC FIFO Write Pointer */
 
@@ -754,6 +875,42 @@
 	BMU_TX_CLR_IRQ_TCP	= 1<<11, /* Clear IRQ on TCP segment length mismatch */
 };
 
+/*	TBMU_TEST			0x06B8	Transmit BMU Test Register */
+enum {
+	TBMU_TEST_BMU_TX_CHK_AUTO_OFF		= 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
+	TBMU_TEST_BMU_TX_CHK_AUTO_ON		= 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
+	TBMU_TEST_HOME_ADD_PAD_FIX1_EN		= 1<<29, /* Home Address Padding FIX1 Enable */
+	TBMU_TEST_HOME_ADD_PAD_FIX1_DIS		= 1<<28, /* Home Address Padding FIX1 Disable */
+	TBMU_TEST_ROUTING_ADD_FIX_EN		= 1<<27, /* Routing Address Fix Enable */
+	TBMU_TEST_ROUTING_ADD_FIX_DIS		= 1<<26, /* Routing Address Fix Disable */
+	TBMU_TEST_HOME_ADD_FIX_EN		= 1<<25, /* Home address checksum fix enable */
+	TBMU_TEST_HOME_ADD_FIX_DIS		= 1<<24, /* Home address checksum fix disable */
+
+	TBMU_TEST_TEST_RSPTR_ON			= 1<<22, /* Testmode Shadow Read Ptr On */
+	TBMU_TEST_TEST_RSPTR_OFF		= 1<<21, /* Testmode Shadow Read Ptr Off */
+	TBMU_TEST_TESTSTEP_RSPTR		= 1<<20, /* Teststep Shadow Read Ptr */
+
+	TBMU_TEST_TEST_RPTR_ON			= 1<<18, /* Testmode Read Ptr On */
+	TBMU_TEST_TEST_RPTR_OFF			= 1<<17, /* Testmode Read Ptr Off */
+	TBMU_TEST_TESTSTEP_RPTR			= 1<<16, /* Teststep Read Ptr */
+
+	TBMU_TEST_TEST_WSPTR_ON			= 1<<14, /* Testmode Shadow Write Ptr On */
+	TBMU_TEST_TEST_WSPTR_OFF		= 1<<13, /* Testmode Shadow Write Ptr Off */
+	TBMU_TEST_TESTSTEP_WSPTR		= 1<<12, /* Teststep Shadow Write Ptr */
+
+	TBMU_TEST_TEST_WPTR_ON			= 1<<10, /* Testmode Write Ptr On */
+	TBMU_TEST_TEST_WPTR_OFF			= 1<<9, /* Testmode Write Ptr Off */
+	TBMU_TEST_TESTSTEP_WPTR			= 1<<8, /* Teststep Write Ptr */
+
+	TBMU_TEST_TEST_REQ_NB_ON		= 1<<6, /* Testmode Req Nbytes/Addr On */
+	TBMU_TEST_TEST_REQ_NB_OFF		= 1<<5, /* Testmode Req Nbytes/Addr Off */
+	TBMU_TEST_TESTSTEP_REQ_NB		= 1<<4, /* Teststep Req Nbytes/Addr */
+
+	TBMU_TEST_TEST_DONE_IDX_ON		= 1<<2, /* Testmode Done Index On */
+	TBMU_TEST_TEST_DONE_IDX_OFF		= 1<<1, /* Testmode Done Index Off */
+	TBMU_TEST_TESTSTEP_DONE_IDX		= 1<<0,	/* Teststep Done Index */
+};
+
 /* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
 /* PREF_UNIT_CTRL	32 bit	Prefetch Control register */
 enum {
@@ -1674,6 +1831,12 @@
 
 /*	RX_GMF_CTRL_T	32 bit	Rx GMAC FIFO Control/Test */
 enum {
+	RX_GCLKMAC_ENA	= 1<<31,	/* RX MAC Clock Gating Enable */
+	RX_GCLKMAC_OFF	= 1<<30,
+
+	RX_STFW_DIS	= 1<<29,	/* RX Store and Forward Disable */
+	RX_STFW_ENA	= 1<<28,
+
 	RX_TRUNC_ON	= 1<<27,  	/* enable  packet truncation */
 	RX_TRUNC_OFF	= 1<<26, 	/* disable packet truncation */
 	RX_VLAN_STRIP_ON = 1<<25,	/* enable  VLAN stripping */
@@ -1711,6 +1874,20 @@
 	GMF_RX_CTRL_DEF	= GMF_OPER_ON | GMF_RX_F_FL_ON,
 };
 
+/*	RX_GMF_FL_CTRL	16 bit	Rx GMAC FIFO Flush Control (Yukon-Supreme) */
+enum {
+	RX_IPV6_SA_MOB_ENA	= 1<<9,	/* IPv6 SA Mobility Support Enable */
+	RX_IPV6_SA_MOB_DIS	= 1<<8,	/* IPv6 SA Mobility Support Disable */
+	RX_IPV6_DA_MOB_ENA	= 1<<7,	/* IPv6 DA Mobility Support Enable */
+	RX_IPV6_DA_MOB_DIS	= 1<<6,	/* IPv6 DA Mobility Support Disable */
+	RX_PTR_SYNCDLY_ENA	= 1<<5,	/* Pointers Delay Synch Enable */
+	RX_PTR_SYNCDLY_DIS	= 1<<4,	/* Pointers Delay Synch Disable */
+	RX_ASF_NEWFLAG_ENA	= 1<<3,	/* RX ASF Flag New Logic Enable */
+	RX_ASF_NEWFLAG_DIS	= 1<<2,	/* RX ASF Flag New Logic Disable */
+	RX_FLSH_MISSPKT_ENA	= 1<<1,	/* RX Flush Miss-Packet Enable */
+	RX_FLSH_MISSPKT_DIS	= 1<<0,	/* RX Flush Miss-Packet Disable */
+};
+
 /*	TX_GMF_EA		32 bit	Tx GMAC FIFO End Address */
 enum {
 	TX_DYN_WM_ENA	= 3,	/* Yukon-FE+ specific */
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
new file mode 100644
index 0000000..35eaa52
--- /dev/null
+++ b/drivers/net/stmmac/Kconfig
@@ -0,0 +1,53 @@
+config STMMAC_ETH
+	tristate "STMicroelectronics 10/100/1000 Ethernet driver"
+	select MII
+	select PHYLIB
+	depends on NETDEVICES && CPU_SUBTYPE_ST40
+	help
+	  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
+	  controllers. ST Ethernet IPs are built around a Synopsys IP Core.
+
+if STMMAC_ETH
+
+config STMMAC_DA
+	bool "STMMAC DMA arbitration scheme"
+	default n
+	help
+	  When this option is selected, RX has priority over TX (Gigabit
+	  Ethernet devices only).
+	  By default, the DMA arbitration scheme is round-robin
+	  (RX:TX priority is 1:1).
+
+config STMMAC_DUAL_MAC
+	bool "STMMAC: dual mac support (EXPERIMENTAL)"
+	default n
+	depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
+	help
+	  Some ST SoCs (for example the stx7141 and stx7200c2) have two
+	  Ethernet controllers. This option turns on the second Ethernet
+	  device on such platforms.
+
+config STMMAC_TIMER
+	bool "STMMAC Timer optimisation"
+	default n
+	help
+	  Use an external timer to reduce the number of network
+	  interrupts.
+
+choice
+	prompt "Select Timer device"
+	depends on STMMAC_TIMER
+
+config STMMAC_TMU_TIMER
+	bool "TMU channel 2"
+	depends on CPU_SH4
+	help
+
+config STMMAC_RTC_TIMER
+	bool "Real time clock"
+	depends on RTC_CLASS
+	help
+
+endchoice
+
+endif
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
new file mode 100644
index 0000000..b2d7a55
--- /dev/null
+++ b/drivers/net/stmmac/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_STMMAC_ETH) += stmmac.o
+stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+		mac100.o  gmac.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
new file mode 100644
index 0000000..e49e518
--- /dev/null
+++ b/drivers/net/stmmac/common.h
@@ -0,0 +1,330 @@
+/*******************************************************************************
+  STMMAC Common Header File
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "descs.h"
+#include <linux/io.h>
+
+/* *********************************************
+   DMA CRS Control and Status Register Mapping
+ * *********************************************/
+#define DMA_BUS_MODE		0x00001000	/* Bus Mode */
+#define DMA_XMT_POLL_DEMAND	0x00001004	/* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND	0x00001008	/* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR	0x0000100c	/* Receive List Base */
+#define DMA_TX_BASE_ADDR	0x00001010	/* Transmit List Base */
+#define DMA_STATUS		0x00001014	/* Status Register */
+#define DMA_CONTROL		0x00001018	/* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA		0x0000101c	/* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR	0x00001020	/* Missed Frame Counter */
+#define DMA_CUR_TX_BUF_ADDR	0x00001050	/* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR	0x00001054	/* Current Host Rx Buffer */
+
+/* ********************************
+   DMA Control register defines
+ * ********************************/
+#define DMA_CONTROL_ST		0x00002000	/* Start/Stop Transmission */
+#define DMA_CONTROL_SR		0x00000002	/* Start/Stop Receive */
+
+/* **************************************
+   DMA Interrupt Enable register defines
+ * **************************************/
+/**** NORMAL INTERRUPT ****/
+#define DMA_INTR_ENA_NIE 0x00010000	/* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001	/* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004	/* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040	/* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000	/* Early Receive */
+
+#define DMA_INTR_NORMAL	(DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+			DMA_INTR_ENA_TIE)
+
+/**** ABNORMAL INTERRUPT ****/
+#define DMA_INTR_ENA_AIE 0x00008000	/* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000	/* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400	/* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200	/* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100	/* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080	/* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020	/* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010	/* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008	/* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002	/* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL	(DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+				DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK	(DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* ****************************
+ *  DMA Status register defines
+ * ****************************/
+#define DMA_STATUS_GPI		0x10000000	/* PMT interrupt */
+#define DMA_STATUS_GMI		0x08000000	/* MMC interrupt */
+#define DMA_STATUS_GLI		0x04000000	/* GMAC Line interface int. */
+#define DMA_STATUS_EB_MASK	0x00380000	/* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT	0x00080000	/* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT	0x00100000	/* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK	0x00700000	/* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT	20
+#define DMA_STATUS_RS_MASK	0x000e0000	/* Receive Process State */
+#define DMA_STATUS_RS_SHIFT	17
+#define DMA_STATUS_NIS	0x00010000	/* Normal Interrupt Summary */
+#define DMA_STATUS_AIS	0x00008000	/* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI	0x00004000	/* Early Receive Interrupt */
+#define DMA_STATUS_FBI	0x00002000	/* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI	0x00000400	/* Early Transmit Interrupt */
+#define DMA_STATUS_RWT	0x00000200	/* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS	0x00000100	/* Receive Process Stopped */
+#define DMA_STATUS_RU	0x00000080	/* Receive Buffer Unavailable */
+#define DMA_STATUS_RI	0x00000040	/* Receive Interrupt */
+#define DMA_STATUS_UNF	0x00000020	/* Transmit Underflow */
+#define DMA_STATUS_OVF	0x00000010	/* Receive Overflow */
+#define DMA_STATUS_TJT	0x00000008	/* Transmit Jabber Timeout */
+#define DMA_STATUS_TU	0x00000004	/* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS	0x00000002	/* Transmit Process Stopped */
+#define DMA_STATUS_TI	0x00000001	/* Transmit Interrupt */
+
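
The defines above are typically used by the DMA code to unmask the default interrupt set and to decode the status word. The sketch below only illustrates that usage with the masks defined here; the real call sites live elsewhere in the driver, and the toy_* names are not part of it:

	static void toy_dma_irq_setup(unsigned long ioaddr)
	{
		/* RX/TX completion plus the abnormal summary bits */
		writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
	}

	static int toy_dma_tx_state(unsigned long ioaddr)
	{
		unsigned int status = readl(ioaddr + DMA_STATUS);

		return (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
	}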
+/* Other defines */
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF	0
+#define FLOW_RX		1
+#define FLOW_TX		2
+#define FLOW_AUTO	(FLOW_TX | FLOW_RX)
+
+/* DMA STORE-AND-FORWARD Operation Mode */
+#define SF_DMA_MODE 1
+
+#define HW_CSUM 1
+#define NO_HW_CSUM 0
+
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
+
+/* Common MAC defines */
+#define MAC_CTRL_REG		0x00000000	/* MAC Control */
+#define MAC_ENABLE_TX		0x00000008	/* Transmitter Enable */
+#define MAC_RNABLE_RX		0x00000004	/* Receiver Enable */
+
+/* MAC Management Counters register */
+#define MMC_CONTROL		0x00000100	/* MMC Control */
+#define MMC_HIGH_INTR		0x00000104	/* MMC High Interrupt */
+#define MMC_LOW_INTR		0x00000108	/* MMC Low Interrupt */
+#define MMC_HIGH_INTR_MASK	0x0000010c	/* MMC High Interrupt Mask */
+#define MMC_LOW_INTR_MASK	0x00000110	/* MMC Low Interrupt Mask */
+
+#define MMC_CONTROL_MAX_FRM_MASK	0x0003ff8	/* Maximum Frame Size */
+#define MMC_CONTROL_MAX_FRM_SHIFT	3
+#define MMC_CONTROL_MAX_FRAME		0x7FF
+
+struct stmmac_extra_stats {
+	/* Transmit errors */
+	unsigned long tx_underflow ____cacheline_aligned;
+	unsigned long tx_carrier;
+	unsigned long tx_losscarrier;
+	unsigned long tx_heartbeat;
+	unsigned long tx_deferred;
+	unsigned long tx_vlan;
+	unsigned long tx_jabber;
+	unsigned long tx_frame_flushed;
+	unsigned long tx_payload_error;
+	unsigned long tx_ip_header_error;
+	/* Receive errors */
+	unsigned long rx_desc;
+	unsigned long rx_partial;
+	unsigned long rx_runt;
+	unsigned long rx_toolong;
+	unsigned long rx_collision;
+	unsigned long rx_crc;
+	unsigned long rx_lenght;
+	unsigned long rx_mii;
+	unsigned long rx_multicast;
+	unsigned long rx_gmac_overflow;
+	unsigned long rx_watchdog;
+	unsigned long da_rx_filter_fail;
+	unsigned long sa_rx_filter_fail;
+	unsigned long rx_missed_cntr;
+	unsigned long rx_overflow_cntr;
+	unsigned long rx_vlan;
+	/* Tx/Rx IRQ errors */
+	unsigned long tx_undeflow_irq;
+	unsigned long tx_process_stopped_irq;
+	unsigned long tx_jabber_irq;
+	unsigned long rx_overflow_irq;
+	unsigned long rx_buf_unav_irq;
+	unsigned long rx_process_stopped_irq;
+	unsigned long rx_watchdog_irq;
+	unsigned long tx_early_irq;
+	unsigned long fatal_bus_error_irq;
+	/* Extra info */
+	unsigned long threshold;
+	unsigned long tx_pkt_n;
+	unsigned long rx_pkt_n;
+	unsigned long poll_n;
+	unsigned long sched_timer_n;
+	unsigned long normal_irq_n;
+};
+
+/* GMAC core can compute the checksums in HW. */
+enum rx_frame_status {
+	good_frame = 0,
+	discard_frame = 1,
+	csum_none = 2,
+};
+
+static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+			 unsigned int high, unsigned int low)
+{
+	unsigned long data;
+
+	data = (addr[5] << 8) | addr[4];
+	writel(data, ioaddr + high);
+	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	writel(data, ioaddr + low);
+
+	return;
+}
+
+static inline void stmmac_get_mac_addr(unsigned long ioaddr,
+				unsigned char *addr, unsigned int high,
+				unsigned int low)
+{
+	unsigned int hi_addr, lo_addr;
+
+	/* Read the MAC address from the hardware */
+	hi_addr = readl(ioaddr + high);
+	lo_addr = readl(ioaddr + low);
+
+	/* Extract the MAC address from the high and low words */
+	addr[0] = lo_addr & 0xff;
+	addr[1] = (lo_addr >> 8) & 0xff;
+	addr[2] = (lo_addr >> 16) & 0xff;
+	addr[3] = (lo_addr >> 24) & 0xff;
+	addr[4] = hi_addr & 0xff;
+	addr[5] = (hi_addr >> 8) & 0xff;
+
+	return;
+}
+
+struct stmmac_ops {
+	/* MAC core initialization */
+	void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
+	/* DMA core initialization */
+	int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+	/* Dump MAC registers */
+	void (*dump_mac_regs) (unsigned long ioaddr);
+	/* Dump DMA registers */
+	void (*dump_dma_regs) (unsigned long ioaddr);
+	/* Set tx/rx threshold in the csr6 register
+	 * An invalid value enables the store-and-forward mode */
+	void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
+	/* To track extra statistic (if supported) */
+	void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+				   unsigned long ioaddr);
+	/* RX descriptor ring initialization */
+	void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
+				int disable_rx_ic);
+	/* TX descriptor ring initialization */
+	void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+
+	/* Invoked by the xmit function to prepare the tx descriptor */
+	void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
+				 int csum_flag);
+	/* Set/get the owner of the descriptor */
+	void (*set_tx_owner) (struct dma_desc *p);
+	int (*get_tx_owner) (struct dma_desc *p);
+	/* Invoked by the xmit function to close the tx descriptor */
+	void (*close_tx_desc) (struct dma_desc *p);
+	/* Clean the tx descriptor as soon as the tx irq is received */
+	void (*release_tx_desc) (struct dma_desc *p);
+	/* Clear interrupt on tx frame completion. When this bit is
+	 * set, an interrupt happens as soon as the frame is transmitted */
+	void (*clear_tx_ic) (struct dma_desc *p);
+	/* Last tx segment reports the transmit status */
+	int (*get_tx_ls) (struct dma_desc *p);
+	/* Return the transmit status looking at the TDES1 */
+	int (*tx_status) (void *data, struct stmmac_extra_stats *x,
+			  struct dma_desc *p, unsigned long ioaddr);
+	/* Get the buffer size from the descriptor */
+	int (*get_tx_len) (struct dma_desc *p);
+	/* Handle extra events on specific interrupts hw dependent */
+	void (*host_irq_status) (unsigned long ioaddr);
+	int (*get_rx_owner) (struct dma_desc *p);
+	void (*set_rx_owner) (struct dma_desc *p);
+	/* Get the receive frame size */
+	int (*get_rx_frame_len) (struct dma_desc *p);
+	/* Return the reception status looking at the RDES1 */
+	int (*rx_status) (void *data, struct stmmac_extra_stats *x,
+			  struct dma_desc *p);
+	/* Multicast filter setting */
+	void (*set_filter) (struct net_device *dev);
+	/* Flow control setting */
+	void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex,
+			   unsigned int fc, unsigned int pause_time);
+	/* Set power management mode (e.g. magic frame) */
+	void (*pmt) (unsigned long ioaddr, unsigned long mode);
+	/* Set/Get Unicast MAC addresses */
+	void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
+			     unsigned int reg_n);
+	void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
+			     unsigned int reg_n);
+};
+
+struct mac_link {
+	int port;
+	int duplex;
+	int speed;
+};
+
+struct mii_regs {
+	unsigned int addr;	/* MII Address */
+	unsigned int data;	/* MII Data */
+};
+
+struct hw_cap {
+	unsigned int version;	/* Core Version register (GMAC) */
+	unsigned int pmt;	/* Power-Down mode (GMAC) */
+	struct mac_link link;
+	struct mii_regs mii;
+};
+
+struct mac_device_info {
+	struct hw_cap hw;
+	struct stmmac_ops *ops;
+};
+
+struct mac_device_info *gmac_setup(unsigned long addr);
+struct mac_device_info *mac100_setup(unsigned long addr);
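
The common.h hunk ends with the two core-specific setup prototypes; the stmmac_ops table and the mac_device_info wrapper above exist so that the rest of the driver can stay agnostic about whether a GMAC or a MAC 10/100 core is fitted. A minimal sketch of how a probe path might use this abstraction follows; the helper name and the is_gmac flag are illustrative only, not part of the patch:

static struct mac_device_info *example_attach_core(unsigned long ioaddr,
						   int is_gmac, int pbl,
						   u32 dma_tx, u32 dma_rx)
{
	struct mac_device_info *mac;

	/* pick the core-specific ops table */
	mac = is_gmac ? gmac_setup(ioaddr) : mac100_setup(ioaddr);
	if (!mac)
		return NULL;

	mac->ops->core_init(ioaddr);		/* program the MAC control register */
	mac->ops->dma_init(ioaddr, pbl, dma_tx, dma_rx);	/* reset DMA, set descriptor bases */
	mac->ops->dma_mode(ioaddr, SF_DMA_MODE, SF_DMA_MODE);	/* store-and-forward on both paths */

	return mac;
}
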
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
new file mode 100644
index 0000000..6d2a0b2
--- /dev/null
+++ b/drivers/net/stmmac/descs.h
@@ -0,0 +1,163 @@
+/*******************************************************************************
+  Header File to describe the DMA descriptors.
+  Enhanced descriptors are used for GMAC cores.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+struct dma_desc {
+	/* Receive descriptor */
+	union {
+		struct {
+			/* RDES0 */
+			u32 reserved1:1;
+			u32 crc_error:1;
+			u32 dribbling:1;
+			u32 mii_error:1;
+			u32 receive_watchdog:1;
+			u32 frame_type:1;
+			u32 collision:1;
+			u32 frame_too_long:1;
+			u32 last_descriptor:1;
+			u32 first_descriptor:1;
+			u32 multicast_frame:1;
+			u32 run_frame:1;
+			u32 length_error:1;
+			u32 partial_frame_error:1;
+			u32 descriptor_error:1;
+			u32 error_summary:1;
+			u32 frame_length:14;
+			u32 filtering_fail:1;
+			u32 own:1;
+			/* RDES1 */
+			u32 buffer1_size:11;
+			u32 buffer2_size:11;
+			u32 reserved2:2;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 reserved3:5;
+			u32 disable_ic:1;
+		} rx;
+		struct {
+			/* RDES0 */
+			u32 payload_csum_error:1;
+			u32 crc_error:1;
+			u32 dribbling:1;
+			u32 error_gmii:1;
+			u32 receive_watchdog:1;
+			u32 frame_type:1;
+			u32 late_collision:1;
+			u32 ipc_csum_error:1;
+			u32 last_descriptor:1;
+			u32 first_descriptor:1;
+			u32 vlan_tag:1;
+			u32 overflow_error:1;
+			u32 length_error:1;
+			u32 sa_filter_fail:1;
+			u32 descriptor_error:1;
+			u32 error_summary:1;
+			u32 frame_length:14;
+			u32 da_filter_fail:1;
+			u32 own:1;
+			/* RDES1 */
+			u32 buffer1_size:13;
+			u32 reserved1:1;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 buffer2_size:13;
+			u32 reserved2:2;
+			u32 disable_ic:1;
+		} erx;		/* -- enhanced -- */
+
+		/* Transmit descriptor */
+		struct {
+			/* TDES0 */
+			u32 deferred:1;
+			u32 underflow_error:1;
+			u32 excessive_deferral:1;
+			u32 collision_count:4;
+			u32 heartbeat_fail:1;
+			u32 excessive_collisions:1;
+			u32 late_collision:1;
+			u32 no_carrier:1;
+			u32 loss_carrier:1;
+			u32 reserved1:3;
+			u32 error_summary:1;
+			u32 reserved2:15;
+			u32 own:1;
+			/* TDES1 */
+			u32 buffer1_size:11;
+			u32 buffer2_size:11;
+			u32 reserved3:1;
+			u32 disable_padding:1;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 crc_disable:1;
+			u32 reserved4:2;
+			u32 first_segment:1;
+			u32 last_segment:1;
+			u32 interrupt:1;
+		} tx;
+		struct {
+			/* TDES0 */
+			u32 deferred:1;
+			u32 underflow_error:1;
+			u32 excessive_deferral:1;
+			u32 collision_count:4;
+			u32 vlan_frame:1;
+			u32 excessive_collisions:1;
+			u32 late_collision:1;
+			u32 no_carrier:1;
+			u32 loss_carrier:1;
+			u32 payload_error:1;
+			u32 frame_flushed:1;
+			u32 jabber_timeout:1;
+			u32 error_summary:1;
+			u32 ip_header_error:1;
+			u32 time_stamp_status:1;
+			u32 reserved1:2;
+			u32 second_address_chained:1;
+			u32 end_ring:1;
+			u32 checksum_insertion:2;
+			u32 reserved2:1;
+			u32 time_stamp_enable:1;
+			u32 disable_padding:1;
+			u32 crc_disable:1;
+			u32 first_segment:1;
+			u32 last_segment:1;
+			u32 interrupt:1;
+			u32 own:1;
+			/* TDES1 */
+			u32 buffer1_size:13;
+			u32 reserved3:3;
+			u32 buffer2_size:13;
+			u32 reserved4:3;
+		} etx;		/* -- enhanced -- */
+	} des01;
+	unsigned int des2;
+	unsigned int des3;
+};
+
+/* Transmit checksum insertion control */
+enum tdes_csum_insertion {
+	cic_disabled = 0,	/* Checksum Insertion Control */
+	cic_only_ip = 1,	/* Only IP header */
+	cic_no_pseudoheader = 2,	/* IP header but pseudoheader
+					 * is not calculated */
+	cic_full = 3,		/* IP header and pseudoheader */
+};
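
Each descriptor field above is a bitfield overlaid on the two 32-bit words DES0/DES1, with des2/des3 holding the buffer addresses. As a rough sketch, this is how the transmit path would fill one enhanced descriptor by hand, combining what prepare_tx_desc(), close_tx_desc() and set_tx_owner() do through the ops table; the helper is illustrative only and assumes a single-segment frame:

static void example_fill_etx(struct dma_desc *p, dma_addr_t buf,
			     unsigned int len, int last_in_ring)
{
	p->des2 = buf;					/* buffer 1 DMA address */
	p->des01.etx.buffer1_size = len;
	p->des01.etx.first_segment = 1;
	p->des01.etx.last_segment = 1;			/* single-segment frame */
	p->des01.etx.checksum_insertion = cic_full;	/* IP header + pseudo-header */
	p->des01.etx.interrupt = 1;			/* raise TI when transmitted */
	p->des01.etx.end_ring = last_in_ring;		/* DMA wraps back to the ring base */
	p->des01.etx.own = 1;				/* hand the descriptor to the DMA */
}
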
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c
new file mode 100644
index 0000000..b624bb5
--- /dev/null
+++ b/drivers/net/stmmac/gmac.c
@@ -0,0 +1,693 @@
+/*******************************************************************************
+  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+  DWC Ether MAC 10/100/1000 Universal version 3.41a  has been used for
+  developing this code.
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "stmmac.h"
+#include "gmac.h"
+
+#undef GMAC_DEBUG
+/*#define GMAC_DEBUG*/
+#undef FRAME_FILTER_DEBUG
+/*#define FRAME_FILTER_DEBUG*/
+#ifdef GMAC_DEBUG
+#define DBG(fmt, args...)  printk(fmt, ## args)
+#else
+#define DBG(fmt, args...)  do { } while (0)
+#endif
+
+static void gmac_dump_regs(unsigned long ioaddr)
+{
+	int i;
+	pr_info("\t----------------------------------------------\n"
+	       "\t  GMAC registers (base addr = 0x%8x)\n"
+	       "\t----------------------------------------------\n",
+	       (unsigned int)ioaddr);
+
+	for (i = 0; i < 55; i++) {
+		int offset = i * 4;
+		pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+		       offset, readl(ioaddr + offset));
+	}
+	return;
+}
+
+static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
+{
+	u32 value = readl(ioaddr + DMA_BUS_MODE);
+	/* DMA SW reset */
+	value |= DMA_BUS_MODE_SFT_RESET;
+	writel(value, ioaddr + DMA_BUS_MODE);
+	do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+
+	value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
+	    ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
+	     (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+
+#ifdef CONFIG_STMMAC_DA
+	value |= DMA_BUS_MODE_DA;	/* Rx has priority over tx */
+#endif
+	writel(value, ioaddr + DMA_BUS_MODE);
+
+	/* Mask interrupts by writing to CSR7 */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+	/* The base address of the RX/TX descriptor lists must be written into
+	 * DMA CSR3 and CSR4, respectively. */
+	writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+	writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+	return 0;
+}
+
+/* Transmit FIFO flush operation */
+static void gmac_flush_tx_fifo(unsigned long ioaddr)
+{
+	u32 csr6 = readl(ioaddr + DMA_CONTROL);
+	writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+	do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
+
+static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
+				    int rxmode)
+{
+	u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+	if (txmode == SF_DMA_MODE) {
+		DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
+		/* Transmit COE type 2 cannot be done in cut-through mode. */
+		csr6 |= DMA_CONTROL_TSF;
+		/* Operating on the second frame increases performance,
+		 * especially when transmit store-and-forward is used. */
+		csr6 |= DMA_CONTROL_OSF;
+	} else {
+		DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
+			      " (threshold = %d)\n", txmode);
+		csr6 &= ~DMA_CONTROL_TSF;
+		csr6 &= DMA_CONTROL_TC_TX_MASK;
+		/* Set the transmit threshold */
+		if (txmode <= 32)
+			csr6 |= DMA_CONTROL_TTC_32;
+		else if (txmode <= 64)
+			csr6 |= DMA_CONTROL_TTC_64;
+		else if (txmode <= 128)
+			csr6 |= DMA_CONTROL_TTC_128;
+		else if (txmode <= 192)
+			csr6 |= DMA_CONTROL_TTC_192;
+		else
+			csr6 |= DMA_CONTROL_TTC_256;
+	}
+
+	if (rxmode == SF_DMA_MODE) {
+		DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
+		csr6 |= DMA_CONTROL_RSF;
+	} else {
+		DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
+			      " (threshold = %d)\n", rxmode);
+		csr6 &= ~DMA_CONTROL_RSF;
+		csr6 &= DMA_CONTROL_TC_RX_MASK;
+		if (rxmode <= 32)
+			csr6 |= DMA_CONTROL_RTC_32;
+		else if (rxmode <= 64)
+			csr6 |= DMA_CONTROL_RTC_64;
+		else if (rxmode <= 96)
+			csr6 |= DMA_CONTROL_RTC_96;
+		else
+			csr6 |= DMA_CONTROL_RTC_128;
+	}
+
+	writel(csr6, ioaddr + DMA_CONTROL);
+	return;
+}
+
+/* Not yet implemented --- no RMON module */
+static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+				   unsigned long ioaddr)
+{
+	return;
+}
+
+static void gmac_dump_dma_regs(unsigned long ioaddr)
+{
+	int i;
+	pr_info(" DMA registers\n");
+	for (i = 0; i < 22; i++) {
+		if ((i < 9) || (i > 17)) {
+			int offset = i * 4;
+			pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
+			       (DMA_BUS_MODE + offset),
+			       readl(ioaddr + DMA_BUS_MODE + offset));
+		}
+	}
+	return;
+}
+
+static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+				    struct dma_desc *p, unsigned long ioaddr)
+{
+	int ret = 0;
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+
+	if (unlikely(p->des01.etx.error_summary)) {
+		DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
+		if (unlikely(p->des01.etx.jabber_timeout)) {
+			DBG(KERN_ERR "\tjabber_timeout error\n");
+			x->tx_jabber++;
+		}
+
+		if (unlikely(p->des01.etx.frame_flushed)) {
+			DBG(KERN_ERR "\tframe_flushed error\n");
+			x->tx_frame_flushed++;
+			gmac_flush_tx_fifo(ioaddr);
+		}
+
+		if (unlikely(p->des01.etx.loss_carrier)) {
+			DBG(KERN_ERR "\tloss_carrier error\n");
+			x->tx_losscarrier++;
+			stats->tx_carrier_errors++;
+		}
+		if (unlikely(p->des01.etx.no_carrier)) {
+			DBG(KERN_ERR "\tno_carrier error\n");
+			x->tx_carrier++;
+			stats->tx_carrier_errors++;
+		}
+		if (unlikely(p->des01.etx.late_collision)) {
+			DBG(KERN_ERR "\tlate_collision error\n");
+			stats->collisions += p->des01.etx.collision_count;
+		}
+		if (unlikely(p->des01.etx.excessive_collisions)) {
+			DBG(KERN_ERR "\texcessive_collisions\n");
+			stats->collisions += p->des01.etx.collision_count;
+		}
+		if (unlikely(p->des01.etx.excessive_deferral)) {
+			DBG(KERN_INFO "\texcessive tx_deferral\n");
+			x->tx_deferred++;
+		}
+
+		if (unlikely(p->des01.etx.underflow_error)) {
+			DBG(KERN_ERR "\tunderflow error\n");
+			gmac_flush_tx_fifo(ioaddr);
+			x->tx_underflow++;
+		}
+
+		if (unlikely(p->des01.etx.ip_header_error)) {
+			DBG(KERN_ERR "\tTX IP header csum error\n");
+			x->tx_ip_header_error++;
+		}
+
+		if (unlikely(p->des01.etx.payload_error)) {
+			DBG(KERN_ERR "\tAddr/Payload csum error\n");
+			x->tx_payload_error++;
+			gmac_flush_tx_fifo(ioaddr);
+		}
+
+		ret = -1;
+	}
+
+	if (unlikely(p->des01.etx.deferred)) {
+		DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+		x->tx_deferred++;
+	}
+#ifdef STMMAC_VLAN_TAG_USED
+	if (p->des01.etx.vlan_frame) {
+		DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+		x->tx_vlan++;
+	}
+#endif
+
+	return ret;
+}
+
+static int gmac_get_tx_len(struct dma_desc *p)
+{
+	return p->des01.etx.buffer1_size;
+}
+
+static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+	int ret = good_frame;
+	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+	/* bits 5 7 0 | Frame status
+	 * ----------------------------------------------------------
+	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
+	 *      1 0 0 | IPv4/6 No CSUM errors.
+	 *      1 0 1 | IPv4/6 CSUM PAYLOAD error
+	 *      1 1 0 | IPv4/6 CSUM IP HR error
+	 *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
+	 *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
+	 *      0 1 1 | COE bypassed; no IPv4/6 frame
+	 *      0 1 0 | Reserved.
+	 */
+	if (status == 0x0) {
+		DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+		ret = good_frame;
+	} else if (status == 0x4) {
+		DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errors.\n");
+		ret = good_frame;
+	} else if (status == 0x5) {
+		DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+		ret = csum_none;
+	} else if (status == 0x6) {
+		DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+		ret = csum_none;
+	} else if (status == 0x7) {
+		DBG(KERN_ERR
+		    "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+		ret = csum_none;
+	} else if (status == 0x1) {
+		DBG(KERN_ERR
+		    "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+		ret = discard_frame;
+	} else if (status == 0x3) {
+		DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+		ret = discard_frame;
+	}
+	return ret;
+}
+
+static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+				    struct dma_desc *p)
+{
+	int ret = good_frame;
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+
+	if (unlikely(p->des01.erx.error_summary)) {
+		DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
+		if (unlikely(p->des01.erx.descriptor_error)) {
+			DBG(KERN_ERR "\tdescriptor error\n");
+			x->rx_desc++;
+			stats->rx_length_errors++;
+		}
+		if (unlikely(p->des01.erx.overflow_error)) {
+			DBG(KERN_ERR "\toverflow error\n");
+			x->rx_gmac_overflow++;
+		}
+
+		if (unlikely(p->des01.erx.ipc_csum_error))
+			DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+
+		if (unlikely(p->des01.erx.late_collision)) {
+			DBG(KERN_ERR "\tlate_collision error\n");
+			stats->collisions++;
+		}
+		if (unlikely(p->des01.erx.receive_watchdog)) {
+			DBG(KERN_ERR "\treceive_watchdog error\n");
+			x->rx_watchdog++;
+		}
+		if (unlikely(p->des01.erx.error_gmii)) {
+			DBG(KERN_ERR "\tReceive Error\n");
+			x->rx_mii++;
+		}
+		if (unlikely(p->des01.erx.crc_error)) {
+			DBG(KERN_ERR "\tCRC error\n");
+			x->rx_crc++;
+			stats->rx_crc_errors++;
+		}
+		ret = discard_frame;
+	}
+
+	/* After a payload csum error, the ES bit is set.
+	 * This does not match the information reported in the databook.
+	 * At any rate, we need to understand if the CSUM hw computation is ok
+	 * and report this info to the upper layers. */
+	ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
+		p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+
+	if (unlikely(p->des01.erx.dribbling)) {
+		DBG(KERN_ERR "GMAC RX: dribbling error\n");
+		ret = discard_frame;
+	}
+	if (unlikely(p->des01.erx.sa_filter_fail)) {
+		DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
+		x->sa_rx_filter_fail++;
+		ret = discard_frame;
+	}
+	if (unlikely(p->des01.erx.da_filter_fail)) {
+		DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
+		x->da_rx_filter_fail++;
+		ret = discard_frame;
+	}
+	if (unlikely(p->des01.erx.length_error)) {
+		DBG(KERN_ERR "GMAC RX: length_error error\n");
+		x->rx_lenght++;
+		ret = discard_frame;
+	}
+#ifdef STMMAC_VLAN_TAG_USED
+	if (p->des01.erx.vlan_tag) {
+		DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+		x->rx_vlan++;
+	}
+#endif
+	return ret;
+}
+
+static void gmac_irq_status(unsigned long ioaddr)
+{
+	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+	/* Unused events (e.g. MMC interrupts) are not handled. */
+	if ((intr_status & mmc_tx_irq))
+		DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+		    readl(ioaddr + GMAC_MMC_TX_INTR));
+	if (unlikely(intr_status & mmc_rx_irq))
+		DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+		    readl(ioaddr + GMAC_MMC_RX_INTR));
+	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+		DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+		    readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+	if (unlikely(intr_status & pmt_irq)) {
+		DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+		/* clear the PMT bits 5 and 6 by reading the PMT
+		 * status register. */
+		readl(ioaddr + GMAC_PMT);
+	}
+
+	return;
+}
+
+static void gmac_core_init(unsigned long ioaddr)
+{
+	u32 value = readl(ioaddr + GMAC_CONTROL);
+	value |= GMAC_CORE_INIT;
+	writel(value, ioaddr + GMAC_CONTROL);
+
+	/* STBus Bridge Configuration */
+	/*writel(0xc5608, ioaddr + 0x00007000);*/
+
+	/* Freeze MMC counters */
+	writel(0x8, ioaddr + GMAC_MMC_CTRL);
+	/* Mask GMAC interrupts */
+	writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+	/* Tag detection without filtering */
+	writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+	return;
+}
+
+static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+				unsigned int reg_n)
+{
+	stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+				GMAC_ADDR_LOW(reg_n));
+}
+
+static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+				unsigned int reg_n)
+{
+	stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+				GMAC_ADDR_LOW(reg_n));
+}
+
+static void gmac_set_filter(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	unsigned int value = 0;
+
+	DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+	    __func__, dev->mc_count, dev->uc_count);
+
+	if (dev->flags & IFF_PROMISC)
+		value = GMAC_FRAME_FILTER_PR;
+	else if ((dev->mc_count > HASH_TABLE_SIZE)
+		   || (dev->flags & IFF_ALLMULTI)) {
+		value = GMAC_FRAME_FILTER_PM;	/* pass all multi */
+		writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+		writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+	} else if (dev->mc_count > 0) {
+		int i;
+		u32 mc_filter[2];
+		struct dev_mc_list *mclist;
+
+		/* Hash filter for multicast */
+		value = GMAC_FRAME_FILTER_HMC;
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		for (i = 0, mclist = dev->mc_list;
+		     mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+			/* The upper 6 bits of the calculated CRC are used to
+			   index the contents of the hash table */
+			int bit_nr =
+			    bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+			/* The most significant bit determines the register to
+			 * use (H/L) while the other 5 bits determine the bit
+			 * within the register. */
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		}
+		writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+		writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+	}
+
+	/* Handle multiple unicast addresses (perfect filtering)*/
+	if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
+		/* Switch to promiscuous mode if more than 16 addrs
+		   are required */
+		value |= GMAC_FRAME_FILTER_PR;
+	else {
+		int i;
+		struct dev_addr_list *uc_ptr = dev->uc_list;
+
+		for (i = 0; i < dev->uc_count; i++) {
+			gmac_set_umac_addr(ioaddr, uc_ptr->da_addr, i + 1);
+
+			DBG(KERN_INFO "\t%d "
+			    "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
+			    "%02x\n", i + 1,
+			    uc_ptr->da_addr[0], uc_ptr->da_addr[1],
+			    uc_ptr->da_addr[2], uc_ptr->da_addr[3],
+			    uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
+			uc_ptr = uc_ptr->next;
+		}
+	}
+
+#ifdef FRAME_FILTER_DEBUG
+	/* Enable Receive all mode (to debug filtering_fail errors) */
+	value |= GMAC_FRAME_FILTER_RA;
+#endif
+	writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+	DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+	    "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
+	    readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+
+	return;
+}
+
+static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+			   unsigned int fc, unsigned int pause_time)
+{
+	unsigned int flow = 0;
+
+	DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+	if (fc & FLOW_RX) {
+		DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+		flow |= GMAC_FLOW_CTRL_RFE;
+	}
+	if (fc & FLOW_TX) {
+		DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+		flow |= GMAC_FLOW_CTRL_TFE;
+	}
+
+	if (duplex) {
+		DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+		flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+	}
+
+	writel(flow, ioaddr + GMAC_FLOW_CTRL);
+	return;
+}
+
+static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
+{
+	unsigned int pmt = 0;
+
+	if (mode == WAKE_MAGIC) {
+		DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+		pmt |= power_down | magic_pkt_en;
+	} else if (mode == WAKE_UCAST) {
+		DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+		pmt |= global_unicast;
+	}
+
+	writel(pmt, ioaddr + GMAC_PMT);
+	return;
+}
+
+static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+				int disable_rx_ic)
+{
+	int i;
+	for (i = 0; i < ring_size; i++) {
+		p->des01.erx.own = 1;
+		p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+		/* To support jumbo frames */
+		p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+		if (i == ring_size - 1)
+			p->des01.erx.end_ring = 1;
+		if (disable_rx_ic)
+			p->des01.erx.disable_ic = 1;
+		p++;
+	}
+	return;
+}
+
+static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+	int i;
+
+	for (i = 0; i < ring_size; i++) {
+		p->des01.etx.own = 0;
+		if (i == ring_size - 1)
+			p->des01.etx.end_ring = 1;
+		p++;
+	}
+
+	return;
+}
+
+static int gmac_get_tx_owner(struct dma_desc *p)
+{
+	return p->des01.etx.own;
+}
+
+static int gmac_get_rx_owner(struct dma_desc *p)
+{
+	return p->des01.erx.own;
+}
+
+static void gmac_set_tx_owner(struct dma_desc *p)
+{
+	p->des01.etx.own = 1;
+}
+
+static void gmac_set_rx_owner(struct dma_desc *p)
+{
+	p->des01.erx.own = 1;
+}
+
+static int gmac_get_tx_ls(struct dma_desc *p)
+{
+	return p->des01.etx.last_segment;
+}
+
+static void gmac_release_tx_desc(struct dma_desc *p)
+{
+	int ter = p->des01.etx.end_ring;
+
+	memset(p, 0, sizeof(struct dma_desc));
+	p->des01.etx.end_ring = ter;
+
+	return;
+}
+
+static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+				 int csum_flag)
+{
+	p->des01.etx.first_segment = is_fs;
+	if (unlikely(len > BUF_SIZE_4KiB)) {
+		p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+		p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+	} else {
+		p->des01.etx.buffer1_size = len;
+	}
+	if (likely(csum_flag))
+		p->des01.etx.checksum_insertion = cic_full;
+}
+
+static void gmac_clear_tx_ic(struct dma_desc *p)
+{
+	p->des01.etx.interrupt = 0;
+}
+
+static void gmac_close_tx_desc(struct dma_desc *p)
+{
+	p->des01.etx.last_segment = 1;
+	p->des01.etx.interrupt = 1;
+}
+
+static int gmac_get_rx_frame_len(struct dma_desc *p)
+{
+	return p->des01.erx.frame_length;
+}
+
+struct stmmac_ops gmac_driver = {
+	.core_init = gmac_core_init,
+	.dump_mac_regs = gmac_dump_regs,
+	.dma_init = gmac_dma_init,
+	.dump_dma_regs = gmac_dump_dma_regs,
+	.dma_mode = gmac_dma_operation_mode,
+	.dma_diagnostic_fr = gmac_dma_diagnostic_fr,
+	.tx_status = gmac_get_tx_frame_status,
+	.rx_status = gmac_get_rx_frame_status,
+	.get_tx_len = gmac_get_tx_len,
+	.set_filter = gmac_set_filter,
+	.flow_ctrl = gmac_flow_ctrl,
+	.pmt = gmac_pmt,
+	.init_rx_desc = gmac_init_rx_desc,
+	.init_tx_desc = gmac_init_tx_desc,
+	.get_tx_owner = gmac_get_tx_owner,
+	.get_rx_owner = gmac_get_rx_owner,
+	.release_tx_desc = gmac_release_tx_desc,
+	.prepare_tx_desc = gmac_prepare_tx_desc,
+	.clear_tx_ic = gmac_clear_tx_ic,
+	.close_tx_desc = gmac_close_tx_desc,
+	.get_tx_ls = gmac_get_tx_ls,
+	.set_tx_owner = gmac_set_tx_owner,
+	.set_rx_owner = gmac_set_rx_owner,
+	.get_rx_frame_len = gmac_get_rx_frame_len,
+	.host_irq_status = gmac_irq_status,
+	.set_umac_addr = gmac_set_umac_addr,
+	.get_umac_addr = gmac_get_umac_addr,
+};
+
+struct mac_device_info *gmac_setup(unsigned long ioaddr)
+{
+	struct mac_device_info *mac;
+	u32 uid = readl(ioaddr + GMAC_VERSION);
+
+	pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
+	       ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+
+	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+	mac->ops = &gmac_driver;
+	mac->hw.pmt = PMT_SUPPORTED;
+	mac->hw.link.port = GMAC_CONTROL_PS;
+	mac->hw.link.duplex = GMAC_CONTROL_DM;
+	mac->hw.link.speed = GMAC_CONTROL_FES;
+	mac->hw.mii.addr = GMAC_MII_ADDR;
+	mac->hw.mii.data = GMAC_MII_DATA;
+
+	return mac;
+}
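
One detail in gmac_set_filter() worth spelling out is the hash computation: the CRC32 of the multicast address is complemented, bit-reversed, and its top 6 bits select one of the 64 hash-table bits split across the HIGH/LOW registers. A standalone sketch using the same helpers the patch already relies on (crc32_le, bitrev32); the address and the printed message are illustrative only:

static void example_gmac_hash_bit(void)
{
	u8 addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };	/* e.g. 224.0.0.1 */
	int bit_nr = bitrev32(~crc32_le(~0, addr, ETH_ALEN)) >> 26;
	int reg = bit_nr >> 5;		/* 0 -> GMAC_HASH_LOW, 1 -> GMAC_HASH_HIGH */
	int bit = bit_nr & 31;		/* bit position within that register */

	pr_info("hash index %d -> %s bit %d\n", bit_nr,
		reg ? "GMAC_HASH_HIGH" : "GMAC_HASH_LOW", bit);
}
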
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/gmac.h
new file mode 100644
index 0000000..684a363
--- /dev/null
+++ b/drivers/net/stmmac/gmac.h
@@ -0,0 +1,204 @@
+/*******************************************************************************
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#define GMAC_CONTROL		0x00000000	/* Configuration */
+#define GMAC_FRAME_FILTER	0x00000004	/* Frame Filter */
+#define GMAC_HASH_HIGH		0x00000008	/* Multicast Hash Table High */
+#define GMAC_HASH_LOW		0x0000000c	/* Multicast Hash Table Low */
+#define GMAC_MII_ADDR		0x00000010	/* MII Address */
+#define GMAC_MII_DATA		0x00000014	/* MII Data */
+#define GMAC_FLOW_CTRL		0x00000018	/* Flow Control */
+#define GMAC_VLAN_TAG		0x0000001c	/* VLAN Tag */
+#define GMAC_VERSION		0x00000020	/* GMAC CORE Version */
+#define GMAC_WAKEUP_FILTER	0x00000028	/* Wake-up Frame Filter */
+
+#define GMAC_INT_STATUS		0x00000038	/* interrupt status register */
+enum gmac_irq_status {
+	time_stamp_irq = 0x0200,
+	mmc_rx_csum_offload_irq = 0x0080,
+	mmc_tx_irq = 0x0040,
+	mmc_rx_irq = 0x0020,
+	mmc_irq = 0x0010,
+	pmt_irq = 0x0008,
+	pcs_ane_irq = 0x0004,
+	pcs_link_irq = 0x0002,
+	rgmii_irq = 0x0001,
+};
+#define GMAC_INT_MASK		0x0000003c	/* interrupt mask register */
+
+/* PMT Control and Status */
+#define GMAC_PMT		0x0000002c
+enum power_event {
+	pointer_reset = 0x80000000,
+	global_unicast = 0x00000200,
+	wake_up_rx_frame = 0x00000040,
+	magic_frame = 0x00000020,
+	wake_up_frame_en = 0x00000004,
+	magic_pkt_en = 0x00000002,
+	power_down = 0x00000001,
+};
+
+/* GMAC HW ADDR regs */
+#define GMAC_ADDR_HIGH(reg)		(0x00000040+(reg * 8))
+#define GMAC_ADDR_LOW(reg)		(0x00000044+(reg * 8))
+#define GMAC_MAX_UNICAST_ADDRESSES	16
+
+#define GMAC_AN_CTRL	0x000000c0	/* AN control */
+#define GMAC_AN_STATUS	0x000000c4	/* AN status */
+#define GMAC_ANE_ADV	0x000000c8	/* Auto-Neg. Advertisement */
+#define GMAC_ANE_LINK	0x000000cc	/* Auto-Neg. link partner ability */
+#define GMAC_ANE_EXP	0x000000d0	/* ANE expansion */
+#define GMAC_TBI	0x000000d4	/* TBI extend status */
+#define GMAC_GMII_STATUS 0x000000d8	/* S/R-GMII status */
+
+/* GMAC Configuration defines */
+#define GMAC_CONTROL_TC	0x01000000	/* Transmit Conf. in RGMII/SGMII */
+#define GMAC_CONTROL_WD	0x00800000	/* Disable Watchdog on receive */
+#define GMAC_CONTROL_JD	0x00400000	/* Jabber disable */
+#define GMAC_CONTROL_BE	0x00200000	/* Frame Burst Enable */
+#define GMAC_CONTROL_JE	0x00100000	/* Jumbo frame */
+enum inter_frame_gap {
+	GMAC_CONTROL_IFG_88 = 0x00040000,
+	GMAC_CONTROL_IFG_80 = 0x00020000,
+	GMAC_CONTROL_IFG_40 = 0x000e0000,
+};
+#define GMAC_CONTROL_DCRS	0x00010000 /* Disable carrier sense during tx */
+#define GMAC_CONTROL_PS		0x00008000 /* Port Select 0:GMI 1:MII */
+#define GMAC_CONTROL_FES	0x00004000 /* Speed 0:10 1:100 */
+#define GMAC_CONTROL_DO		0x00002000 /* Disable Rx Own */
+#define GMAC_CONTROL_LM		0x00001000 /* Loop-back mode */
+#define GMAC_CONTROL_DM		0x00000800 /* Duplex Mode */
+#define GMAC_CONTROL_IPC	0x00000400 /* Checksum Offload */
+#define GMAC_CONTROL_DR		0x00000200 /* Disable Retry */
+#define GMAC_CONTROL_LUD	0x00000100 /* Link up/down */
+#define GMAC_CONTROL_ACS	0x00000080 /* Automatic Pad Stripping */
+#define GMAC_CONTROL_DC		0x00000010 /* Deferral Check */
+#define GMAC_CONTROL_TE		0x00000008 /* Transmitter Enable */
+#define GMAC_CONTROL_RE		0x00000004 /* Receiver Enable */
+
+#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
+			GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+
+/* GMAC Frame Filter defines */
+#define GMAC_FRAME_FILTER_PR	0x00000001	/* Promiscuous Mode */
+#define GMAC_FRAME_FILTER_HUC	0x00000002	/* Hash Unicast */
+#define GMAC_FRAME_FILTER_HMC	0x00000004	/* Hash Multicast */
+#define GMAC_FRAME_FILTER_DAIF	0x00000008	/* DA Inverse Filtering */
+#define GMAC_FRAME_FILTER_PM	0x00000010	/* Pass all multicast */
+#define GMAC_FRAME_FILTER_DBF	0x00000020	/* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_SAIF	0x00000100	/* Inverse Filtering */
+#define GMAC_FRAME_FILTER_SAF	0x00000200	/* Source Address Filter */
+#define GMAC_FRAME_FILTER_HPF	0x00000400	/* Hash or perfect Filter */
+#define GMAC_FRAME_FILTER_RA	0x80000000	/* Receive all mode */
+/* GMII ADDR  defines */
+#define GMAC_MII_ADDR_WRITE	0x00000002	/* MII Write */
+#define GMAC_MII_ADDR_BUSY	0x00000001	/* MII Busy */
+/* GMAC FLOW CTRL defines */
+#define GMAC_FLOW_CTRL_PT_MASK	0xffff0000	/* Pause Time Mask */
+#define GMAC_FLOW_CTRL_PT_SHIFT	16
+#define GMAC_FLOW_CTRL_RFE	0x00000004	/* Rx Flow Control Enable */
+#define GMAC_FLOW_CTRL_TFE	0x00000002	/* Tx Flow Control Enable */
+#define GMAC_FLOW_CTRL_FCB_BPA	0x00000001	/* Flow Control Busy ... */
+
+/*--- DMA BLOCK defines ---*/
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */
+#define DMA_BUS_MODE_DA		0x00000002	/* Arbitration scheme */
+#define DMA_BUS_MODE_DSL_MASK	0x0000007c	/* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT	2	/*   (in DWORDS)      */
+/* Programmable burst length (passed through platform) */
+#define DMA_BUS_MODE_PBL_MASK	0x00003f00	/* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT	8
+
+enum rx_tx_priority_ratio {
+	double_ratio = 0x00004000,	/*2:1 */
+	triple_ratio = 0x00008000,	/*3:1 */
+	quadruple_ratio = 0x0000c000,	/*4:1 */
+};
+
+#define DMA_BUS_MODE_FB		0x00010000	/* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK	0x003e0000	/* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT	17
+#define DMA_BUS_MODE_USP	0x00800000
+#define DMA_BUS_MODE_4PBL	0x01000000
+#define DMA_BUS_MODE_AAL	0x02000000
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_HOST_TX_DESC	  0x00001048	/* Current Host Tx descriptor */
+#define DMA_HOST_RX_DESC	  0x0000104c	/* Current Host Rx descriptor */
+/*  DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK	  0x0000c000	/* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT	  14
+#define DMA_BUS_FB	  	  0x00010000	/* Fixed Burst */
+
+/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
+#define DMA_CONTROL_DT		0x04000000 /* Disable Drop TCP/IP csum error */
+#define DMA_CONTROL_RSF		0x02000000 /* Receive Store and Forward */
+#define DMA_CONTROL_DFF		0x01000000 /* Disable flushing */
+/* Threshold for Activating the FC */
+enum rfa {
+	act_full_minus_1 = 0x00800000,
+	act_full_minus_2 = 0x00800200,
+	act_full_minus_3 = 0x00800400,
+	act_full_minus_4 = 0x00800600,
+};
+/* Threshold for Deactivating the FC */
+enum rfd {
+	deac_full_minus_1 = 0x00400000,
+	deac_full_minus_2 = 0x00400800,
+	deac_full_minus_3 = 0x00401000,
+	deac_full_minus_4 = 0x00401800,
+};
+#define DMA_CONTROL_TSF		0x00200000 /* Transmit  Store and Forward */
+#define DMA_CONTROL_FTF		0x00100000 /* Flush transmit FIFO */
+
+enum ttc_control {
+	DMA_CONTROL_TTC_64 = 0x00000000,
+	DMA_CONTROL_TTC_128 = 0x00004000,
+	DMA_CONTROL_TTC_192 = 0x00008000,
+	DMA_CONTROL_TTC_256 = 0x0000c000,
+	DMA_CONTROL_TTC_40 = 0x00010000,
+	DMA_CONTROL_TTC_32 = 0x00014000,
+	DMA_CONTROL_TTC_24 = 0x00018000,
+	DMA_CONTROL_TTC_16 = 0x0001c000,
+};
+#define DMA_CONTROL_TC_TX_MASK	0xfffe3fff
+
+#define DMA_CONTROL_EFC		0x00000100
+#define DMA_CONTROL_FEF		0x00000080
+#define DMA_CONTROL_FUF		0x00000040
+
+enum rtc_control {
+	DMA_CONTROL_RTC_64 = 0x00000000,
+	DMA_CONTROL_RTC_32 = 0x00000008,
+	DMA_CONTROL_RTC_96 = 0x00000010,
+	DMA_CONTROL_RTC_128 = 0x00000018,
+};
+#define DMA_CONTROL_TC_RX_MASK	0xffffffe7
+
+#define DMA_CONTROL_OSF	0x00000004	/* Operate on second frame */
+
+/* MMC registers offset */
+#define GMAC_MMC_CTRL      0x100
+#define GMAC_MMC_RX_INTR   0x104
+#define GMAC_MMC_TX_INTR   0x108
+#define GMAC_MMC_RX_CSUM_OFFLOAD   0x208
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/mac100.c
new file mode 100644
index 0000000..625171b
--- /dev/null
+++ b/drivers/net/stmmac/mac100.c
@@ -0,0 +1,517 @@
+/*******************************************************************************
+  This is the driver for the MAC 10/100 on-chip Ethernet controller
+  currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+  DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+  this code.
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "common.h"
+#include "mac100.h"
+
+#undef MAC100_DEBUG
+/*#define MAC100_DEBUG*/
+#ifdef MAC100_DEBUG
+#define DBG(fmt, args...)  printk(fmt, ## args)
+#else
+#define DBG(fmt, args...)  do { } while (0)
+#endif
+
+static void mac100_core_init(unsigned long ioaddr)
+{
+	u32 value = readl(ioaddr + MAC_CONTROL);
+
+	writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+#ifdef STMMAC_VLAN_TAG_USED
+	writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
+#endif
+	return;
+}
+
+static void mac100_dump_mac_regs(unsigned long ioaddr)
+{
+	pr_info("\t----------------------------------------------\n"
+	       "\t  MAC100 CSR (base addr = 0x%8x)\n"
+	       "\t----------------------------------------------\n",
+	       (unsigned int)ioaddr);
+	pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+	       readl(ioaddr + MAC_CONTROL));
+	pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
+	       readl(ioaddr + MAC_ADDR_HIGH));
+	pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+	       readl(ioaddr + MAC_ADDR_LOW));
+	pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+			MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+	pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+			MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+	pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+		MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+	pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+	       readl(ioaddr + MAC_VLAN1));
+	pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+	       readl(ioaddr + MAC_VLAN2));
+	pr_info("\n\tMAC management counter registers\n");
+	pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
+	       MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+	pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
+	       MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+	pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
+	       MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+	pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
+	       MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+	pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
+	       MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+	return;
+}
+
+static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+			   u32 dma_rx)
+{
+	u32 value = readl(ioaddr + DMA_BUS_MODE);
+	/* DMA SW reset */
+	value |= DMA_BUS_MODE_SFT_RESET;
+	writel(value, ioaddr + DMA_BUS_MODE);
+	do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+
+	/* Enable Application Access by writing to DMA CSR0 */
+	writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+	       ioaddr + DMA_BUS_MODE);
+
+	/* Mask interrupts by writing to CSR7 */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+	/* The base address of the RX/TX descriptor lists must be written into
+	 * DMA CSR3 and CSR4, respectively. */
+	writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+	writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+	return 0;
+}
+
+/* Store and Forward capability is not used at all.
+ * The transmit threshold can be programmed by
+ * setting the TTC bits in the DMA control register.*/
+static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+				      int rxmode)
+{
+	u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+	if (txmode <= 32)
+		csr6 |= DMA_CONTROL_TTC_32;
+	else if (txmode <= 64)
+		csr6 |= DMA_CONTROL_TTC_64;
+	else
+		csr6 |= DMA_CONTROL_TTC_128;
+
+	writel(csr6, ioaddr + DMA_CONTROL);
+
+	return;
+}
+
+static void mac100_dump_dma_regs(unsigned long ioaddr)
+{
+	int i;
+
+	DBG(KERN_DEBUG "MAC100 DMA CSR \n");
+	for (i = 0; i < 9; i++)
+		pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+		       (DMA_BUS_MODE + i * 4),
+		       readl(ioaddr + DMA_BUS_MODE + i * 4));
+	DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+	    DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+	DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+	    DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+	return;
+}
+
+/* The DMA controller has two counters to track the number of
+   missed receive frames. */
+static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+				     unsigned long ioaddr)
+{
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+	u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+	if (unlikely(csr8)) {
+		if (csr8 & DMA_MISSED_FRAME_OVE) {
+			stats->rx_over_errors += 0x800;
+			x->rx_overflow_cntr += 0x800;
+		} else {
+			unsigned int ove_cntr;
+			ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+			stats->rx_over_errors += ove_cntr;
+			x->rx_overflow_cntr += ove_cntr;
+		}
+
+		if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+			stats->rx_missed_errors += 0xffff;
+			x->rx_missed_cntr += 0xffff;
+		} else {
+			unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+			stats->rx_missed_errors += miss_f;
+			x->rx_missed_cntr += miss_f;
+		}
+	}
+	return;
+}
+
+static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+				      struct dma_desc *p, unsigned long ioaddr)
+{
+	int ret = 0;
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+
+	if (unlikely(p->des01.tx.error_summary)) {
+		if (unlikely(p->des01.tx.underflow_error)) {
+			x->tx_underflow++;
+			stats->tx_fifo_errors++;
+		}
+		if (unlikely(p->des01.tx.no_carrier)) {
+			x->tx_carrier++;
+			stats->tx_carrier_errors++;
+		}
+		if (unlikely(p->des01.tx.loss_carrier)) {
+			x->tx_losscarrier++;
+			stats->tx_carrier_errors++;
+		}
+		if (unlikely((p->des01.tx.excessive_deferral) ||
+			     (p->des01.tx.excessive_collisions) ||
+			     (p->des01.tx.late_collision)))
+			stats->collisions += p->des01.tx.collision_count;
+		ret = -1;
+	}
+	if (unlikely(p->des01.tx.heartbeat_fail)) {
+		x->tx_heartbeat++;
+		stats->tx_heartbeat_errors++;
+		ret = -1;
+	}
+	if (unlikely(p->des01.tx.deferred))
+		x->tx_deferred++;
+
+	return ret;
+}
+
+static int mac100_get_tx_len(struct dma_desc *p)
+{
+	return p->des01.tx.buffer1_size;
+}
+
+/* This function verifies if each incoming frame has some errors
+ * and, if required, updates the multicast statistics.
+ * In case of success, it returns csum_none because the device
+ * is not able to compute the csum in HW. */
+static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+				      struct dma_desc *p)
+{
+	int ret = csum_none;
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+
+	if (unlikely(p->des01.rx.last_descriptor == 0)) {
+		pr_warning("mac100 Error: Oversized Ethernet "
+			   "frame spanned multiple buffers\n");
+		stats->rx_length_errors++;
+		return discard_frame;
+	}
+
+	if (unlikely(p->des01.rx.error_summary)) {
+		if (unlikely(p->des01.rx.descriptor_error))
+			x->rx_desc++;
+		if (unlikely(p->des01.rx.partial_frame_error))
+			x->rx_partial++;
+		if (unlikely(p->des01.rx.run_frame))
+			x->rx_runt++;
+		if (unlikely(p->des01.rx.frame_too_long))
+			x->rx_toolong++;
+		if (unlikely(p->des01.rx.collision)) {
+			x->rx_collision++;
+			stats->collisions++;
+		}
+		if (unlikely(p->des01.rx.crc_error)) {
+			x->rx_crc++;
+			stats->rx_crc_errors++;
+		}
+		ret = discard_frame;
+	}
+	if (unlikely(p->des01.rx.dribbling))
+		ret = discard_frame;
+
+	if (unlikely(p->des01.rx.length_error)) {
+		x->rx_lenght++;
+		ret = discard_frame;
+	}
+	if (unlikely(p->des01.rx.mii_error)) {
+		x->rx_mii++;
+		ret = discard_frame;
+	}
+	if (p->des01.rx.multicast_frame) {
+		x->rx_multicast++;
+		stats->multicast++;
+	}
+	return ret;
+}
+
+static void mac100_irq_status(unsigned long ioaddr)
+{
+	return;
+}
+
+static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+			  unsigned int reg_n)
+{
+	stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+			  unsigned int reg_n)
+{
+	stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void mac100_set_filter(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	u32 value = readl(ioaddr + MAC_CONTROL);
+
+	if (dev->flags & IFF_PROMISC) {
+		value |= MAC_CONTROL_PR;
+		value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+			   MAC_CONTROL_HP);
+	} else if ((dev->mc_count > HASH_TABLE_SIZE)
+		   || (dev->flags & IFF_ALLMULTI)) {
+		value |= MAC_CONTROL_PM;
+		value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+		writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+		writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+	} else if (dev->mc_count == 0) {	/* no multicast */
+		value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+			   MAC_CONTROL_HO | MAC_CONTROL_HP);
+	} else {
+		int i;
+		u32 mc_filter[2];
+		struct dev_mc_list *mclist;
+
+		/* Perfect filter mode for physical address and Hash
+		   filter for multicast */
+		value |= MAC_CONTROL_HP;
+		value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
+			   | MAC_CONTROL_HO);
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		for (i = 0, mclist = dev->mc_list;
+		     mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+			/* The upper 6 bits of the calculated CRC are used to
+			 * index the contents of the hash table */
+			int bit_nr =
+			    ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+			/* The most significant bit determines the register to
+			 * use (H/L) while the other 5 bits determine the bit
+			 * within the register. */
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		}
+		writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+		writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+	}
+
+	writel(value, ioaddr + MAC_CONTROL);
+
+	DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
+	    "HI 0x%08x, LO 0x%08x\n",
+	    __func__, readl(ioaddr + MAC_CONTROL),
+	    readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+	return;
+}
+
+static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+			     unsigned int fc, unsigned int pause_time)
+{
+	unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+	if (duplex)
+		flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+	writel(flow, ioaddr + MAC_FLOW_CTRL);
+
+	return;
+}
+
+/* No PMT module supported in our SoC  for the Ethernet Controller. */
+static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
+{
+	return;
+}
+
+static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+				int disable_rx_ic)
+{
+	int i;
+	for (i = 0; i < ring_size; i++) {
+		p->des01.rx.own = 1;
+		p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+		if (i == ring_size - 1)
+			p->des01.rx.end_ring = 1;
+		if (disable_rx_ic)
+			p->des01.rx.disable_ic = 1;
+		p++;
+	}
+	return;
+}
+
+static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+	int i;
+	for (i = 0; i < ring_size; i++) {
+		p->des01.tx.own = 0;
+		if (i == ring_size - 1)
+			p->des01.tx.end_ring = 1;
+		p++;
+	}
+	return;
+}
+
+static int mac100_get_tx_owner(struct dma_desc *p)
+{
+	return p->des01.tx.own;
+}
+
+static int mac100_get_rx_owner(struct dma_desc *p)
+{
+	return p->des01.rx.own;
+}
+
+static void mac100_set_tx_owner(struct dma_desc *p)
+{
+	p->des01.tx.own = 1;
+}
+
+static void mac100_set_rx_owner(struct dma_desc *p)
+{
+	p->des01.rx.own = 1;
+}
+
+static int mac100_get_tx_ls(struct dma_desc *p)
+{
+	return p->des01.tx.last_segment;
+}
+
+static void mac100_release_tx_desc(struct dma_desc *p)
+{
+	int ter = p->des01.tx.end_ring;
+
+	/* clean field used within the xmit */
+	p->des01.tx.first_segment = 0;
+	p->des01.tx.last_segment = 0;
+	p->des01.tx.buffer1_size = 0;
+
+	/* clean status reported */
+	p->des01.tx.error_summary = 0;
+	p->des01.tx.underflow_error = 0;
+	p->des01.tx.no_carrier = 0;
+	p->des01.tx.loss_carrier = 0;
+	p->des01.tx.excessive_deferral = 0;
+	p->des01.tx.excessive_collisions = 0;
+	p->des01.tx.late_collision = 0;
+	p->des01.tx.heartbeat_fail = 0;
+	p->des01.tx.deferred = 0;
+
+	/* set termination field */
+	p->des01.tx.end_ring = ter;
+
+	return;
+}
+
+static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+				   int csum_flag)
+{
+	p->des01.tx.first_segment = is_fs;
+	p->des01.tx.buffer1_size = len;
+}
+
+static void mac100_clear_tx_ic(struct dma_desc *p)
+{
+	p->des01.tx.interrupt = 0;
+}
+
+static void mac100_close_tx_desc(struct dma_desc *p)
+{
+	p->des01.tx.last_segment = 1;
+	p->des01.tx.interrupt = 1;
+}
+
+static int mac100_get_rx_frame_len(struct dma_desc *p)
+{
+	return p->des01.rx.frame_length;
+}
+
+struct stmmac_ops mac100_driver = {
+	.core_init = mac100_core_init,
+	.dump_mac_regs = mac100_dump_mac_regs,
+	.dma_init = mac100_dma_init,
+	.dump_dma_regs = mac100_dump_dma_regs,
+	.dma_mode = mac100_dma_operation_mode,
+	.dma_diagnostic_fr = mac100_dma_diagnostic_fr,
+	.tx_status = mac100_get_tx_frame_status,
+	.rx_status = mac100_get_rx_frame_status,
+	.get_tx_len = mac100_get_tx_len,
+	.set_filter = mac100_set_filter,
+	.flow_ctrl = mac100_flow_ctrl,
+	.pmt = mac100_pmt,
+	.init_rx_desc = mac100_init_rx_desc,
+	.init_tx_desc = mac100_init_tx_desc,
+	.get_tx_owner = mac100_get_tx_owner,
+	.get_rx_owner = mac100_get_rx_owner,
+	.release_tx_desc = mac100_release_tx_desc,
+	.prepare_tx_desc = mac100_prepare_tx_desc,
+	.clear_tx_ic = mac100_clear_tx_ic,
+	.close_tx_desc = mac100_close_tx_desc,
+	.get_tx_ls = mac100_get_tx_ls,
+	.set_tx_owner = mac100_set_tx_owner,
+	.set_rx_owner = mac100_set_rx_owner,
+	.get_rx_frame_len = mac100_get_rx_frame_len,
+	.host_irq_status = mac100_irq_status,
+	.set_umac_addr = mac100_set_umac_addr,
+	.get_umac_addr = mac100_get_umac_addr,
+};
+
+struct mac_device_info *mac100_setup(unsigned long ioaddr)
+{
+	struct mac_device_info *mac;
+
+	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+	pr_info("\tMAC 10/100\n");
+
+	mac->ops = &mac100_driver;
+	mac->hw.pmt = PMT_NOT_SUPPORTED;
+	mac->hw.link.port = MAC_CONTROL_PS;
+	mac->hw.link.duplex = MAC_CONTROL_F;
+	mac->hw.link.speed = 0;
+	mac->hw.mii.addr = MAC_MII_ADDR;
+	mac->hw.mii.data = MAC_MII_DATA;
+
+	return mac;
+}
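
The missed-frame bookkeeping in mac100_dma_diagnostic_fr() is easier to follow with the CSR8 layout in mind: bits 27..17 hold the FIFO overflow counter and bits 15..0 the missed-frame counter, each paired with its own saturation flag. A hedged sketch of the same decoding as a standalone helper; the function name and the printed message are illustrative only:

static void example_decode_csr8(u32 csr8)
{
	unsigned int overflow, missed;

	/* when a saturation flag is set, account the full counter range */
	overflow = (csr8 & DMA_MISSED_FRAME_OVE) ?
			0x800 : (csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17;
	missed = (csr8 & DMA_MISSED_FRAME_OVE_M) ?
			0xffff : (csr8 & DMA_MISSED_FRAME_M_CNTR);

	pr_info("rx overflow: %u, rx missed: %u\n", overflow, missed);
}
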
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/mac100.h
new file mode 100644
index 0000000..0f8f110
--- /dev/null
+++ b/drivers/net/stmmac/mac100.h
@@ -0,0 +1,116 @@
+/*******************************************************************************
+  MAC 10/100 Header File
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/*----------------------------------------------------------------------------
+ *	 			MAC BLOCK defines
+ *---------------------------------------------------------------------------*/
+/* MAC CSR offset */
+#define MAC_CONTROL	0x00000000	/* MAC Control */
+#define MAC_ADDR_HIGH	0x00000004	/* MAC Address High */
+#define MAC_ADDR_LOW	0x00000008	/* MAC Address Low */
+#define MAC_HASH_HIGH	0x0000000c	/* Multicast Hash Table High */
+#define MAC_HASH_LOW	0x00000010	/* Multicast Hash Table Low */
+#define MAC_MII_ADDR	0x00000014	/* MII Address */
+#define MAC_MII_DATA	0x00000018	/* MII Data */
+#define MAC_FLOW_CTRL	0x0000001c	/* Flow Control */
+#define MAC_VLAN1	0x00000020	/* VLAN1 Tag */
+#define MAC_VLAN2	0x00000024	/* VLAN2 Tag */
+
+/* MAC CTRL defines */
+#define MAC_CONTROL_RA	0x80000000	/* Receive All Mode */
+#define MAC_CONTROL_BLE	0x40000000	/* Endian Mode */
+#define MAC_CONTROL_HBD	0x10000000	/* Heartbeat Disable */
+#define MAC_CONTROL_PS	0x08000000	/* Port Select */
+#define MAC_CONTROL_DRO	0x00800000	/* Disable Receive Own */
+#define MAC_CONTROL_EXT_LOOPBACK 0x00400000	/* Reserved (ext loopback?) */
+#define MAC_CONTROL_OM	0x00200000	/* Loopback Operating Mode */
+#define MAC_CONTROL_F	0x00100000	/* Full Duplex Mode */
+#define MAC_CONTROL_PM	0x00080000	/* Pass All Multicast */
+#define MAC_CONTROL_PR	0x00040000	/* Promiscuous Mode */
+#define MAC_CONTROL_IF	0x00020000	/* Inverse Filtering */
+#define MAC_CONTROL_PB	0x00010000	/* Pass Bad Frames */
+#define MAC_CONTROL_HO	0x00008000	/* Hash Only Filtering Mode */
+#define MAC_CONTROL_HP	0x00002000	/* Hash/Perfect Filtering Mode */
+#define MAC_CONTROL_LCC	0x00001000	/* Late Collision Control */
+#define MAC_CONTROL_DBF	0x00000800	/* Disable Broadcast Frames */
+#define MAC_CONTROL_DRTY	0x00000400	/* Disable Retry */
+#define MAC_CONTROL_ASTP	0x00000100	/* Automatic Pad Stripping */
+#define MAC_CONTROL_BOLMT_10	0x00000000	/* Back Off Limit 10 */
+#define MAC_CONTROL_BOLMT_8	0x00000040	/* Back Off Limit 8 */
+#define MAC_CONTROL_BOLMT_4	0x00000080	/* Back Off Limit 4 */
+#define MAC_CONTROL_BOLMT_1	0x000000c0	/* Back Off Limit 1 */
+#define MAC_CONTROL_DC		0x00000020	/* Deferral Check */
+#define MAC_CONTROL_TE		0x00000008	/* Transmitter Enable */
+#define MAC_CONTROL_RE		0x00000004	/* Receiver Enable */
+
+#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
+
+/* MAC FLOW CTRL defines */
+#define MAC_FLOW_CTRL_PT_MASK	0xffff0000	/* Pause Time Mask */
+#define MAC_FLOW_CTRL_PT_SHIFT	16
+#define MAC_FLOW_CTRL_PASS	0x00000004	/* Pass Control Frames */
+#define MAC_FLOW_CTRL_ENABLE	0x00000002	/* Flow Control Enable */
+#define MAC_FLOW_CTRL_PAUSE	0x00000001	/* Flow Control Busy ... */
+
+/* MII ADDR  defines */
+#define MAC_MII_ADDR_WRITE	0x00000002	/* MII Write */
+#define MAC_MII_ADDR_BUSY	0x00000001	/* MII Busy */
+
+/*----------------------------------------------------------------------------
+ * 				DMA BLOCK defines
+ *---------------------------------------------------------------------------*/
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_DBO	0x00100000	/* Descriptor Byte Ordering */
+#define DMA_BUS_MODE_BLE	0x00000080	/* Big Endian/Little Endian */
+#define DMA_BUS_MODE_PBL_MASK	0x00003f00	/* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT	8
+#define DMA_BUS_MODE_DSL_MASK	0x0000007c	/* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT	2	/*   (in DWORDS)      */
+#define DMA_BUS_MODE_BAR_BUS	0x00000002	/* Bar-Bus Arbitration */
+#define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */
+#define DMA_BUS_MODE_DEFAULT	0x00000000
+
+/* DMA Control register defines */
+#define DMA_CONTROL_SF		0x00200000	/* Store And Forward */
+
+/* Transmit Threshold Control */
+enum ttc_control {
+	DMA_CONTROL_TTC_DEFAULT = 0x00000000,	/* Threshold is 32 DWORDS */
+	DMA_CONTROL_TTC_64 = 0x00004000,	/* Threshold is 64 DWORDS */
+	DMA_CONTROL_TTC_128 = 0x00008000,	/* Threshold is 128 DWORDS */
+	DMA_CONTROL_TTC_256 = 0x0000c000,	/* Threshold is 256 DWORDS */
+	DMA_CONTROL_TTC_18 = 0x00400000,	/* Threshold is 18 DWORDS */
+	DMA_CONTROL_TTC_24 = 0x00404000,	/* Threshold is 24 DWORDS */
+	DMA_CONTROL_TTC_32 = 0x00408000,	/* Threshold is 32 DWORDS */
+	DMA_CONTROL_TTC_40 = 0x0040c000,	/* Threshold is 40 DWORDS */
+	DMA_CONTROL_SE = 0x00000008,	/* Stop On Empty */
+	DMA_CONTROL_OSF = 0x00000004,	/* Operate On 2nd Frame */
+};
+
+/* STMAC110 DMA Missed Frame Counter register defines */
+#define DMA_MISSED_FRAME_OVE	0x10000000	/* Overflow of the FIFO Overflow Counter */
+#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000	/* Overflow Frame Counter */
+#define DMA_MISSED_FRAME_OVE_M	0x00010000	/* Missed Frame Overflow */
+#define DMA_MISSED_FRAME_M_CNTR	0x0000ffff	/* Missed Frame Counter */
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
new file mode 100644
index 0000000..6d2eae3
--- /dev/null
+++ b/drivers/net/stmmac/stmmac.h
@@ -0,0 +1,98 @@
+/*******************************************************************************
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#define DRV_MODULE_VERSION	"Oct_09"
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "common.h"
+#ifdef CONFIG_STMMAC_TIMER
+#include "stmmac_timer.h"
+#endif
+
+struct stmmac_priv {
+	/* Frequently used values are kept adjacent for cache effect */
+	struct dma_desc *dma_tx ____cacheline_aligned;
+	dma_addr_t dma_tx_phy;
+	struct sk_buff **tx_skbuff;
+	unsigned int cur_tx;
+	unsigned int dirty_tx;
+	unsigned int dma_tx_size;
+	int tx_coe;
+	int tx_coalesce;
+
+	struct dma_desc *dma_rx;
+	unsigned int cur_rx;
+	unsigned int dirty_rx;
+	struct sk_buff **rx_skbuff;
+	dma_addr_t *rx_skbuff_dma;
+	struct sk_buff_head rx_recycle;
+
+	struct net_device *dev;
+	int is_gmac;
+	dma_addr_t dma_rx_phy;
+	unsigned int dma_rx_size;
+	int rx_csum;
+	unsigned int dma_buf_sz;
+	struct device *device;
+	struct mac_device_info *mac_type;
+
+	struct stmmac_extra_stats xstats;
+	struct napi_struct napi;
+
+	phy_interface_t phy_interface;
+	int pbl;
+	int bus_id;
+	int phy_addr;
+	int phy_mask;
+	int (*phy_reset) (void *priv);
+	void (*fix_mac_speed) (void *priv, unsigned int speed);
+	void *bsp_priv;
+
+	int phy_irq;
+	struct phy_device *phydev;
+	int oldlink;
+	int speed;
+	int oldduplex;
+	unsigned int flow_ctrl;
+	unsigned int pause;
+	struct mii_bus *mii;
+
+	u32 msg_enable;
+	spinlock_t lock;
+	int wolopts;
+	int wolenabled;
+	int shutdown;
+#ifdef CONFIG_STMMAC_TIMER
+	struct stmmac_timer *tm;
+#endif
+#ifdef STMMAC_VLAN_TAG_USED
+	struct vlan_group *vlgrp;
+#endif
+};
+
+extern int stmmac_mdio_unregister(struct net_device *ndev);
+extern int stmmac_mdio_register(struct net_device *ndev);
+extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
new file mode 100644
index 0000000..694ebe6
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -0,0 +1,395 @@
+/*******************************************************************************
+  STMMAC Ethtool support
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "stmmac.h"
+
+#define REG_SPACE_SIZE	0x1054
+#define MAC100_ETHTOOL_NAME	"st_mac100"
+#define GMAC_ETHTOOL_NAME	"st_gmac"
+
+struct stmmac_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
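+/* Each entry records the counter name, its size and its offset inside
+ * struct stmmac_priv, so stmmac_get_ethtool_stats() can read the value
+ * directly from the private structure. */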
+#define STMMAC_STAT(m)	\
+	{ #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),	\
+	offsetof(struct stmmac_priv, xstats.m)}
+
+static const struct  stmmac_stats stmmac_gstrings_stats[] = {
+	STMMAC_STAT(tx_underflow),
+	STMMAC_STAT(tx_carrier),
+	STMMAC_STAT(tx_losscarrier),
+	STMMAC_STAT(tx_heartbeat),
+	STMMAC_STAT(tx_deferred),
+	STMMAC_STAT(tx_vlan),
+	STMMAC_STAT(rx_vlan),
+	STMMAC_STAT(tx_jabber),
+	STMMAC_STAT(tx_frame_flushed),
+	STMMAC_STAT(tx_payload_error),
+	STMMAC_STAT(tx_ip_header_error),
+	STMMAC_STAT(rx_desc),
+	STMMAC_STAT(rx_partial),
+	STMMAC_STAT(rx_runt),
+	STMMAC_STAT(rx_toolong),
+	STMMAC_STAT(rx_collision),
+	STMMAC_STAT(rx_crc),
+	STMMAC_STAT(rx_lenght),
+	STMMAC_STAT(rx_mii),
+	STMMAC_STAT(rx_multicast),
+	STMMAC_STAT(rx_gmac_overflow),
+	STMMAC_STAT(rx_watchdog),
+	STMMAC_STAT(da_rx_filter_fail),
+	STMMAC_STAT(sa_rx_filter_fail),
+	STMMAC_STAT(rx_missed_cntr),
+	STMMAC_STAT(rx_overflow_cntr),
+	STMMAC_STAT(tx_undeflow_irq),
+	STMMAC_STAT(tx_process_stopped_irq),
+	STMMAC_STAT(tx_jabber_irq),
+	STMMAC_STAT(rx_overflow_irq),
+	STMMAC_STAT(rx_buf_unav_irq),
+	STMMAC_STAT(rx_process_stopped_irq),
+	STMMAC_STAT(rx_watchdog_irq),
+	STMMAC_STAT(tx_early_irq),
+	STMMAC_STAT(fatal_bus_error_irq),
+	STMMAC_STAT(threshold),
+	STMMAC_STAT(tx_pkt_n),
+	STMMAC_STAT(rx_pkt_n),
+	STMMAC_STAT(poll_n),
+	STMMAC_STAT(sched_timer_n),
+	STMMAC_STAT(normal_irq_n),
+};
+#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
+
+void stmmac_ethtool_getdrvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (!priv->is_gmac)
+		strcpy(info->driver, MAC100_ETHTOOL_NAME);
+	else
+		strcpy(info->driver, GMAC_ETHTOOL_NAME);
+
+	strcpy(info->version, DRV_MODULE_VERSION);
+	info->fw_version[0] = '\0';
+	info->n_stats = STMMAC_STATS_LEN;
+	return;
+}
+
+int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct phy_device *phy = priv->phydev;
+	int rc;
+	if (phy == NULL) {
+		pr_err("%s: %s: PHY is not registered\n",
+		       __func__, dev->name);
+		return -ENODEV;
+	}
+	if (!netif_running(dev)) {
+		pr_err("%s: interface is disabled: we cannot track "
+		"link speed / duplex setting\n", dev->name);
+		return -EBUSY;
+	}
+	cmd->transceiver = XCVR_INTERNAL;
+	spin_lock_irq(&priv->lock);
+	rc = phy_ethtool_gset(phy, cmd);
+	spin_unlock_irq(&priv->lock);
+	return rc;
+}
+
+int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct phy_device *phy = priv->phydev;
+	int rc;
+
+	spin_lock(&priv->lock);
+	rc = phy_ethtool_sset(phy, cmd);
+	spin_unlock(&priv->lock);
+
+	return rc;
+}
+
+u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	return priv->msg_enable;
+}
+
+void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	priv->msg_enable = level;
+
+}
+
+int stmmac_check_if_running(struct net_device *dev)
+{
+	if (!netif_running(dev))
+		return -EBUSY;
+	return 0;
+}
+
+int stmmac_ethtool_get_regs_len(struct net_device *dev)
+{
+	return REG_SPACE_SIZE;
+}
+
+void stmmac_ethtool_gregs(struct net_device *dev,
+			  struct ethtool_regs *regs, void *space)
+{
+	int i;
+	u32 *reg_space = (u32 *) space;
+
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+	if (!priv->is_gmac) {
+		/* MAC registers */
+		for (i = 0; i < 12; i++)
+			reg_space[i] = readl(dev->base_addr + (i * 4));
+		/* DMA registers */
+		for (i = 0; i < 9; i++)
+			reg_space[i + 12] =
+			    readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
+		reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR);
+		reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR);
+	} else {
+		/* MAC registers */
+		for (i = 0; i < 55; i++)
+			reg_space[i] = readl(dev->base_addr + (i * 4));
+		/* DMA registers */
+		for (i = 0; i < 22; i++)
+			reg_space[i + 55] =
+			    readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
+	}
+
+	return;
+}
+
+int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	if (data)
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	return priv->rx_csum;
+}
+
+static void
+stmmac_get_pauseparam(struct net_device *netdev,
+		      struct ethtool_pauseparam *pause)
+{
+	struct stmmac_priv *priv = netdev_priv(netdev);
+
+	spin_lock(&priv->lock);
+
+	pause->rx_pause = 0;
+	pause->tx_pause = 0;
+	pause->autoneg = priv->phydev->autoneg;
+
+	if (priv->flow_ctrl & FLOW_RX)
+		pause->rx_pause = 1;
+	if (priv->flow_ctrl & FLOW_TX)
+		pause->tx_pause = 1;
+
+	spin_unlock(&priv->lock);
+	return;
+}
+
+static int
+stmmac_set_pauseparam(struct net_device *netdev,
+		      struct ethtool_pauseparam *pause)
+{
+	struct stmmac_priv *priv = netdev_priv(netdev);
+	struct phy_device *phy = priv->phydev;
+	int new_pause = FLOW_OFF;
+	int ret = 0;
+
+	spin_lock(&priv->lock);
+
+	if (pause->rx_pause)
+		new_pause |= FLOW_RX;
+	if (pause->tx_pause)
+		new_pause |= FLOW_TX;
+
+	priv->flow_ctrl = new_pause;
+
+	if (phy->autoneg) {
+		if (netif_running(netdev)) {
+			struct ethtool_cmd cmd;
+			/* auto-negotiation automatically restarted */
+			cmd.cmd = ETHTOOL_NWAY_RST;
+			cmd.supported = phy->supported;
+			cmd.advertising = phy->advertising;
+			cmd.autoneg = phy->autoneg;
+			cmd.speed = phy->speed;
+			cmd.duplex = phy->duplex;
+			cmd.phy_address = phy->addr;
+			ret = phy_ethtool_sset(phy, &cmd);
+		}
+	} else {
+		unsigned long ioaddr = netdev->base_addr;
+		priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
+					       priv->flow_ctrl, priv->pause);
+	}
+	spin_unlock(&priv->lock);
+	return ret;
+}
+
+static void stmmac_get_ethtool_stats(struct net_device *dev,
+				 struct ethtool_stats *dummy, u64 *data)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	int i;
+
+	/* Update HW stats if supported */
+	priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
+					       ioaddr);
+
+	for (i = 0; i < STMMAC_STATS_LEN; i++) {
+		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
+		data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
+		sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+	}
+
+	return;
+}
+
+static int stmmac_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return STMMAC_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	int i;
+	u8 *p = data;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < STMMAC_STATS_LEN; i++) {
+			memcpy(p, stmmac_gstrings_stats[i].stat_string,
+				ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+	return;
+}
+
+/* Currently, only WOL via Magic Packet is supported. */
+static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	spin_lock_irq(&priv->lock);
+	if (priv->wolenabled == PMT_SUPPORTED) {
+		wol->supported = WAKE_MAGIC;
+		wol->wolopts = priv->wolopts;
+	}
+	spin_unlock_irq(&priv->lock);
+}
+
+static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 support = WAKE_MAGIC;
+
+	if (priv->wolenabled == PMT_NOT_SUPPORTED)
+		return -EINVAL;
+
+	if (wol->wolopts & ~support)
+		return -EINVAL;
+
+	if (wol->wolopts == 0)
+		device_set_wakeup_enable(priv->device, 0);
+	else
+		device_set_wakeup_enable(priv->device, 1);
+
+	spin_lock_irq(&priv->lock);
+	priv->wolopts = wol->wolopts;
+	spin_unlock_irq(&priv->lock);
+
+	return 0;
+}
+
+static struct ethtool_ops stmmac_ethtool_ops = {
+	.begin = stmmac_check_if_running,
+	.get_drvinfo = stmmac_ethtool_getdrvinfo,
+	.get_settings = stmmac_ethtool_getsettings,
+	.set_settings = stmmac_ethtool_setsettings,
+	.get_msglevel = stmmac_ethtool_getmsglevel,
+	.set_msglevel = stmmac_ethtool_setmsglevel,
+	.get_regs = stmmac_ethtool_gregs,
+	.get_regs_len = stmmac_ethtool_get_regs_len,
+	.get_link = ethtool_op_get_link,
+	.get_rx_csum = stmmac_ethtool_get_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = stmmac_ethtool_set_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_pauseparam = stmmac_get_pauseparam,
+	.set_pauseparam = stmmac_set_pauseparam,
+	.get_ethtool_stats = stmmac_get_ethtool_stats,
+	.get_strings = stmmac_get_strings,
+	.get_wol = stmmac_get_wol,
+	.set_wol = stmmac_set_wol,
+	.get_sset_count	= stmmac_get_sset_count,
+#ifdef NETIF_F_TSO
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = ethtool_op_set_tso,
+#endif
+};
+
+void stmmac_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
new file mode 100644
index 0000000..c2f14dc
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -0,0 +1,2204 @@
+/*******************************************************************************
+  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
+  ST Ethernet IPs are built around a Synopsys IP Core.
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+
+  Documentation available at:
+	http://www.stlinux.com
+  Support available at:
+	https://bugzilla.stlinux.com/
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/stm/soc.h>
+#include "stmmac.h"
+
+#define STMMAC_RESOURCE_NAME	"stmmaceth"
+#define PHY_RESOURCE_NAME	"stmmacphy"
+
+#undef STMMAC_DEBUG
+/*#define STMMAC_DEBUG*/
+#ifdef STMMAC_DEBUG
+#define DBG(nlevel, klevel, fmt, args...) \
+		((void)(netif_msg_##nlevel(priv) && \
+		printk(KERN_##klevel fmt, ## args)))
+#else
+#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
+#endif
+
+#undef STMMAC_RX_DEBUG
+/*#define STMMAC_RX_DEBUG*/
+#ifdef STMMAC_RX_DEBUG
+#define RX_DBG(fmt, args...)  printk(fmt, ## args)
+#else
+#define RX_DBG(fmt, args...)  do { } while (0)
+#endif
+
+#undef STMMAC_XMIT_DEBUG
+/*#define STMMAC_XMIT_DEBUG*/
+#ifdef STMMAC_XMIT_DEBUG
+#define TX_DBG(fmt, args...)  printk(fmt, ## args)
+#else
+#define TX_DBG(fmt, args...)  do { } while (0)
+#endif
+
+#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
+#define JUMBO_LEN	9000
+
+/* Module parameters */
+#define TX_TIMEO 5000 /* default 5 seconds */
+static int watchdog = TX_TIMEO;
+module_param(watchdog, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
+
+static int debug = -1;		/* -1: default, 0: no output, 16:  all */
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
+
+static int phyaddr = -1;
+module_param(phyaddr, int, S_IRUGO);
+MODULE_PARM_DESC(phyaddr, "Physical device address");
+
+#define DMA_TX_SIZE 256
+static int dma_txsize = DMA_TX_SIZE;
+module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
+
+#define DMA_RX_SIZE 256
+static int dma_rxsize = DMA_RX_SIZE;
+module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
+
+static int flow_ctrl = FLOW_OFF;
+module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+
+static int pause = PAUSE_TIME;
+module_param(pause, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+
+#define TC_DEFAULT 64
+static int tc = TC_DEFAULT;
+module_param(tc, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tc, "DMA threshold control value");
+
+#define RX_NO_COALESCE	1	/* Always interrupt on completion */
+#define TX_NO_COALESCE	-1	/* No moderation by default */
+
+/* Pay attention to tune this parameter; take care of both
+ * hardware capability and network stability/performance impact.
+ * Many tests showed that ~4ms latency seems to be good enough. */
+#ifdef CONFIG_STMMAC_TIMER
+#define DEFAULT_PERIODIC_RATE	256
+static int tmrate = DEFAULT_PERIODIC_RATE;
+module_param(tmrate, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
+#endif
+
+#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
+static int buf_sz = DMA_BUFFER_SIZE;
+module_param(buf_sz, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(buf_sz, "DMA buffer size");
+
+/* In case of Giga ETH, we can enable/disable the COE for the
+ * transmit HW checksum computation.
+ * Note that, if tx csum is off in HW, SG will be still supported. */
+static int tx_coe = HW_CSUM;
+module_param(tx_coe, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
+				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
+
+/**
+ * stmmac_verify_args - verify the driver parameters.
+ * Description: it checks whether any invalid parameter was passed to the
+ * driver and, if so, replaces it with its default value.
+ */
+static void stmmac_verify_args(void)
+{
+	if (unlikely(watchdog < 0))
+		watchdog = TX_TIMEO;
+	if (unlikely(dma_rxsize < 0))
+		dma_rxsize = DMA_RX_SIZE;
+	if (unlikely(dma_txsize < 0))
+		dma_txsize = DMA_TX_SIZE;
+	if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
+		buf_sz = DMA_BUFFER_SIZE;
+	if (unlikely(flow_ctrl > 1))
+		flow_ctrl = FLOW_AUTO;
+	else if (likely(flow_ctrl < 0))
+		flow_ctrl = FLOW_OFF;
+	if (unlikely((pause < 0) || (pause > 0xffff)))
+		pause = PAUSE_TIME;
+
+	return;
+}
+
+#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
+static void print_pkt(unsigned char *buf, int len)
+{
+	int j;
+	pr_info("len = %d byte, buf addr: 0x%p", len, buf);
+	for (j = 0; j < len; j++) {
+		if ((j % 16) == 0)
+			pr_info("\n %03x:", j);
+		pr_info(" %02x", buf[j]);
+	}
+	pr_info("\n");
+	return;
+}
+#endif
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define STMMAC_TX_THRESH(x)	(x->dma_tx_size/4)
+
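+/* cur_tx and dirty_tx are free-running counters (only reduced modulo the
+ * ring size when used as indices), so the number of free TX descriptors is
+ * the ring size minus the in-flight descriptors, minus one to keep the
+ * ring from filling up completely. */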
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
+{
+	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
+}
+
+/**
+ * stmmac_adjust_link
+ * @dev: net device structure
+ * Description: it adjusts the link parameters.
+ */
+static void stmmac_adjust_link(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = priv->phydev;
+	unsigned long ioaddr = dev->base_addr;
+	unsigned long flags;
+	int new_state = 0;
+	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
+
+	if (phydev == NULL)
+		return;
+
+	DBG(probe, DEBUG, "stmmac_adjust_link: called.  address %d link %d\n",
+	    phydev->addr, phydev->link);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (phydev->link) {
+		u32 ctrl = readl(ioaddr + MAC_CTRL_REG);
+
+		/* Now we make sure that we can be in full duplex mode.
+		 * If not, we operate in half-duplex mode. */
+		if (phydev->duplex != priv->oldduplex) {
+			new_state = 1;
+			if (!(phydev->duplex))
+				ctrl &= ~priv->mac_type->hw.link.duplex;
+			else
+				ctrl |= priv->mac_type->hw.link.duplex;
+			priv->oldduplex = phydev->duplex;
+		}
+		/* Flow Control operation */
+		if (phydev->pause)
+			priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
+						       fc, pause_time);
+
+		if (phydev->speed != priv->speed) {
+			new_state = 1;
+			switch (phydev->speed) {
+			case 1000:
+				if (likely(priv->is_gmac))
+					ctrl &= ~priv->mac_type->hw.link.port;
+				break;
+			case 100:
+			case 10:
+				if (priv->is_gmac) {
+					ctrl |= priv->mac_type->hw.link.port;
+					if (phydev->speed == SPEED_100) {
+						ctrl |=
+						    priv->mac_type->hw.link.
+						    speed;
+					} else {
+						ctrl &=
+						    ~(priv->mac_type->hw.
+						      link.speed);
+					}
+				} else {
+					ctrl &= ~priv->mac_type->hw.link.port;
+				}
+				priv->fix_mac_speed(priv->bsp_priv,
+						    phydev->speed);
+				break;
+			default:
+				if (netif_msg_link(priv))
+					pr_warning("%s: Speed (%d) is not 10"
+				       " or 100!\n", dev->name, phydev->speed);
+				break;
+			}
+
+			priv->speed = phydev->speed;
+		}
+
+		writel(ctrl, ioaddr + MAC_CTRL_REG);
+
+		if (!priv->oldlink) {
+			new_state = 1;
+			priv->oldlink = 1;
+		}
+	} else if (priv->oldlink) {
+		new_state = 1;
+		priv->oldlink = 0;
+		priv->speed = 0;
+		priv->oldduplex = -1;
+	}
+
+	if (new_state && netif_msg_link(priv))
+		phy_print_status(phydev);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
+}
+
+/**
+ * stmmac_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ *  Return value:
+ *  0 on success
+ */
+static int stmmac_init_phy(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev;
+	char phy_id[BUS_ID_SIZE];	/* PHY to connect */
+	char bus_id[BUS_ID_SIZE];
+
+	priv->oldlink = 0;
+	priv->speed = 0;
+	priv->oldduplex = -1;
+
+	if (priv->phy_addr == -1) {
+		/* We don't have a PHY, so do nothing */
+		return 0;
+	}
+
+	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
+	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id);
+
+	phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
+			priv->phy_interface);
+
+	if (IS_ERR(phydev)) {
+		pr_err("%s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(phydev);
+	}
+
+	/*
+	 * Broken HW is sometimes missing the pull-up resistor on the
+	 * MDIO line, which results in reads to non-existent devices returning
+	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+	 * device as well.
+	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
+	 */
+	if (phydev->phy_id == 0) {
+		phy_disconnect(phydev);
+		return -ENODEV;
+	}
+	pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
+	       " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
+
+	priv->phydev = phydev;
+
+	return 0;
+}
+
+static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
+{
+	u32 value = readl(ioaddr + MAC_CTRL_REG);
+	value |= MAC_RNABLE_RX;
+	/* Set the RE (receive enable bit into the MAC CTRL register).  */
+	writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
+{
+	u32 value = readl(ioaddr + MAC_CTRL_REG);
+	value |= MAC_ENABLE_TX;
+	/* Set the TE (transmit enable bit into the MAC CTRL register).  */
+	writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_mac_disable_rx(unsigned long ioaddr)
+{
+	u32 value = readl(ioaddr + MAC_CTRL_REG);
+	value &= ~MAC_RNABLE_RX;
+	writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_mac_disable_tx(unsigned long ioaddr)
+{
+	u32 value = readl(ioaddr + MAC_CTRL_REG);
+	value &= ~MAC_ENABLE_TX;
+	writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+/**
+ * display_ring
+ * @p: pointer to the ring.
+ * @size: size of the ring.
+ * Description: display all the descriptors within the ring.
+ */
+static void display_ring(struct dma_desc *p, int size)
+{
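+	/* tmp_s overlays the first 16 bytes of a descriptor so that DES0/DES1
+	 * (packed into 'a') and the two buffer pointers can be dumped as raw
+	 * words, regardless of the descriptor bitfield layout. */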
+	struct tmp_s {
+		u64 a;
+		unsigned int b;
+		unsigned int c;
+	};
+	int i;
+	for (i = 0; i < size; i++) {
+		struct tmp_s *x = (struct tmp_s *)(p + i);
+		pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+		       i, (unsigned int)virt_to_phys(&p[i]),
+		       (unsigned int)(x->a), (unsigned int)((x->a) >> 32),
+		       x->b, x->c);
+		pr_info("\n");
+	}
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description:  this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static void init_dma_desc_rings(struct net_device *dev)
+{
+	int i;
+	struct stmmac_priv *priv = netdev_priv(dev);
+	struct sk_buff *skb;
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int rxsize = priv->dma_rx_size;
+	unsigned int bfsize = priv->dma_buf_sz;
+	int buff2_needed = 0;
+	int dis_ic = 0;
+
+#ifdef CONFIG_STMMAC_TIMER
+	/* When using timers, disable the RX interrupt on completion */
+	dis_ic = 1;
+#endif
+	/* Set the Buffer size according to the MTU;
+	 * indeed, in case of jumbo we need to bump-up the buffer sizes.
+	 */
+	if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
+		bfsize = BUF_SIZE_16KiB;
+	else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
+		bfsize = BUF_SIZE_8KiB;
+	else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
+		bfsize = BUF_SIZE_4KiB;
+	else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
+		bfsize = BUF_SIZE_2KiB;
+	else
+		bfsize = DMA_BUFFER_SIZE;
+
+	/* If the MTU exceeds 8k, use the second buffer in the chain */
+	if (bfsize >= BUF_SIZE_8KiB)
+		buff2_needed = 1;
+
+	DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
+	    txsize, rxsize, bfsize);
+
+	priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
+	priv->rx_skbuff =
+	    kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
+	priv->dma_rx =
+	    (struct dma_desc *)dma_alloc_coherent(priv->device,
+						  rxsize *
+						  sizeof(struct dma_desc),
+						  &priv->dma_rx_phy,
+						  GFP_KERNEL);
+	priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
+				       GFP_KERNEL);
+	priv->dma_tx =
+	    (struct dma_desc *)dma_alloc_coherent(priv->device,
+						  txsize *
+						  sizeof(struct dma_desc),
+						  &priv->dma_tx_phy,
+						  GFP_KERNEL);
+
+	if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
+		pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
+		return;
+	}
+
+	DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
+	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+	    dev->name, priv->dma_rx, priv->dma_tx,
+	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+	/* RX INITIALIZATION */
+	DBG(probe, INFO, "stmmac: SKB addresses:\n"
+			 "skb\t\tskb data\tdma data\n");
+
+	for (i = 0; i < rxsize; i++) {
+		struct dma_desc *p = priv->dma_rx + i;
+
+		skb = netdev_alloc_skb_ip_align(dev, bfsize);
+		if (unlikely(skb == NULL)) {
+			pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+			break;
+		}
+		priv->rx_skbuff[i] = skb;
+		priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+						bfsize, DMA_FROM_DEVICE);
+
+		p->des2 = priv->rx_skbuff_dma[i];
+		if (unlikely(buff2_needed))
+			p->des3 = p->des2 + BUF_SIZE_8KiB;
+		DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
+			priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
+	}
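+	/* cur_rx and dirty_rx are free-running counters: if fewer than rxsize
+	 * buffers could be allocated above, dirty_rx wraps so that
+	 * stmmac_rx_refill() will later try to provide the missing ones. */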
+	priv->cur_rx = 0;
+	priv->dirty_rx = (unsigned int)(i - rxsize);
+	priv->dma_buf_sz = bfsize;
+	buf_sz = bfsize;
+
+	/* TX INITIALIZATION */
+	for (i = 0; i < txsize; i++) {
+		priv->tx_skbuff[i] = NULL;
+		priv->dma_tx[i].des2 = 0;
+	}
+	priv->dirty_tx = 0;
+	priv->cur_tx = 0;
+
+	/* Clear the Rx/Tx descriptors */
+	priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
+	priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);
+
+	if (netif_msg_hw(priv)) {
+		pr_info("RX descriptor ring:\n");
+		display_ring(priv->dma_rx, rxsize);
+		pr_info("TX descriptor ring:\n");
+		display_ring(priv->dma_tx, txsize);
+	}
+	return;
+}
+
+static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->dma_rx_size; i++) {
+		if (priv->rx_skbuff[i]) {
+			dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+					 priv->dma_buf_sz, DMA_FROM_DEVICE);
+			dev_kfree_skb_any(priv->rx_skbuff[i]);
+		}
+		priv->rx_skbuff[i] = NULL;
+	}
+	return;
+}
+
+static void dma_free_tx_skbufs(struct stmmac_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->dma_tx_size; i++) {
+		if (priv->tx_skbuff[i] != NULL) {
+			struct dma_desc *p = priv->dma_tx + i;
+			if (p->des2)
+				dma_unmap_single(priv->device, p->des2,
+				 priv->mac_type->ops->get_tx_len(p),
+				 DMA_TO_DEVICE);
+			dev_kfree_skb_any(priv->tx_skbuff[i]);
+			priv->tx_skbuff[i] = NULL;
+		}
+	}
+	return;
+}
+
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+	/* Release the DMA TX/RX socket buffers */
+	dma_free_rx_skbufs(priv);
+	dma_free_tx_skbufs(priv);
+
+	/* Free the region of consistent memory previously allocated for
+	 * the DMA */
+	dma_free_coherent(priv->device,
+			  priv->dma_tx_size * sizeof(struct dma_desc),
+			  priv->dma_tx, priv->dma_tx_phy);
+	dma_free_coherent(priv->device,
+			  priv->dma_rx_size * sizeof(struct dma_desc),
+			  priv->dma_rx, priv->dma_rx_phy);
+	kfree(priv->rx_skbuff_dma);
+	kfree(priv->rx_skbuff);
+	kfree(priv->tx_skbuff);
+
+	return;
+}
+
+/**
+ * stmmac_dma_start_tx
+ * @ioaddr: device I/O address
+ * Description:  this function starts the DMA tx process.
+ */
+static void stmmac_dma_start_tx(unsigned long ioaddr)
+{
+	u32 value = readl(ioaddr + DMA_CONTROL);
+	value |= DMA_CONTROL_ST;
+	writel(value, ioaddr + DMA_CONTROL);
+	return;
+}
+
+static void stmmac_dma_stop_tx(unsigned long ioaddr)
+{
+	u32 value = readl(ioaddr + DMA_CONTROL);
+	value &= ~DMA_CONTROL_ST;
+	writel(value, ioaddr + DMA_CONTROL);
+	return;
+}
+
+/**
+ * stmmac_dma_start_rx
+ * @ioaddr: device I/O address
+ * Description:  this function starts the DMA rx process.
+ */
+static void stmmac_dma_start_rx(unsigned long ioaddr)
+{
+	u32 value = readl(ioaddr + DMA_CONTROL);
+	value |= DMA_CONTROL_SR;
+	writel(value, ioaddr + DMA_CONTROL);
+
+	return;
+}
+
+static void stmmac_dma_stop_rx(unsigned long ioaddr)
+{
+	u32 value = readl(ioaddr + DMA_CONTROL);
+	value &= ~DMA_CONTROL_SR;
+	writel(value, ioaddr + DMA_CONTROL);
+
+	return;
+}
+
+/**
+ *  stmmac_dma_operation_mode - HW DMA operation mode
+ *  @priv : pointer to the private device structure.
+ *  Description: it sets the DMA operation mode: tx/rx DMA thresholds
+ *  or Store-And-Forward capability. It also verifies the COE for the
+ *  transmission in case of Giga ETH.
+ */
+static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+{
+	if (!priv->is_gmac) {
+		/* MAC 10/100 */
+		priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
+		priv->tx_coe = NO_HW_CSUM;
+	} else {
+		if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
+			priv->mac_type->ops->dma_mode(priv->dev->base_addr,
+						      SF_DMA_MODE, SF_DMA_MODE);
+			tc = SF_DMA_MODE;
+			priv->tx_coe = HW_CSUM;
+		} else {
+			/* Checksum computation is performed in software. */
+			priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
+						      SF_DMA_MODE);
+			priv->tx_coe = NO_HW_CSUM;
+		}
+	}
+	tx_coe = priv->tx_coe;
+
+	return;
+}
+
+#ifdef STMMAC_DEBUG
+/**
+ * show_tx_process_state
+ * @status: tx descriptor status field
+ * Description: it shows the Transmit Process State for CSR5[22:20]
+ */
+static void show_tx_process_state(unsigned int status)
+{
+	unsigned int state;
+	state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+	switch (state) {
+	case 0:
+		pr_info("- TX (Stopped): Reset or Stop command\n");
+		break;
+	case 1:
+		pr_info("- TX (Running):Fetching the Tx desc\n");
+		break;
+	case 2:
+		pr_info("- TX (Running): Waiting for end of tx\n");
+		break;
+	case 3:
+		pr_info("- TX (Running): Reading the data "
+		       "and queuing the data into the Tx buf\n");
+		break;
+	case 6:
+		pr_info("- TX (Suspended): Tx Buff Underflow "
+		       "or an unavailable Transmit descriptor\n");
+		break;
+	case 7:
+		pr_info("- TX (Running): Closing Tx descriptor\n");
+		break;
+	default:
+		break;
+	}
+	return;
+}
+
+/**
+ * show_rx_process_state
+ * @status: rx descriptor status field
+ * Description: it shows the  Receive Process State for CSR5[19:17]
+ */
+static void show_rx_process_state(unsigned int status)
+{
+	unsigned int state;
+	state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+	switch (state) {
+	case 0:
+		pr_info("- RX (Stopped): Reset or Stop command\n");
+		break;
+	case 1:
+		pr_info("- RX (Running): Fetching the Rx desc\n");
+		break;
+	case 2:
+		pr_info("- RX (Running):Checking for end of pkt\n");
+		break;
+	case 3:
+		pr_info("- RX (Running): Waiting for Rx pkt\n");
+		break;
+	case 4:
+		pr_info("- RX (Suspended): Unavailable Rx buf\n");
+		break;
+	case 5:
+		pr_info("- RX (Running): Closing Rx descriptor\n");
+		break;
+	case 6:
+		pr_info("- RX(Running): Flushing the current frame"
+		       " from the Rx buf\n");
+		break;
+	case 7:
+		pr_info("- RX (Running): Queuing the Rx frame"
+		       " from the Rx buf into memory\n");
+		break;
+	default:
+		break;
+	}
+	return;
+}
+#endif
+
+/**
+ * stmmac_tx:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void stmmac_tx(struct stmmac_priv *priv)
+{
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned long ioaddr = priv->dev->base_addr;
+
+	while (priv->dirty_tx != priv->cur_tx) {
+		int last;
+		unsigned int entry = priv->dirty_tx % txsize;
+		struct sk_buff *skb = priv->tx_skbuff[entry];
+		struct dma_desc *p = priv->dma_tx + entry;
+
+		/* Check if the descriptor is owned by the DMA. */
+		if (priv->mac_type->ops->get_tx_owner(p))
+			break;
+
+		/* Verify tx error by looking at the last segment */
+		last = priv->mac_type->ops->get_tx_ls(p);
+		if (likely(last)) {
+			int tx_error =
+			    priv->mac_type->ops->tx_status(&priv->dev->stats,
+							   &priv->xstats,
+							   p, ioaddr);
+			if (likely(tx_error == 0)) {
+				priv->dev->stats.tx_packets++;
+				priv->xstats.tx_pkt_n++;
+			} else
+				priv->dev->stats.tx_errors++;
+		}
+		TX_DBG("%s: curr %d, dirty %d\n", __func__,
+			priv->cur_tx, priv->dirty_tx);
+
+		if (likely(p->des2))
+			dma_unmap_single(priv->device, p->des2,
+					 priv->mac_type->ops->get_tx_len(p),
+					 DMA_TO_DEVICE);
+		if (unlikely(p->des3))
+			p->des3 = 0;
+
+		if (likely(skb != NULL)) {
+			/*
+			 * If there's room in the queue (limit it to size)
+			 * we add this skb back into the pool,
+			 * if it's the right size.
+			 */
+			if ((skb_queue_len(&priv->rx_recycle) <
+				priv->dma_rx_size) &&
+				skb_recycle_check(skb, priv->dma_buf_sz))
+				__skb_queue_head(&priv->rx_recycle, skb);
+			else
+				dev_kfree_skb(skb);
+
+			priv->tx_skbuff[entry] = NULL;
+		}
+
+		priv->mac_type->ops->release_tx_desc(p);
+
+		entry = (++priv->dirty_tx) % txsize;
+	}
+	if (unlikely(netif_queue_stopped(priv->dev) &&
+		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
+		netif_tx_lock(priv->dev);
+		if (netif_queue_stopped(priv->dev) &&
+		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
+			TX_DBG("%s: restart transmit\n", __func__);
+			netif_wake_queue(priv->dev);
+		}
+		netif_tx_unlock(priv->dev);
+	}
+	return;
+}
+
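+/* When CONFIG_STMMAC_TIMER is set, RX/TX completion is mitigated by an
+ * external timer instead of the DMA interrupt, so enabling/disabling the
+ * "irq" means starting/stopping that timer. */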
+static inline void stmmac_enable_irq(struct stmmac_priv *priv)
+{
+#ifndef CONFIG_STMMAC_TIMER
+	writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
+#else
+	priv->tm->timer_start(tmrate);
+#endif
+}
+
+static inline void stmmac_disable_irq(struct stmmac_priv *priv)
+{
+#ifndef CONFIG_STMMAC_TIMER
+	writel(0, priv->dev->base_addr + DMA_INTR_ENA);
+#else
+	priv->tm->timer_stop();
+#endif
+}
+
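+/* There is work for the NAPI handler if the current RX descriptor is owned
+ * by the CPU (a frame has been received) or if there are TX descriptors
+ * waiting to be reclaimed. */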
+static int stmmac_has_work(struct stmmac_priv *priv)
+{
+	unsigned int has_work = 0;
+	int rxret, tx_work = 0;
+
+	rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
+		(priv->cur_rx % priv->dma_rx_size));
+
+	if (priv->dirty_tx != priv->cur_tx)
+		tx_work = 1;
+
+	if (likely(!rxret || tx_work))
+		has_work = 1;
+
+	return has_work;
+}
+
+static inline void _stmmac_schedule(struct stmmac_priv *priv)
+{
+	if (likely(stmmac_has_work(priv))) {
+		stmmac_disable_irq(priv);
+		napi_schedule(&priv->napi);
+	}
+}
+
+#ifdef CONFIG_STMMAC_TIMER
+void stmmac_schedule(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	priv->xstats.sched_timer_n++;
+
+	_stmmac_schedule(priv);
+
+	return;
+}
+
+static void stmmac_no_timer_started(unsigned int x)
+{
+}
+
+static void stmmac_no_timer_stopped(void)
+{
+}
+#endif
+
+/**
+ * stmmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void stmmac_tx_err(struct stmmac_priv *priv)
+{
+	netif_stop_queue(priv->dev);
+
+	stmmac_dma_stop_tx(priv->dev->base_addr);
+	dma_free_tx_skbufs(priv);
+	priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+	priv->dirty_tx = 0;
+	priv->cur_tx = 0;
+	stmmac_dma_start_tx(priv->dev->base_addr);
+
+	priv->dev->stats.tx_errors++;
+	netif_wake_queue(priv->dev);
+
+	return;
+}
+
+/**
+ * stmmac_dma_interrupt - Interrupt handler for the driver
+ * @dev: net device structure
+ * Description: Interrupt handler for the driver (DMA).
+ */
+static void stmmac_dma_interrupt(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	struct stmmac_priv *priv = netdev_priv(dev);
+	/* read the status register (CSR5) */
+	u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+	DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+
+#ifdef STMMAC_DEBUG
+	/* It displays the DMA transmit process state (CSR5 register) */
+	if (netif_msg_tx_done(priv))
+		show_tx_process_state(intr_status);
+	if (netif_msg_rx_status(priv))
+		show_rx_process_state(intr_status);
+#endif
+	/* ABNORMAL interrupts */
+	if (unlikely(intr_status & DMA_STATUS_AIS)) {
+		DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+		if (unlikely(intr_status & DMA_STATUS_UNF)) {
+			DBG(intr, INFO, "transmit underflow\n");
+			if (unlikely(tc != SF_DMA_MODE)
+			    && (tc <= 256)) {
+				/* Try to bump up the threshold */
+				tc += 64;
+				priv->mac_type->ops->dma_mode(ioaddr, tc,
+					      SF_DMA_MODE);
+				priv->xstats.threshold = tc;
+			}
+			stmmac_tx_err(priv);
+			priv->xstats.tx_undeflow_irq++;
+		}
+		if (unlikely(intr_status & DMA_STATUS_TJT)) {
+			DBG(intr, INFO, "transmit jabber\n");
+			priv->xstats.tx_jabber_irq++;
+		}
+		if (unlikely(intr_status & DMA_STATUS_OVF)) {
+			DBG(intr, INFO, "recv overflow\n");
+			priv->xstats.rx_overflow_irq++;
+		}
+		if (unlikely(intr_status & DMA_STATUS_RU)) {
+			DBG(intr, INFO, "receive buffer unavailable\n");
+			priv->xstats.rx_buf_unav_irq++;
+		}
+		if (unlikely(intr_status & DMA_STATUS_RPS)) {
+			DBG(intr, INFO, "receive process stopped\n");
+			priv->xstats.rx_process_stopped_irq++;
+		}
+		if (unlikely(intr_status & DMA_STATUS_RWT)) {
+			DBG(intr, INFO, "receive watchdog\n");
+			priv->xstats.rx_watchdog_irq++;
+		}
+		if (unlikely(intr_status & DMA_STATUS_ETI)) {
+			DBG(intr, INFO, "transmit early interrupt\n");
+			priv->xstats.tx_early_irq++;
+		}
+		if (unlikely(intr_status & DMA_STATUS_TPS)) {
+			DBG(intr, INFO, "transmit process stopped\n");
+			priv->xstats.tx_process_stopped_irq++;
+			stmmac_tx_err(priv);
+		}
+		if (unlikely(intr_status & DMA_STATUS_FBI)) {
+			DBG(intr, INFO, "fatal bus error\n");
+			priv->xstats.fatal_bus_error_irq++;
+			stmmac_tx_err(priv);
+		}
+	}
+
+	/* TX/RX NORMAL interrupts */
+	if (intr_status & DMA_STATUS_NIS) {
+		priv->xstats.normal_irq_n++;
+		if (likely((intr_status & DMA_STATUS_RI) ||
+			 (intr_status & (DMA_STATUS_TI))))
+				_stmmac_schedule(priv);
+	}
+
+	/* Optional hardware blocks, interrupts should be disabled */
+	if (unlikely(intr_status &
+		     (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+		pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+
+	/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+	writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+	DBG(intr, INFO, "\n\n");
+
+	return;
+}
+
+/**
+ *  stmmac_open - open entry point of the driver
+ *  @dev : pointer to the device structure.
+ *  Description:
+ *  This function is the open entry point of the driver.
+ *  Return value:
+ *  0 on success and an appropriate (-)ve integer as defined in errno.h
+ *  file on failure.
+ */
+static int stmmac_open(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	int ret;
+
+	/* Check that the MAC address is valid.  If it's not, refuse
+	 * to bring the device up. The user must specify an
+	 * address using the following linux command:
+	 *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		random_ether_addr(dev->dev_addr);
+		pr_warning("%s: generated random MAC address %pM\n", dev->name,
+			dev->dev_addr);
+	}
+
+	stmmac_verify_args();
+
+	ret = stmmac_init_phy(dev);
+	if (unlikely(ret)) {
+		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+		return ret;
+	}
+
+	/* Request the IRQ lines */
+	ret = request_irq(dev->irq, &stmmac_interrupt,
+			  IRQF_SHARED, dev->name, dev);
+	if (unlikely(ret < 0)) {
+		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+		       __func__, dev->irq, ret);
+		return ret;
+	}
+
+#ifdef CONFIG_STMMAC_TIMER
+	priv->tm = kmalloc(sizeof(struct stmmac_timer), GFP_KERNEL);
+	if (unlikely(priv->tm == NULL)) {
+		pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	priv->tm->freq = tmrate;
+
+	/* Test if the HW timer can be actually used.
+	 * In case of failure continue with no timer. */
+	if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
+		pr_warning("stmmaceth: cannot attach the HW timer\n");
+		tmrate = 0;
+		priv->tm->freq = 0;
+		priv->tm->timer_start = stmmac_no_timer_started;
+		priv->tm->timer_stop = stmmac_no_timer_stopped;
+	}
+#endif
+
+	/* Create and initialize the TX/RX descriptors chains. */
+	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
+	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
+	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+	init_dma_desc_rings(dev);
+
+	/* DMA initialization and SW reset */
+	if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
+		priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
+
+		pr_err("%s: DMA initialization failed\n", __func__);
+		return -1;
+	}
+
+	/* Copy the MAC addr into the HW  */
+	priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
+	/* Initialize the MAC Core */
+	priv->mac_type->ops->core_init(ioaddr);
+
+	priv->shutdown = 0;
+
+	/* Initialise the MMC (if present) to disable all interrupts. */
+	writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK);
+	writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK);
+
+	/* Enable the MAC Rx/Tx */
+	stmmac_mac_enable_rx(ioaddr);
+	stmmac_mac_enable_tx(ioaddr);
+
+	/* Set the HW DMA mode and the COE */
+	stmmac_dma_operation_mode(priv);
+
+	/* Extra statistics */
+	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+	priv->xstats.threshold = tc;
+
+	/* Start the ball rolling... */
+	DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
+	stmmac_dma_start_tx(ioaddr);
+	stmmac_dma_start_rx(ioaddr);
+
+#ifdef CONFIG_STMMAC_TIMER
+	priv->tm->timer_start(tmrate);
+#endif
+	/* Dump DMA/MAC registers */
+	if (netif_msg_hw(priv)) {
+		priv->mac_type->ops->dump_mac_regs(ioaddr);
+		priv->mac_type->ops->dump_dma_regs(ioaddr);
+	}
+
+	if (priv->phydev)
+		phy_start(priv->phydev);
+
+	napi_enable(&priv->napi);
+	skb_queue_head_init(&priv->rx_recycle);
+	netif_start_queue(dev);
+	return 0;
+}
+
+/**
+ *  stmmac_release - close entry point of the driver
+ *  @dev : device pointer.
+ *  Description:
+ *  This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	/* Stop and disconnect the PHY */
+	if (priv->phydev) {
+		phy_stop(priv->phydev);
+		phy_disconnect(priv->phydev);
+		priv->phydev = NULL;
+	}
+
+	netif_stop_queue(dev);
+
+#ifdef CONFIG_STMMAC_TIMER
+	/* Stop and release the timer */
+	stmmac_close_ext_timer();
+	if (priv->tm != NULL)
+		kfree(priv->tm);
+#endif
+	napi_disable(&priv->napi);
+	skb_queue_purge(&priv->rx_recycle);
+
+	/* Free the IRQ lines */
+	free_irq(dev->irq, dev);
+
+	/* Stop TX/RX DMA and clear the descriptors */
+	stmmac_dma_stop_tx(dev->base_addr);
+	stmmac_dma_stop_rx(dev->base_addr);
+
+	/* Release and free the Rx/Tx resources */
+	free_dma_desc_resources(priv);
+
+	/* Disable the MAC core */
+	stmmac_mac_disable_tx(dev->base_addr);
+	stmmac_mac_disable_rx(dev->base_addr);
+
+	netif_carrier_off(dev);
+
+	return 0;
+}
+
+/*
+ * stmmac_sw_tso: the MAC has no hardware TSO, so GSO skbs are segmented
+ * in software via skb_gso_segment() and each resulting frame is passed
+ * back through stmmac_xmit().
+ */
+static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
+{
+	struct sk_buff *segs, *curr_skb;
+	int gso_segs = skb_shinfo(skb)->gso_segs;
+
+	/* Estimate the number of fragments in the worst case */
+	if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
+		netif_stop_queue(priv->dev);
+		TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
+		       __func__);
+		if (stmmac_tx_avail(priv) < gso_segs)
+			return NETDEV_TX_BUSY;
+
+		netif_wake_queue(priv->dev);
+	}
+	TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
+	       skb, skb->len);
+
+	segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
+	if (unlikely(IS_ERR(segs)))
+		goto sw_tso_end;
+
+	do {
+		curr_skb = segs;
+		segs = segs->next;
+		TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
+		       "*next %p\n", curr_skb->len, curr_skb, segs);
+		curr_skb->next = NULL;
+		stmmac_xmit(curr_skb, priv->dev);
+	} while (segs);
+
+sw_tso_end:
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
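+/* Frames whose linear part does not fit in a single buffer are split over
+ * the two buffer pointers (des2/des3) of a descriptor and, above 8KiB,
+ * over a second descriptor as well. */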
+static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
+					       struct net_device *dev,
+					       int csum_insertion)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned int nopaged_len = skb_headlen(skb);
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int entry = priv->cur_tx % txsize;
+	struct dma_desc *desc = priv->dma_tx + entry;
+
+	if (nopaged_len > BUF_SIZE_8KiB) {
+
+		int buf2_size = nopaged_len - BUF_SIZE_8KiB;
+
+		desc->des2 = dma_map_single(priv->device, skb->data,
+					    BUF_SIZE_8KiB, DMA_TO_DEVICE);
+		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+		priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
+						     csum_insertion);
+
+		entry = (++priv->cur_tx) % txsize;
+		desc = priv->dma_tx + entry;
+
+		desc->des2 = dma_map_single(priv->device,
+					skb->data + BUF_SIZE_8KiB,
+					buf2_size, DMA_TO_DEVICE);
+		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+		priv->mac_type->ops->prepare_tx_desc(desc, 0,
+						     buf2_size, csum_insertion);
+		priv->mac_type->ops->set_tx_owner(desc);
+		priv->tx_skbuff[entry] = NULL;
+	} else {
+		desc->des2 = dma_map_single(priv->device, skb->data,
+					nopaged_len, DMA_TO_DEVICE);
+		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+		priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
+						     csum_insertion);
+	}
+	return entry;
+}
+
+/**
+ *  stmmac_xmit:
+ *  @skb : the socket buffer
+ *  @dev : device pointer
+ *  Description : Tx entry point of the driver.
+ */
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int entry;
+	int i, csum_insertion = 0;
+	int nfrags = skb_shinfo(skb)->nr_frags;
+	struct dma_desc *desc, *first;
+
+	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+		if (!netif_queue_stopped(dev)) {
+			netif_stop_queue(dev);
+			/* This is a hard error, log it. */
+			pr_err("%s: BUG! Tx Ring full when queue awake\n",
+				__func__);
+		}
+		return NETDEV_TX_BUSY;
+	}
+
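+	/* The frame is mapped onto one descriptor for the linear part (two
+	 * for oversized/jumbo frames) plus one descriptor per paged fragment;
+	 * the first descriptor is handed to the DMA only after all the
+	 * others have been prepared. */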
+	entry = priv->cur_tx % txsize;
+
+#ifdef STMMAC_XMIT_DEBUG
+	if ((skb->len > ETH_FRAME_LEN) || nfrags)
+		pr_info("stmmac xmit:\n"
+		       "\tskb addr %p - len: %d - nopaged_len: %d\n"
+		       "\tn_frags: %d - ip_summed: %d - %s gso\n",
+		       skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
+		       !skb_is_gso(skb) ? "isn't" : "is");
+#endif
+
+	if (unlikely(skb_is_gso(skb)))
+		return stmmac_sw_tso(priv, skb);
+
+	if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
+		if (likely(priv->tx_coe == NO_HW_CSUM))
+			skb_checksum_help(skb);
+		else
+			csum_insertion = 1;
+	}
+
+	desc = priv->dma_tx + entry;
+	first = desc;
+
+#ifdef STMMAC_XMIT_DEBUG
+	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
+		pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
+		       "\t\tn_frags: %d, ip_summed: %d\n",
+		       skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
+#endif
+	priv->tx_skbuff[entry] = skb;
+	if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
+		entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
+		desc = priv->dma_tx + entry;
+	} else {
+		unsigned int nopaged_len = skb_headlen(skb);
+		desc->des2 = dma_map_single(priv->device, skb->data,
+					nopaged_len, DMA_TO_DEVICE);
+		priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
+						     csum_insertion);
+	}
+
+	for (i = 0; i < nfrags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		int len = frag->size;
+
+		entry = (++priv->cur_tx) % txsize;
+		desc = priv->dma_tx + entry;
+
+		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
+		desc->des2 = dma_map_page(priv->device, frag->page,
+					  frag->page_offset,
+					  len, DMA_TO_DEVICE);
+		priv->tx_skbuff[entry] = NULL;
+		priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
+						     csum_insertion);
+		priv->mac_type->ops->set_tx_owner(desc);
+	}
+
+	/* Interrupt on completion only for the last segment */
+	priv->mac_type->ops->close_tx_desc(desc);
+#ifdef CONFIG_STMMAC_TIMER
+	/* Clean IC while using timers */
+	priv->mac_type->ops->clear_tx_ic(desc);
+#endif
+	/* To avoid a race condition, release the first descriptor to the DMA last */
+	priv->mac_type->ops->set_tx_owner(first);
+
+	priv->cur_tx++;
+
+#ifdef STMMAC_XMIT_DEBUG
+	if (netif_msg_pktdata(priv)) {
+		pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
+		       "first=%p, nfrags=%d\n",
+		       (priv->cur_tx % txsize), (priv->dirty_tx % txsize),
+		       entry, first, nfrags);
+		display_ring(priv->dma_tx, txsize);
+		pr_info(">>> frame to be transmitted: ");
+		print_pkt(skb->data, skb->len);
+	}
+#endif
+	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+		TX_DBG("%s: stop transmitted packets\n", __func__);
+		netif_stop_queue(dev);
+	}
+
+	dev->stats.tx_bytes += skb->len;
+
+	/* CSR1 enables the transmit DMA to check for new descriptor */
+	writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
+
+	return NETDEV_TX_OK;
+}
+
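+/* Re-arm the RX descriptors between dirty_rx and cur_rx, reusing skbs from
+ * the recycle queue when possible and giving ownership back to the DMA. */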
+static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+{
+	unsigned int rxsize = priv->dma_rx_size;
+	int bfsize = priv->dma_buf_sz;
+	struct dma_desc *p = priv->dma_rx;
+
+	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+		unsigned int entry = priv->dirty_rx % rxsize;
+		if (likely(priv->rx_skbuff[entry] == NULL)) {
+			struct sk_buff *skb;
+
+			skb = __skb_dequeue(&priv->rx_recycle);
+			if (skb == NULL)
+				skb = netdev_alloc_skb_ip_align(priv->dev,
+								bfsize);
+
+			if (unlikely(skb == NULL))
+				break;
+
+			priv->rx_skbuff[entry] = skb;
+			priv->rx_skbuff_dma[entry] =
+			    dma_map_single(priv->device, skb->data, bfsize,
+					   DMA_FROM_DEVICE);
+
+			(p + entry)->des2 = priv->rx_skbuff_dma[entry];
+			if (unlikely(priv->is_gmac)) {
+				if (bfsize >= BUF_SIZE_8KiB)
+					(p + entry)->des3 =
+					    (p + entry)->des2 + BUF_SIZE_8KiB;
+			}
+			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
+		}
+		priv->mac_type->ops->set_rx_owner(p + entry);
+	}
+	return;
+}
+
+static int stmmac_rx(struct stmmac_priv *priv, int limit)
+{
+	unsigned int rxsize = priv->dma_rx_size;
+	unsigned int entry = priv->cur_rx % rxsize;
+	unsigned int next_entry;
+	unsigned int count = 0;
+	struct dma_desc *p = priv->dma_rx + entry;
+	struct dma_desc *p_next;
+
+#ifdef STMMAC_RX_DEBUG
+	if (netif_msg_hw(priv)) {
+		pr_debug(">>> stmmac_rx: descriptor ring:\n");
+		display_ring(priv->dma_rx, rxsize);
+	}
+#endif
+	count = 0;
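+	/* Walk the ring until a descriptor still owned by the DMA is found
+	 * or the NAPI budget is exhausted. */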
+	while (!priv->mac_type->ops->get_rx_owner(p)) {
+		int status;
+
+		if (count >= limit)
+			break;
+
+		count++;
+
+		next_entry = (++priv->cur_rx) % rxsize;
+		p_next = priv->dma_rx + next_entry;
+		prefetch(p_next);
+
+		/* read the status of the incoming frame */
+		status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
+							 &priv->xstats, p));
+		if (unlikely(status == discard_frame))
+			priv->dev->stats.rx_errors++;
+		else {
+			struct sk_buff *skb;
+			/* Length should omit the CRC */
+			int frame_len =
+			    priv->mac_type->ops->get_rx_frame_len(p) - 4;
+
+#ifdef STMMAC_RX_DEBUG
+			if (frame_len > ETH_FRAME_LEN)
+				pr_debug("\tRX frame size %d, COE status: %d\n",
+					frame_len, status);
+
+			if (netif_msg_hw(priv))
+				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+					p, entry, p->des2);
+#endif
+			skb = priv->rx_skbuff[entry];
+			if (unlikely(!skb)) {
+				pr_err("%s: Inconsistent Rx descriptor chain\n",
+					priv->dev->name);
+				priv->dev->stats.rx_dropped++;
+				break;
+			}
+			prefetch(skb->data - NET_IP_ALIGN);
+			priv->rx_skbuff[entry] = NULL;
+
+			skb_put(skb, frame_len);
+			dma_unmap_single(priv->device,
+					 priv->rx_skbuff_dma[entry],
+					 priv->dma_buf_sz, DMA_FROM_DEVICE);
+#ifdef STMMAC_RX_DEBUG
+			if (netif_msg_pktdata(priv)) {
+				pr_info(" frame received (%dbytes)", frame_len);
+				print_pkt(skb->data, frame_len);
+			}
+#endif
+			skb->protocol = eth_type_trans(skb, priv->dev);
+
+			if (unlikely(status == csum_none)) {
+				/* always for the old mac 10/100 */
+				skb->ip_summed = CHECKSUM_NONE;
+				netif_receive_skb(skb);
+			} else {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				napi_gro_receive(&priv->napi, skb);
+			}
+
+			priv->dev->stats.rx_packets++;
+			priv->dev->stats.rx_bytes += frame_len;
+			priv->dev->last_rx = jiffies;
+		}
+		entry = next_entry;
+		p = p_next;	/* use prefetched values */
+	}
+
+	stmmac_rx_refill(priv);
+
+	priv->xstats.rx_pkt_n += count;
+
+	return count;
+}
+
+/**
+ *  stmmac_poll - stmmac poll method (NAPI)
+ *  @napi : pointer to the napi structure.
+ *  @budget : maximum number of packets that the current CPU can receive from
+ *	      all interfaces.
+ *  Description :
+ *   This function implements the reception process.
+ *   It also runs the TX completion thread.
+ */
+static int stmmac_poll(struct napi_struct *napi, int budget)
+{
+	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+	int work_done = 0;
+
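+	/* Reclaim completed TX descriptors first, then receive up to
+	 * "budget" frames; device interrupts are re-enabled only when less
+	 * than the full budget was used. */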
+	priv->xstats.poll_n++;
+	stmmac_tx(priv);
+	work_done = stmmac_rx(priv, budget);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		stmmac_enable_irq(priv);
+	}
+	return work_done;
+}
+
+/**
+ *  stmmac_tx_timeout
+ *  @dev : Pointer to net device structure
+ *  Description: this function is called when a packet transmission fails to
+ *   complete within a reasonable time. The driver will mark the error in the
+ *   netdev structure and arrange for the device to be reset to a sane state
+ *   in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	/* Clear Tx resources and restart transmitting again */
+	stmmac_tx_err(priv);
+	return;
+}
+
+/* Configuration changes (passed on by ifconfig) */
+static int stmmac_config(struct net_device *dev, struct ifmap *map)
+{
+	if (dev->flags & IFF_UP)	/* can't act on a running interface */
+		return -EBUSY;
+
+	/* Don't allow changing the I/O address */
+	if (map->base_addr != dev->base_addr) {
+		pr_warning("%s: can't change I/O address\n", dev->name);
+		return -EOPNOTSUPP;
+	}
+
+	/* Don't allow changing the IRQ */
+	if (map->irq != dev->irq) {
+		pr_warning("%s: can't change IRQ number %d\n",
+		       dev->name, dev->irq);
+		return -EOPNOTSUPP;
+	}
+
+	/* ignore other fields */
+	return 0;
+}
+
+/**
+ *  stmmac_multicast_list - entry point for multicast addressing
+ *  @dev : pointer to the device structure
+ *  Description:
+ *  This function is a driver entry point which gets called by the kernel
+ *  whenever multicast addresses must be enabled/disabled.
+ *  Return value:
+ *  void.
+ */
+static void stmmac_multicast_list(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	spin_lock(&priv->lock);
+	priv->mac_type->ops->set_filter(dev);
+	spin_unlock(&priv->lock);
+	return;
+}
+
+/**
+ *  stmmac_change_mtu - entry point to change MTU size for the device.
+ *  @dev : device pointer.
+ *  @new_mtu : the new MTU size for the device.
+ *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
+ *  to drive packet transmission. Ethernet has an MTU of 1500 octets
+ *  (ETH_DATA_LEN). This value can be changed with ifconfig.
+ *  Return value:
+ *  0 on success and an appropriate (-)ve integer as defined in errno.h
+ *  file on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int max_mtu;
+
+	if (netif_running(dev)) {
+		pr_err("%s: must be stopped to change its MTU\n", dev->name);
+		return -EBUSY;
+	}
+
+	if (priv->is_gmac)
+		max_mtu = JUMBO_LEN;
+	else
+		max_mtu = ETH_DATA_LEN;
+
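+	/* 46 bytes is the minimum Ethernet payload (ETH_ZLEN - ETH_HLEN) */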
+	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
+		pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
+		return -EINVAL;
+	}
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (unlikely(!dev)) {
+		pr_err("%s: invalid dev pointer\n", __func__);
+		return IRQ_NONE;
+	}
+
+	if (priv->is_gmac) {
+		unsigned long ioaddr = dev->base_addr;
+		/* Handle the GMAC's own interrupts */
+		priv->mac_type->ops->host_irq_status(ioaddr);
+	}
+	stmmac_dma_interrupt(dev);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void stmmac_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	stmmac_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+/**
+ *  stmmac_ioctl - Entry point for the Ioctl
+ *  @dev: Device pointer.
+ *  @rq: An IOCTL-specific structure that can contain a pointer to
+ *  a proprietary structure used to pass information to the driver.
+ *  @cmd: IOCTL command
+ *  Description:
+ *  Currently no special functionality is supported in IOCTL; only
+ *  phy_mii_ioctl(...) can be invoked.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		if (!priv->phydev)
+			return -EINVAL;
+
+		spin_lock(&priv->lock);
+		ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
+		spin_unlock(&priv->lock);
+	default:
+		break;
+	}
+	return ret;
+}
+
+#ifdef STMMAC_VLAN_TAG_USED
+static void stmmac_vlan_rx_register(struct net_device *dev,
+				    struct vlan_group *grp)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	DBG(probe, INFO, "%s: Setting vlgrp to %p\n", dev->name, grp);
+
+	spin_lock(&priv->lock);
+	priv->vlgrp = grp;
+	spin_unlock(&priv->lock);
+
+	return;
+}
+#endif
+
+static const struct net_device_ops stmmac_netdev_ops = {
+	.ndo_open = stmmac_open,
+	.ndo_start_xmit = stmmac_xmit,
+	.ndo_stop = stmmac_release,
+	.ndo_change_mtu = stmmac_change_mtu,
+	.ndo_set_multicast_list = stmmac_multicast_list,
+	.ndo_tx_timeout = stmmac_tx_timeout,
+	.ndo_do_ioctl = stmmac_ioctl,
+	.ndo_set_config = stmmac_config,
+#ifdef STMMAC_VLAN_TAG_USED
+	.ndo_vlan_rx_register = stmmac_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = stmmac_poll_controller,
+#endif
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+/**
+ * stmmac_probe - Initialization of the adapter.
+ * @dev : device pointer
+ * Description: The function initializes the network device structure for
+ * the STMMAC driver. It also calls the low level routines
+ * in order to init the HW (i.e. the DMA engine)
+ */
+static int stmmac_probe(struct net_device *dev)
+{
+	int ret = 0;
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	ether_setup(dev);
+
+	dev->netdev_ops = &stmmac_netdev_ops;
+	stmmac_set_ethtool_ops(dev);
+
+	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
+	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+#ifdef STMMAC_VLAN_TAG_USED
+	/* Both mac100 and gmac support receive VLAN tag detection */
+	dev->features |= NETIF_F_HW_VLAN_RX;
+#endif
+	priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+	if (priv->is_gmac)
+		priv->rx_csum = 1;
+
+	if (flow_ctrl)
+		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
+
+	priv->pause = pause;
+	netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
+
+	/* Get the MAC address */
+	priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+
+	if (!is_valid_ether_addr(dev->dev_addr))
+		pr_warning("\tno valid MAC address;"
+			"please, use ifconfig or nwhwconfig!\n");
+
+	ret = register_netdev(dev);
+	if (ret) {
+		pr_err("%s: ERROR %i registering the device\n",
+		       __func__, ret);
+		return -ENODEV;
+	}
+
+	DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
+	    dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
+	    (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
+
+	spin_lock_init(&priv->lock);
+
+	return ret;
+}
+
+/**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: select and initialise the mac device (mac100 or Gmac).
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+
+	struct mac_device_info *device;
+
+	if (priv->is_gmac)
+		device = gmac_setup(ioaddr);
+	else
+		device = mac100_setup(ioaddr);
+
+	if (!device)
+		return -ENOMEM;
+
+	priv->mac_type = device;
+
+	priv->wolenabled = priv->mac_type->hw.pmt;	/* PMT supported */
+	if (priv->wolenabled == PMT_SUPPORTED)
+		priv->wolopts = WAKE_MAGIC;		/* Magic Frame */
+
+	return 0;
+}
+
+static int stmmacphy_dvr_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacphy_data *plat_dat;
+	plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
+
+	pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
+	       plat_dat->bus_id);
+
+	return 0;
+}
+
+static int stmmacphy_dvr_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct platform_driver stmmacphy_driver = {
+	.driver = {
+		   .name = PHY_RESOURCE_NAME,
+		   },
+	.probe = stmmacphy_dvr_probe,
+	.remove = stmmacphy_dvr_remove,
+};
+
+/**
+ * stmmac_associate_phy
+ * @dev: pointer to device structure
+ * @data: points to the private structure.
+ * Description: Scans through all the PHYs we have registered and checks if
+ * any are associated with our MAC.  If so, then just fill in
+ * the blanks in our local context structure
+ */
+static int stmmac_associate_phy(struct device *dev, void *data)
+{
+	struct stmmac_priv *priv = (struct stmmac_priv *)data;
+	struct plat_stmmacphy_data *plat_dat;
+
+	plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
+
+	DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
+		plat_dat->bus_id);
+
+	/* Check that this phy is for the MAC being initialised */
+	if (priv->bus_id != plat_dat->bus_id)
+		return 0;
+
+	/* OK, this PHY is connected to the MAC.
+	   Go ahead and get the parameters */
+	DBG(probe, DEBUG, "%s: OK. Found PHY config\n", __func__);
+	priv->phy_irq =
+	    platform_get_irq_byname(to_platform_device(dev), "phyirq");
+	DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__,
+	    plat_dat->bus_id, priv->phy_irq);
+
+	/* Override with kernel parameters if supplied XXX CRS XXX
+	 * this needs to have multiple instances */
+	if ((phyaddr >= 0) && (phyaddr <= 31))
+		plat_dat->phy_addr = phyaddr;
+
+	priv->phy_addr = plat_dat->phy_addr;
+	priv->phy_mask = plat_dat->phy_mask;
+	priv->phy_interface = plat_dat->interface;
+	priv->phy_reset = plat_dat->phy_reset;
+
+	DBG(probe, DEBUG, "%s: exiting\n", __func__);
+	return 1;	/* forces exit of driver_for_each_device() */
+}
+
+/**
+ * stmmac_dvr_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int stmmac_dvr_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	unsigned int *addr = NULL;
+	struct net_device *ndev = NULL;
+	struct stmmac_priv *priv;
+	struct plat_stmmacenet_data *plat_dat;
+
+	pr_info("STMMAC driver:\n\tplatform registration... ");
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		goto out;
+	}
+	pr_info("done!\n");
+
+	if (!request_mem_region(res->start, (res->end - res->start),
+				pdev->name)) {
+		pr_err("%s: ERROR: memory allocation failed"
+		       "cannot get the I/O addr 0x%x\n",
+		       __func__, (unsigned int)res->start);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	addr = ioremap(res->start, (res->end - res->start));
+	if (!addr) {
+		pr_err("%s: ERROR: memory mapping failed \n", __func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+	if (!ndev) {
+		pr_err("%s: ERROR: allocating the device\n", __func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	/* Get the MAC information */
+	ndev->irq = platform_get_irq_byname(pdev, "macirq");
+	if (ndev->irq == -ENXIO) {
+		pr_err("%s: ERROR: MAC IRQ configuration "
+		       "information not found\n", __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	priv = netdev_priv(ndev);
+	priv->device = &(pdev->dev);
+	priv->dev = ndev;
+	plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
+	priv->bus_id = plat_dat->bus_id;
+	priv->pbl = plat_dat->pbl;	/* TLI */
+	priv->is_gmac = plat_dat->has_gmac;	/* GMAC is on board */
+
+	platform_set_drvdata(pdev, ndev);
+
+	/* Set the I/O base addr */
+	ndev->base_addr = (unsigned long)addr;
+
+	/* MAC HW device detection */
+	ret = stmmac_mac_device_setup(ndev);
+	if (ret < 0)
+		goto out;
+
+	/* Network Device Registration */
+	ret = stmmac_probe(ndev);
+	if (ret < 0)
+		goto out;
+
+	/* associate a PHY - it is provided by another platform bus */
+	if (!driver_for_each_device
+	    (&(stmmacphy_driver.driver), NULL, (void *)priv,
+	     stmmac_associate_phy)) {
+		pr_err("No PHY device is associated with this MAC!\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	priv->fix_mac_speed = plat_dat->fix_mac_speed;
+	priv->bsp_priv = plat_dat->bsp_priv;
+
+	pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
+	       "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
+	       pdev->id, ndev->irq, (unsigned int)addr);
+
+	/* MDIO bus Registration */
+	pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
+	ret = stmmac_mdio_register(ndev);
+	if (ret < 0)
+		goto out;
+	pr_debug("registered!\n");
+
+out:
+	if (ret < 0) {
+		platform_set_drvdata(pdev, NULL);
+		release_mem_region(res->start, (res->end - res->start));
+		if (addr != NULL)
+			iounmap(addr);
+	}
+
+	return ret;
+}
+
+/**
+ * stmmac_dvr_remove
+ * @pdev: platform device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
+ * changes the link status, releases the DMA descriptor rings,
+ * unregisters the MDIO bus and unmaps the allocated memory.
+ */
+static int stmmac_dvr_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct resource *res;
+
+	pr_info("%s:\n\tremoving driver", __func__);
+
+	stmmac_dma_stop_rx(ndev->base_addr);
+	stmmac_dma_stop_tx(ndev->base_addr);
+
+	stmmac_mac_disable_rx(ndev->base_addr);
+	stmmac_mac_disable_tx(ndev->base_addr);
+
+	netif_carrier_off(ndev);
+
+	stmmac_mdio_unregister(ndev);
+
+	platform_set_drvdata(pdev, NULL);
+	unregister_netdev(ndev);
+
+	iounmap((void *)ndev->base_addr);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, (res->end - res->start));
+
+	free_netdev(ndev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int dis_ic = 0;
+
+	if (!dev || !netif_running(dev))
+		return 0;
+
+	spin_lock(&priv->lock);
+
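+	/* On a real suspend: detach the device, stop the PHY, timers and
+	 * DMA, and keep the receiver powered through the PMT block only if
+	 * wake-on-LAN is allowed. On freeze/hibernation just close the
+	 * interface; it is re-opened in stmmac_resume(). */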
+	if (state.event == PM_EVENT_SUSPEND) {
+		netif_device_detach(dev);
+		netif_stop_queue(dev);
+		if (priv->phydev)
+			phy_stop(priv->phydev);
+
+#ifdef CONFIG_STMMAC_TIMER
+		priv->tm->timer_stop();
+		dis_ic = 1;
+#endif
+		napi_disable(&priv->napi);
+
+		/* Stop TX/RX DMA */
+		stmmac_dma_stop_tx(dev->base_addr);
+		stmmac_dma_stop_rx(dev->base_addr);
+		/* Clear the Rx/Tx descriptors */
+		priv->mac_type->ops->init_rx_desc(priv->dma_rx,
+						  priv->dma_rx_size, dis_ic);
+		priv->mac_type->ops->init_tx_desc(priv->dma_tx,
+						  priv->dma_tx_size);
+
+		stmmac_mac_disable_tx(dev->base_addr);
+
+		if (device_may_wakeup(&(pdev->dev))) {
+			/* Enable Power down mode by programming the PMT regs */
+			if (priv->wolenabled == PMT_SUPPORTED)
+				priv->mac_type->ops->pmt(dev->base_addr,
+							 priv->wolopts);
+		} else {
+			stmmac_mac_disable_rx(dev->base_addr);
+		}
+	} else {
+		priv->shutdown = 1;
+		/* Although this can appear slightly redundant, it actually
+		 * speeds up the standby operation and guarantees that the
+		 * driver keeps working when hibernating to disk. */
+		stmmac_release(dev);
+	}
+
+	spin_unlock(&priv->lock);
+	return 0;
+}
+
+static int stmmac_resume(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+
+	if (!netif_running(dev))
+		return 0;
+
+	spin_lock(&priv->lock);
+
+	if (priv->shutdown) {
+		/* Re-open the interface and re-init the MAC/DMA
+		   and the rings. */
+		stmmac_open(dev);
+		goto out_resume;
+	}
+
+	/* The Power Down bit in the PM register is cleared automatically
+	 * as soon as a magic packet or a Wake-up frame is received.
+	 * It is still better to clear it manually, because leaving it set
+	 * can cause problems when the resume is triggered by other
+	 * devices (e.g. the serial console). */
+	if (device_may_wakeup(&(pdev->dev)))
+		if (priv->wolenabled == PMT_SUPPORTED)
+			priv->mac_type->ops->pmt(dev->base_addr, 0);
+
+	netif_device_attach(dev);
+
+	/* Enable the MAC and DMA */
+	stmmac_mac_enable_rx(ioaddr);
+	stmmac_mac_enable_tx(ioaddr);
+	stmmac_dma_start_tx(ioaddr);
+	stmmac_dma_start_rx(ioaddr);
+
+#ifdef CONFIG_STMMAC_TIMER
+	priv->tm->timer_start(tmrate);
+#endif
+	napi_enable(&priv->napi);
+
+	if (priv->phydev)
+		phy_start(priv->phydev);
+
+	netif_start_queue(dev);
+
+out_resume:
+	spin_unlock(&priv->lock);
+	return 0;
+}
+#endif
+
+static struct platform_driver stmmac_driver = {
+	.driver = {
+		   .name = STMMAC_RESOURCE_NAME,
+		   },
+	.probe = stmmac_dvr_probe,
+	.remove = stmmac_dvr_remove,
+#ifdef CONFIG_PM
+	.suspend = stmmac_suspend,
+	.resume = stmmac_resume,
+#endif
+
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+	int ret;
+
+	if (platform_driver_register(&stmmacphy_driver)) {
+		pr_err("No PHY devices registered!\n");
+		return -ENODEV;
+	}
+
+	ret = platform_driver_register(&stmmac_driver);
+	return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+	platform_driver_unregister(&stmmacphy_driver);
+	platform_driver_unregister(&stmmac_driver);
+}
+
+#ifndef MODULE
+static int __init stmmac_cmdline_opt(char *str)
+{
+	char *opt;
+
+	if (!str || !*str)
+		return -EINVAL;
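+	/* Parse comma-separated "name:value" options passed on the kernel
+	 * command line via stmmaceth=... */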
+	while ((opt = strsep(&str, ",")) != NULL) {
+		if (!strncmp(opt, "debug:", 6))
+			strict_strtoul(opt + 6, 0, (unsigned long *)&debug);
+		else if (!strncmp(opt, "phyaddr:", 8))
+			strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr);
+		else if (!strncmp(opt, "dma_txsize:", 11))
+			strict_strtoul(opt + 11, 0,
+				       (unsigned long *)&dma_txsize);
+		else if (!strncmp(opt, "dma_rxsize:", 11))
+			strict_strtoul(opt + 11, 0,
+				       (unsigned long *)&dma_rxsize);
+		else if (!strncmp(opt, "buf_sz:", 7))
+			strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
+		else if (!strncmp(opt, "tc:", 3))
+			strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
+		else if (!strncmp(opt, "tx_coe:", 7))
+			strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
+		else if (!strncmp(opt, "watchdog:", 9))
+			strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
+		else if (!strncmp(opt, "flow_ctrl:", 10))
+			strict_strtoul(opt + 10, 0,
+				       (unsigned long *)&flow_ctrl);
+		else if (!strncmp(opt, "pause:", 6))
+			strict_strtoul(opt + 6, 0, (unsigned long *)&pause);
+#ifdef CONFIG_STMMAC_TIMER
+		else if (!strncmp(opt, "tmrate:", 7))
+			strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate);
+#endif
+	}
+	return 0;
+}
+
+__setup("stmmaceth=", stmmac_cmdline_opt);
+#endif
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
new file mode 100644
index 0000000..8498552
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -0,0 +1,217 @@
+/*******************************************************************************
+  STMMAC Ethernet Driver -- MDIO bus implementation
+  Provides Bus interface for MII registers
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Carl Shaw <carl.shaw@st.com>
+  Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "stmmac.h"
+
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+
+/**
+ * stmmac_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * Description: it reads data from the MII register from within the phy device.
+ * For the 7111 GMAC, we must set bit 0 in the MII address register while
+ * accessing the PHY registers.
+ * Fortunately, it seems this has no drawback for the 7109 MAC.
+ */
+static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+	struct net_device *ndev = bus->priv;
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	unsigned long ioaddr = ndev->base_addr;
+	unsigned int mii_address = priv->mac_type->hw.mii.addr;
+	unsigned int mii_data = priv->mac_type->hw.mii.data;
+
+	int data;
+	u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
+			((phyreg << 6) & (0x000007C0)));
+	regValue |= MII_BUSY;	/* in case of GMAC */
+
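+	/* Wait for any pending MII operation to finish, start the read by
+	 * programming the address register, then wait for completion before
+	 * fetching the result from the data register. */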
+	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+	writel(regValue, ioaddr + mii_address);
+	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+
+	/* Read the data from the MII data register */
+	data = (int)readl(ioaddr + mii_data);
+
+	return data;
+}
+
+/**
+ * stmmac_mdio_write
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * @phydata: phy data
+ * Description: it writes the data into the MII register from within the device.
+ */
+static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+			     u16 phydata)
+{
+	struct net_device *ndev = bus->priv;
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	unsigned long ioaddr = ndev->base_addr;
+	unsigned int mii_address = priv->mac_type->hw.mii.addr;
+	unsigned int mii_data = priv->mac_type->hw.mii.data;
+
+	u16 value =
+	    (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
+	    | MII_WRITE;
+
+	value |= MII_BUSY;
+
+	/* Wait until any existing MII operation is complete */
+	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+
+	/* Load the data register, then trigger the write via the address register */
+	writel(phydata, ioaddr + mii_data);
+	writel(value, ioaddr + mii_address);
+
+	/* Wait until any existing MII operation is complete */
+	do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+
+	return 0;
+}
+
+/**
+ * stmmac_mdio_reset
+ * @bus: points to the mii_bus structure
+ * Description: reset the MII bus
+ */
+static int stmmac_mdio_reset(struct mii_bus *bus)
+{
+	struct net_device *ndev = bus->priv;
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	unsigned long ioaddr = ndev->base_addr;
+	unsigned int mii_address = priv->mac_type->hw.mii.addr;
+
+	if (priv->phy_reset) {
+		pr_debug("stmmac_mdio_reset: calling phy_reset\n");
+		priv->phy_reset(priv->bsp_priv);
+	}
+
+	/* This is a workaround for problems with the STE101P PHY.
+	 * It doesn't complete its reset until at least one clock cycle
+	 * on MDC, so perform a dummy mdio read.
+	 */
+	writel(0, ioaddr + mii_address);
+
+	return 0;
+}
+
+/**
+ * stmmac_mdio_register
+ * @ndev: net device structure
+ * Description: it registers the MII bus
+ */
+int stmmac_mdio_register(struct net_device *ndev)
+{
+	int err = 0;
+	struct mii_bus *new_bus;
+	int *irqlist;
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	int addr, found;
+
+	new_bus = mdiobus_alloc();
+	if (new_bus == NULL)
+		return -ENOMEM;
+
+	irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (irqlist == NULL) {
+		err = -ENOMEM;
+		goto irqlist_alloc_fail;
+	}
+
+	/* Assign IRQ to phy at address phy_addr */
+	if (priv->phy_addr != -1)
+		irqlist[priv->phy_addr] = priv->phy_irq;
+
+	new_bus->name = "STMMAC MII Bus";
+	new_bus->read = &stmmac_mdio_read;
+	new_bus->write = &stmmac_mdio_write;
+	new_bus->reset = &stmmac_mdio_reset;
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+	new_bus->priv = ndev;
+	new_bus->irq = irqlist;
+	new_bus->phy_mask = priv->phy_mask;
+	new_bus->parent = priv->device;
+	err = mdiobus_register(new_bus);
+	if (err != 0) {
+		pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
+		goto bus_register_fail;
+	}
+
+	priv->mii = new_bus;
+
+	found = 0;
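+	/* Scan the bus: if no PHY address was provided by platform data
+	 * (phy_addr == -1), adopt the first PHY found and give it the
+	 * configured IRQ. */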
+	for (addr = 0; addr < 32; addr++) {
+		struct phy_device *phydev = new_bus->phy_map[addr];
+		if (phydev) {
+			if (priv->phy_addr == -1) {
+				priv->phy_addr = addr;
+				phydev->irq = priv->phy_irq;
+				irqlist[addr] = priv->phy_irq;
+			}
+			pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
+			       ndev->name, phydev->phy_id, addr,
+			       phydev->irq, dev_name(&phydev->dev),
+			       (addr == priv->phy_addr) ? " active" : "");
+			found = 1;
+		}
+	}
+
+	if (!found)
+		pr_warning("%s: No PHY found\n", ndev->name);
+
+	return 0;
+bus_register_fail:
+	kfree(irqlist);
+irqlist_alloc_fail:
+	mdiobus_free(new_bus);
+	return err;
+}
+
+/**
+ * stmmac_mdio_unregister
+ * @ndev: net device structure
+ * Description: it unregisters the MII bus
+ */
+int stmmac_mdio_unregister(struct net_device *ndev)
+{
+	struct stmmac_priv *priv = netdev_priv(ndev);
+
+	mdiobus_unregister(priv->mii);
+	priv->mii->priv = NULL;
+	mdiobus_free(priv->mii);
+
+	return 0;
+}
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
new file mode 100644
index 0000000..b838c65
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -0,0 +1,140 @@
+/*******************************************************************************
+  STMMAC external timer support.
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include "stmmac_timer.h"
+
+static void stmmac_timer_handler(void *data)
+{
+	struct net_device *dev = (struct net_device *)data;
+
+	stmmac_schedule(dev);
+
+	return;
+}
+
+#define STMMAC_TIMER_MSG(timer, freq) \
+printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);
+
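+/* Two alternative back-ends can drive this timer: the RTC periodic
+ * interrupt (CONFIG_STMMAC_RTC_TIMER) or TMU channel 2
+ * (CONFIG_STMMAC_TMU_TIMER); the back-end is selected at build time. */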
+#if defined(CONFIG_STMMAC_RTC_TIMER)
+#include <linux/rtc.h>
+static struct rtc_device *stmmac_rtc;
+static rtc_task_t stmmac_task;
+
+static void stmmac_rtc_start(unsigned int new_freq)
+{
+	rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
+	rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
+	return;
+}
+
+static void stmmac_rtc_stop(void)
+{
+	rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
+	return;
+}
+
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
+{
+	stmmac_task.private_data = dev;
+	stmmac_task.func = stmmac_timer_handler;
+
+	stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+	if (stmmac_rtc == NULL) {
+		pr_error("open rtc device failed\n");
+		return -ENODEV;
+	}
+
+	rtc_irq_register(stmmac_rtc, &stmmac_task);
+
+	/* Bail out if the RTC cannot generate a periodic interrupt
+	 * at the requested frequency */
+	if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
+		pr_error("set periodic failed\n");
+		rtc_irq_unregister(stmmac_rtc, &stmmac_task);
+		rtc_class_close(stmmac_rtc);
+		return -1;
+	}
+
+	STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
+
+	tm->timer_start = stmmac_rtc_start;
+	tm->timer_stop = stmmac_rtc_stop;
+
+	return 0;
+}
+
+int stmmac_close_ext_timer(void)
+{
+	rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
+	rtc_irq_unregister(stmmac_rtc, &stmmac_task);
+	rtc_class_close(stmmac_rtc);
+	return 0;
+}
+
+#elif defined(CONFIG_STMMAC_TMU_TIMER)
+#include <linux/clk.h>
+#define TMU_CHANNEL "tmu2_clk"
+static struct clk *timer_clock;
+
+static void stmmac_tmu_start(unsigned int new_freq)
+{
+	clk_set_rate(timer_clock, new_freq);
+	clk_enable(timer_clock);
+	return;
+}
+
+static void stmmac_tmu_stop(void)
+{
+	clk_disable(timer_clock);
+	return;
+}
+
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
+{
+	timer_clock = clk_get(NULL, TMU_CHANNEL);
+
+	if (timer_clock == NULL)
+		return -1;
+
+	if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
+		timer_clock = NULL;
+		return -1;
+	}
+
+	STMMAC_TIMER_MSG("TMU2", tm->freq);
+	tm->timer_start = stmmac_tmu_start;
+	tm->timer_stop = stmmac_tmu_stop;
+
+	return 0;
+}
+
+int stmmac_close_ext_timer(void)
+{
+	clk_disable(timer_clock);
+	tmu2_unregister_user();
+	clk_put(timer_clock);
+	return 0;
+}
+#endif
diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h
new file mode 100644
index 0000000..f795cae
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+  STMMAC external timer Header File.
+
+  Copyright (C) 2007-2009  STMicroelectronics Ltd
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+struct stmmac_timer {
+	void (*timer_start) (unsigned int new_freq);
+	void (*timer_stop) (void);
+	unsigned int freq;
+};
+
+/* Open the HW timer device and return 0 in case of success */
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
+/* Stop the timer and release it */
+int stmmac_close_ext_timer(void);
+/* Function used for scheduling task within the stmmac */
+void stmmac_schedule(struct net_device *dev);
+
+#if defined(CONFIG_STMMAC_TMU_TIMER)
+extern int tmu2_register_user(void *fnt, void *data);
+extern void tmu2_unregister_user(void);
+#endif
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 305ec3d..d6f4faf 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1033,10 +1033,8 @@
 			(csum_stuff_off << 21));
 	}
 
-	local_irq_save(flags);
-	if (!spin_trylock(&gp->tx_lock)) {
+	if (!spin_trylock_irqsave(&gp->tx_lock, flags)) {
 		/* Tell upper layer to requeue */
-		local_irq_restore(flags);
 		return NETDEV_TX_LOCKED;
 	}
 	/* We raced with gem_do_stop() */
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index d1298e5..6572e8a 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -22,11 +22,7 @@
  * All Rights Reserved.
  */
 
-#ifdef TC35815_NAPI
-#define DRV_VERSION	"1.38-NAPI"
-#else
-#define DRV_VERSION	"1.38"
-#endif
+#define DRV_VERSION	"1.39"
 static const char *version = "tc35815.c:v" DRV_VERSION "\n";
 #define MODNAME			"tc35815"
 
@@ -54,13 +50,6 @@
 #include <asm/io.h>
 #include <asm/byteorder.h>
 
-/* First, a few definitions that the brave might change. */
-
-#define GATHER_TXINT	/* On-Demand Tx Interrupt */
-#define WORKAROUND_LOSTCAR
-#define WORKAROUND_100HALF_PROMISC
-/* #define TC35815_USE_PACKEDBUFFER */
-
 enum tc35815_chiptype {
 	TC35815CF = 0,
 	TC35815_NWU,
@@ -330,17 +319,10 @@
 
 
 /* Some useful constants. */
-#undef NO_CHECK_CARRIER	/* Does not check No-Carrier with TP */
 
-#ifdef NO_CHECK_CARRIER
-#define TX_CTL_CMD	(Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
-	Tx_EnExColl | Tx_EnExDefer | Tx_EnUnder | \
-	Tx_En)	/* maybe  0x7b01 */
-#else
-#define TX_CTL_CMD	(Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
+#define TX_CTL_CMD	(Tx_EnTxPar | Tx_EnLateColl | \
 	Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
 	Tx_En)	/* maybe  0x7b01 */
-#endif
 /* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
 #define RX_CTL_CMD	(Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
 	| Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
@@ -361,13 +343,6 @@
 #define TX_THRESHOLD_KEEP_LIMIT 10
 
 /* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
-#ifdef TC35815_USE_PACKEDBUFFER
-#define FD_PAGE_NUM 2
-#define RX_BUF_NUM	8	/* >= 2 */
-#define RX_FD_NUM	250	/* >= 32 */
-#define TX_FD_NUM	128
-#define RX_BUF_SIZE	PAGE_SIZE
-#else /* TC35815_USE_PACKEDBUFFER */
 #define FD_PAGE_NUM 4
 #define RX_BUF_NUM	128	/* < 256 */
 #define RX_FD_NUM	256	/* >= 32 */
@@ -381,7 +356,6 @@
 #define RX_BUF_SIZE	\
 	L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
 #endif
-#endif /* TC35815_USE_PACKEDBUFFER */
 #define RX_FD_RESERVE	(2 / 2)	/* max 2 BD per RxFD */
 #define NAPI_WEIGHT	16
 
@@ -439,11 +413,7 @@
 	/*
 	 * Transmitting: Batch Mode.
 	 *	1 BD in 1 TxFD.
-	 * Receiving: Packing Mode. (TC35815_USE_PACKEDBUFFER)
-	 *	1 circular FD for Free Buffer List.
-	 *	RX_BUF_NUM BD in Free Buffer FD.
-	 *	One Free Buffer BD has PAGE_SIZE data buffer.
-	 * Or Non-Packing Mode.
+	 * Receiving: Non-Packing Mode.
 	 *	1 circular FD for Free Buffer List.
 	 *	RX_BUF_NUM BD in Free Buffer FD.
 	 *	One Free Buffer BD has ETH_FRAME_LEN data buffer.
@@ -457,21 +427,11 @@
 	struct RxFD *rfd_limit;
 	struct RxFD *rfd_cur;
 	struct FrFD *fbl_ptr;
-#ifdef TC35815_USE_PACKEDBUFFER
-	unsigned char fbl_curid;
-	void *data_buf[RX_BUF_NUM];		/* packing */
-	dma_addr_t data_buf_dma[RX_BUF_NUM];
-	struct {
-		struct sk_buff *skb;
-		dma_addr_t skb_dma;
-	} tx_skbs[TX_FD_NUM];
-#else
 	unsigned int fbl_count;
 	struct {
 		struct sk_buff *skb;
 		dma_addr_t skb_dma;
 	} tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
-#endif
 	u32 msg_enable;
 	enum tc35815_chiptype chiptype;
 };
@@ -486,51 +446,6 @@
 	return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
 }
 #endif
-#ifdef TC35815_USE_PACKEDBUFFER
-static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
-{
-	int i;
-	for (i = 0; i < RX_BUF_NUM; i++) {
-		if (bus >= lp->data_buf_dma[i] &&
-		    bus < lp->data_buf_dma[i] + PAGE_SIZE)
-			return (void *)((u8 *)lp->data_buf[i] +
-					(bus - lp->data_buf_dma[i]));
-	}
-	return NULL;
-}
-
-#define TC35815_DMA_SYNC_ONDEMAND
-static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
-{
-#ifdef TC35815_DMA_SYNC_ONDEMAND
-	void *buf;
-	/* pci_map + pci_dma_sync will be more effective than
-	 * pci_alloc_consistent on some archs. */
-	buf = (void *)__get_free_page(GFP_ATOMIC);
-	if (!buf)
-		return NULL;
-	*dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
-				     PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(hwdev, *dma_handle)) {
-		free_page((unsigned long)buf);
-		return NULL;
-	}
-	return buf;
-#else
-	return pci_alloc_consistent(hwdev, PAGE_SIZE, dma_handle);
-#endif
-}
-
-static void free_rxbuf_page(struct pci_dev *hwdev, void *buf, dma_addr_t dma_handle)
-{
-#ifdef TC35815_DMA_SYNC_ONDEMAND
-	pci_unmap_single(hwdev, dma_handle, PAGE_SIZE, PCI_DMA_FROMDEVICE);
-	free_page((unsigned long)buf);
-#else
-	pci_free_consistent(hwdev, PAGE_SIZE, buf, dma_handle);
-#endif
-}
-#else /* TC35815_USE_PACKEDBUFFER */
 static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
 				       struct pci_dev *hwdev,
 				       dma_addr_t *dma_handle)
@@ -555,19 +470,14 @@
 			 PCI_DMA_FROMDEVICE);
 	dev_kfree_skb_any(skb);
 }
-#endif /* TC35815_USE_PACKEDBUFFER */
 
 /* Index to functions, as function prototypes. */
 
 static int	tc35815_open(struct net_device *dev);
 static int	tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t	tc35815_interrupt(int irq, void *dev_id);
-#ifdef TC35815_NAPI
 static int	tc35815_rx(struct net_device *dev, int limit);
 static int	tc35815_poll(struct napi_struct *napi, int budget);
-#else
-static void	tc35815_rx(struct net_device *dev);
-#endif
 static void	tc35815_txdone(struct net_device *dev);
 static int	tc35815_close(struct net_device *dev);
 static struct	net_device_stats *tc35815_get_stats(struct net_device *dev);
@@ -654,8 +564,6 @@
 		 * TX4939 PCFG.SPEEDn bit will be changed on
 		 * NETDEV_CHANGE event.
 		 */
-
-#if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR)
 		/*
 		 * WORKAROUND: enable LostCrS only if half duplex
 		 * operation.
@@ -665,7 +573,6 @@
 		    lp->chiptype != TC35815_TX4939)
 			tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
 				  &tr->Tx_Ctl);
-#endif
 
 		lp->speed = phydev->speed;
 		lp->duplex = phydev->duplex;
@@ -674,11 +581,9 @@
 
 	if (phydev->link != lp->link) {
 		if (phydev->link) {
-#ifdef WORKAROUND_100HALF_PROMISC
 			/* delayed promiscuous enabling */
 			if (dev->flags & IFF_PROMISC)
 				tc35815_set_multicast_list(dev);
-#endif
 		} else {
 			lp->speed = 0;
 			lp->duplex = -1;
@@ -923,9 +828,7 @@
 	dev->netdev_ops = &tc35815_netdev_ops;
 	dev->ethtool_ops = &tc35815_ethtool_ops;
 	dev->watchdog_timeo = TC35815_TX_TIMEOUT;
-#ifdef TC35815_NAPI
 	netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
-#endif
 
 	dev->irq = pdev->irq;
 	dev->base_addr = (unsigned long)ioaddr;
@@ -1007,25 +910,6 @@
 		if (!lp->fd_buf)
 			return -ENOMEM;
 		for (i = 0; i < RX_BUF_NUM; i++) {
-#ifdef TC35815_USE_PACKEDBUFFER
-			lp->data_buf[i] =
-				alloc_rxbuf_page(lp->pci_dev,
-						 &lp->data_buf_dma[i]);
-			if (!lp->data_buf[i]) {
-				while (--i >= 0) {
-					free_rxbuf_page(lp->pci_dev,
-							lp->data_buf[i],
-							lp->data_buf_dma[i]);
-					lp->data_buf[i] = NULL;
-				}
-				pci_free_consistent(lp->pci_dev,
-						    PAGE_SIZE * FD_PAGE_NUM,
-						    lp->fd_buf,
-						    lp->fd_buf_dma);
-				lp->fd_buf = NULL;
-				return -ENOMEM;
-			}
-#else
 			lp->rx_skbs[i].skb =
 				alloc_rxbuf_skb(dev, lp->pci_dev,
 						&lp->rx_skbs[i].skb_dma);
@@ -1043,15 +927,9 @@
 				lp->fd_buf = NULL;
 				return -ENOMEM;
 			}
-#endif
 		}
 		printk(KERN_DEBUG "%s: FD buf %p DataBuf",
 		       dev->name, lp->fd_buf);
-#ifdef TC35815_USE_PACKEDBUFFER
-		printk(" DataBuf");
-		for (i = 0; i < RX_BUF_NUM; i++)
-			printk(" %p", lp->data_buf[i]);
-#endif
 		printk("\n");
 	} else {
 		for (i = 0; i < FD_PAGE_NUM; i++)
@@ -1084,7 +962,6 @@
 	lp->fbl_ptr = (struct FrFD *)fd_addr;
 	lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
 	lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
-#ifndef TC35815_USE_PACKEDBUFFER
 	/*
 	 * move all allocated skbs to head of rx_skbs[] array.
 	 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in
@@ -1102,11 +979,7 @@
 			lp->fbl_count++;
 		}
 	}
-#endif
 	for (i = 0; i < RX_BUF_NUM; i++) {
-#ifdef TC35815_USE_PACKEDBUFFER
-		lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->data_buf_dma[i]);
-#else
 		if (i >= lp->fbl_count) {
 			lp->fbl_ptr->bd[i].BuffData = 0;
 			lp->fbl_ptr->bd[i].BDCtl = 0;
@@ -1114,15 +987,11 @@
 		}
 		lp->fbl_ptr->bd[i].BuffData =
 			cpu_to_le32(lp->rx_skbs[i].skb_dma);
-#endif
 		/* BDID is index of FrFD.bd[] */
 		lp->fbl_ptr->bd[i].BDCtl =
 			cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
 				    RX_BUF_SIZE);
 	}
-#ifdef TC35815_USE_PACKEDBUFFER
-	lp->fbl_curid = 0;
-#endif
 
 	printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
 	       dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
@@ -1196,19 +1065,11 @@
 	lp->fbl_ptr = NULL;
 
 	for (i = 0; i < RX_BUF_NUM; i++) {
-#ifdef TC35815_USE_PACKEDBUFFER
-		if (lp->data_buf[i]) {
-			free_rxbuf_page(lp->pci_dev,
-					lp->data_buf[i], lp->data_buf_dma[i]);
-			lp->data_buf[i] = NULL;
-		}
-#else
 		if (lp->rx_skbs[i].skb) {
 			free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
 				       lp->rx_skbs[i].skb_dma);
 			lp->rx_skbs[i].skb = NULL;
 		}
-#endif
 	}
 	if (lp->fd_buf) {
 		pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
@@ -1254,7 +1115,7 @@
 	return bd_count;
 }
 
-#if defined(DEBUG) || defined(TC35815_USE_PACKEDBUFFER)
+#ifdef DEBUG
 static void
 dump_frfd(struct FrFD *fd)
 {
@@ -1271,9 +1132,7 @@
 		       le32_to_cpu(fd->bd[i].BDCtl));
 	printk("\n");
 }
-#endif
 
-#ifdef DEBUG
 static void
 panic_queues(struct net_device *dev)
 {
@@ -1400,9 +1259,7 @@
 		return -EAGAIN;
 	}
 
-#ifdef TC35815_NAPI
 	napi_enable(&lp->napi);
-#endif
 
 	/* Reset the hardware here. Don't forget to set the station address. */
 	spin_lock_irq(&lp->lock);
@@ -1478,9 +1335,7 @@
 			(struct tc35815_regs __iomem *)dev->base_addr;
 		/* Start DMA Transmitter. */
 		txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
-#ifdef GATHER_TXINT
 		txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
-#endif
 		if (netif_msg_tx_queued(lp)) {
 			printk("%s: starting TxFD.\n", dev->name);
 			dump_txfd(txfd);
@@ -1536,11 +1391,7 @@
 	tc35815_schedule_restart(dev);
 }
 
-#ifdef TC35815_NAPI
 static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
-#else
-static int tc35815_do_interrupt(struct net_device *dev, u32 status)
-#endif
 {
 	struct tc35815_local *lp = netdev_priv(dev);
 	int ret = -1;
@@ -1579,12 +1430,7 @@
 	/* normal notification */
 	if (status & Int_IntMacRx) {
 		/* Got a packet(s). */
-#ifdef TC35815_NAPI
 		ret = tc35815_rx(dev, limit);
-#else
-		tc35815_rx(dev);
-		ret = 0;
-#endif
 		lp->lstats.rx_ints++;
 	}
 	if (status & Int_IntMacTx) {
@@ -1592,7 +1438,8 @@
 		lp->lstats.tx_ints++;
 		tc35815_txdone(dev);
 		netif_wake_queue(dev);
-		ret = 0;
+		if (ret < 0)
+			ret = 0;
 	}
 	return ret;
 }
@@ -1607,7 +1454,6 @@
 	struct tc35815_local *lp = netdev_priv(dev);
 	struct tc35815_regs __iomem *tr =
 		(struct tc35815_regs __iomem *)dev->base_addr;
-#ifdef TC35815_NAPI
 	u32 dmactl = tc_readl(&tr->DMA_Ctl);
 
 	if (!(dmactl & DMA_IntMask)) {
@@ -1624,22 +1470,6 @@
 		return IRQ_HANDLED;
 	}
 	return IRQ_NONE;
-#else
-	int handled;
-	u32 status;
-
-	spin_lock(&lp->lock);
-	status = tc_readl(&tr->Int_Src);
-	/* BLEx, FDAEx will be cleared later */
-	tc_writel(status & ~(Int_BLEx | Int_FDAEx),
-		  &tr->Int_Src);	/* write to clear */
-	handled = tc35815_do_interrupt(dev, status);
-	if (status & (Int_BLEx | Int_FDAEx))
-		tc_writel(status & (Int_BLEx | Int_FDAEx), &tr->Int_Src);
-	(void)tc_readl(&tr->Int_Src);	/* flush */
-	spin_unlock(&lp->lock);
-	return IRQ_RETVAL(handled >= 0);
-#endif /* TC35815_NAPI */
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1652,20 +1482,13 @@
 #endif
 
 /* We have a good packet(s), get it/them out of the buffers. */
-#ifdef TC35815_NAPI
 static int
 tc35815_rx(struct net_device *dev, int limit)
-#else
-static void
-tc35815_rx(struct net_device *dev)
-#endif
 {
 	struct tc35815_local *lp = netdev_priv(dev);
 	unsigned int fdctl;
 	int i;
-#ifdef TC35815_NAPI
 	int received = 0;
-#endif
 
 	while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
 		int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
@@ -1684,52 +1507,9 @@
 			struct sk_buff *skb;
 			unsigned char *data;
 			int cur_bd;
-#ifdef TC35815_USE_PACKEDBUFFER
-			int offset;
-#endif
 
-#ifdef TC35815_NAPI
 			if (--limit < 0)
 				break;
-#endif
-#ifdef TC35815_USE_PACKEDBUFFER
-			BUG_ON(bd_count > 2);
-			skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
-			if (skb == NULL) {
-				printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
-				       dev->name);
-				dev->stats.rx_dropped++;
-				break;
-			}
-			skb_reserve(skb, NET_IP_ALIGN);
-
-			data = skb_put(skb, pkt_len);
-
-			/* copy from receive buffer */
-			cur_bd = 0;
-			offset = 0;
-			while (offset < pkt_len && cur_bd < bd_count) {
-				int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) &
-					BD_BuffLength_MASK;
-				dma_addr_t dma = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData);
-				void *rxbuf = rxbuf_bus_to_virt(lp, dma);
-				if (offset + len > pkt_len)
-					len = pkt_len - offset;
-#ifdef TC35815_DMA_SYNC_ONDEMAND
-				pci_dma_sync_single_for_cpu(lp->pci_dev,
-							    dma, len,
-							    PCI_DMA_FROMDEVICE);
-#endif
-				memcpy(data + offset, rxbuf, len);
-#ifdef TC35815_DMA_SYNC_ONDEMAND
-				pci_dma_sync_single_for_device(lp->pci_dev,
-							       dma, len,
-							       PCI_DMA_FROMDEVICE);
-#endif
-				offset += len;
-				cur_bd++;
-			}
-#else /* TC35815_USE_PACKEDBUFFER */
 			BUG_ON(bd_count > 1);
 			cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
 				  & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
@@ -1757,16 +1537,11 @@
 				memmove(skb->data, skb->data - NET_IP_ALIGN,
 					pkt_len);
 			data = skb_put(skb, pkt_len);
-#endif /* TC35815_USE_PACKEDBUFFER */
 			if (netif_msg_pktdata(lp))
 				print_eth(data);
 			skb->protocol = eth_type_trans(skb, dev);
-#ifdef TC35815_NAPI
 			netif_receive_skb(skb);
 			received++;
-#else
-			netif_rx(skb);
-#endif
 			dev->stats.rx_packets++;
 			dev->stats.rx_bytes += pkt_len;
 		} else {
@@ -1803,19 +1578,11 @@
 			BUG_ON(id >= RX_BUF_NUM);
 #endif
 			/* free old buffers */
-#ifdef TC35815_USE_PACKEDBUFFER
-			while (lp->fbl_curid != id)
-#else
 			lp->fbl_count--;
 			while (lp->fbl_count < RX_BUF_NUM)
-#endif
 			{
-#ifdef TC35815_USE_PACKEDBUFFER
-				unsigned char curid = lp->fbl_curid;
-#else
 				unsigned char curid =
 					(id + 1 + lp->fbl_count) % RX_BUF_NUM;
-#endif
 				struct BDesc *bd = &lp->fbl_ptr->bd[curid];
 #ifdef DEBUG
 				bdctl = le32_to_cpu(bd->BDCtl);
@@ -1826,7 +1593,6 @@
 				}
 #endif
 				/* pass BD to controller */
-#ifndef TC35815_USE_PACKEDBUFFER
 				if (!lp->rx_skbs[curid].skb) {
 					lp->rx_skbs[curid].skb =
 						alloc_rxbuf_skb(dev,
@@ -1836,21 +1602,11 @@
 						break; /* try on next reception */
 					bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
 				}
-#endif /* TC35815_USE_PACKEDBUFFER */
 				/* Note: BDLength was modified by chip. */
 				bd->BDCtl = cpu_to_le32(BD_CownsBD |
 							(curid << BD_RxBDID_SHIFT) |
 							RX_BUF_SIZE);
-#ifdef TC35815_USE_PACKEDBUFFER
-				lp->fbl_curid = (curid + 1) % RX_BUF_NUM;
-				if (netif_msg_rx_status(lp)) {
-					printk("%s: Entering new FBD %d\n",
-					       dev->name, lp->fbl_curid);
-					dump_frfd(lp->fbl_ptr);
-				}
-#else
 				lp->fbl_count++;
-#endif
 			}
 		}
 
@@ -1882,12 +1638,9 @@
 #endif
 	}
 
-#ifdef TC35815_NAPI
 	return received;
-#endif
 }
 
-#ifdef TC35815_NAPI
 static int tc35815_poll(struct napi_struct *napi, int budget)
 {
 	struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
@@ -1924,13 +1677,8 @@
 	}
 	return received;
 }
-#endif
 
-#ifdef NO_CHECK_CARRIER
-#define TX_STA_ERR	(Tx_ExColl|Tx_Under|Tx_Defer|Tx_LateColl|Tx_TxPar|Tx_SQErr)
-#else
 #define TX_STA_ERR	(Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
-#endif
 
 static void
 tc35815_check_tx_stat(struct net_device *dev, int status)
@@ -1944,16 +1692,12 @@
 	if (status & Tx_TxColl_MASK)
 		dev->stats.collisions += status & Tx_TxColl_MASK;
 
-#ifndef NO_CHECK_CARRIER
 	/* TX4939 does not have NCarr */
 	if (lp->chiptype == TC35815_TX4939)
 		status &= ~Tx_NCarr;
-#ifdef WORKAROUND_LOSTCAR
 	/* WORKAROUND: ignore LostCrS in full duplex operation */
 	if (!lp->link || lp->duplex == DUPLEX_FULL)
 		status &= ~Tx_NCarr;
-#endif
-#endif
 
 	if (!(status & TX_STA_ERR)) {
 		/* no error. */
@@ -1983,12 +1727,10 @@
 		dev->stats.tx_fifo_errors++;
 		msg = "Excessive Deferral.";
 	}
-#ifndef NO_CHECK_CARRIER
 	if (status & Tx_NCarr) {
 		dev->stats.tx_carrier_errors++;
 		msg = "Lost Carrier Sense.";
 	}
-#endif
 	if (status & Tx_LateColl) {
 		dev->stats.tx_aborted_errors++;
 		msg = "Late Collision.";
@@ -2044,11 +1786,7 @@
 			pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
 			lp->tx_skbs[lp->tfd_end].skb = NULL;
 			lp->tx_skbs[lp->tfd_end].skb_dma = 0;
-#ifdef TC35815_NAPI
 			dev_kfree_skb_any(skb);
-#else
-			dev_kfree_skb_irq(skb);
-#endif
 		}
 		txfd->fd.FDSystem = cpu_to_le32(0xffffffff);
 
@@ -2083,9 +1821,7 @@
 
 				/* start DMA Transmitter again */
 				txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
-#ifdef GATHER_TXINT
 				txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
-#endif
 				if (netif_msg_tx_queued(lp)) {
 					printk("%s: start TxFD on queue.\n",
 					       dev->name);
@@ -2112,9 +1848,7 @@
 	struct tc35815_local *lp = netdev_priv(dev);
 
 	netif_stop_queue(dev);
-#ifdef TC35815_NAPI
 	napi_disable(&lp->napi);
-#endif
 	if (lp->phy_dev)
 		phy_stop(lp->phy_dev);
 	cancel_work_sync(&lp->restart_work);
@@ -2198,14 +1932,12 @@
 		(struct tc35815_regs __iomem *)dev->base_addr;
 
 	if (dev->flags & IFF_PROMISC) {
-#ifdef WORKAROUND_100HALF_PROMISC
 		/* With some (all?) 100MHalf HUB, controller will hang
 		 * if we enabled promiscuous mode before linkup... */
 		struct tc35815_local *lp = netdev_priv(dev);
 
 		if (!lp->link)
 			return;
-#endif
 		/* Enable promiscuous mode */
 		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
 	} else if ((dev->flags & IFF_ALLMULTI) ||
@@ -2392,9 +2124,6 @@
 		tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
 	else
 		tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
-#ifdef TC35815_USE_PACKEDBUFFER
-	tc_writel(RxFrag_EnPack | ETH_ZLEN, &tr->RxFragSize);	/* Packing */
-#endif
 	tc_writel(0, &tr->TxPollCtr);	/* Batch mode */
 	tc_writel(TX_THRESHOLD, &tr->TxThrsh);
 	tc_writel(INT_EN_CMD, &tr->Int_En);
@@ -2412,19 +2141,12 @@
 	tc_writel(RX_CTL_CMD, &tr->Rx_Ctl);	/* start MAC receiver */
 
 	/* start MAC transmitter */
-#ifndef NO_CHECK_CARRIER
 	/* TX4939 does not have EnLCarr */
 	if (lp->chiptype == TC35815_TX4939)
 		txctl &= ~Tx_EnLCarr;
-#ifdef WORKAROUND_LOSTCAR
 	/* WORKAROUND: ignore LostCrS in full duplex operation */
 	if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
 		txctl &= ~Tx_EnLCarr;
-#endif
-#endif /* !NO_CHECK_CARRIER */
-#ifdef GATHER_TXINT
-	txctl &= ~Tx_EnComp;	/* disable global tx completion int. */
-#endif
 	tc_writel(txctl, &tr->Tx_Ctl);
 }
 
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 79d4868..492bff6 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -1878,7 +1878,7 @@
 			udelay(50);	/* give hw a chance to clean fifo */
 			continue;
 		}
-		avail = MIN(avail, size);
+		avail = min(avail, size);
 		DBG("about to push  %d bytes starting %p size %d\n", avail,
 		    data, size);
 		bdx_tx_push_desc(priv, data, avail);
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 4fc875e..1241419 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -76,8 +76,6 @@
 #define FIFO_SIZE  4096
 #define FIFO_EXTRA_SPACE            1024
 
-#define MIN(x, y)  ((x) < (y) ? (x) : (y))
-
 #if BITS_PER_LONG == 64
 #    define H32_64(x)  (u32) ((u64)(x) >> 32)
 #    define L32_64(x)  (u32) ((u64)(x) & 0xffffffff)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ba5d3fe..47a4f09 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.102"
-#define DRV_MODULE_RELDATE	"September 1, 2009"
+#define DRV_MODULE_VERSION	"3.103"
+#define DRV_MODULE_RELDATE	"November 2, 2009"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -937,9 +937,10 @@
 	u32 val;
 	struct phy_device *phydev;
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 	case TG3_PHY_ID_BCM50610:
+	case TG3_PHY_ID_BCM50610M:
 		val = MAC_PHYCFG2_50610_LED_MODES;
 		break;
 	case TG3_PHY_ID_BCMAC131:
@@ -1031,7 +1032,7 @@
 		if (is_serdes)
 			tp->phy_addr += 7;
 	} else
-		tp->phy_addr = PHY_ADDR;
+		tp->phy_addr = TG3_PHY_MII_ADDR;
 
 	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
@@ -1062,7 +1063,7 @@
 	tp->mdio_bus->read     = &tg3_mdio_read;
 	tp->mdio_bus->write    = &tg3_mdio_write;
 	tp->mdio_bus->reset    = &tg3_mdio_reset;
-	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
+	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
 
 	for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1084,7 +1085,7 @@
 		return i;
 	}
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	if (!phydev || !phydev->drv) {
 		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
@@ -1096,8 +1097,14 @@
 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 	case TG3_PHY_ID_BCM57780:
 		phydev->interface = PHY_INTERFACE_MODE_GMII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 		break;
 	case TG3_PHY_ID_BCM50610:
+	case TG3_PHY_ID_BCM50610M:
+		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
+				     PHY_BRCM_RX_REFCLK_UNUSED |
+				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
+				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
@@ -1111,6 +1118,7 @@
 	case TG3_PHY_ID_RTL8201E:
 	case TG3_PHY_ID_BCMAC131:
 		phydev->interface = PHY_INTERFACE_MODE_MII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
 		break;
 	}
@@ -1311,7 +1319,7 @@
 	u32 old_tx_mode = tp->tx_mode;
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
-		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
+		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
 	else
 		autoneg = tp->link_config.autoneg;
 
@@ -1348,7 +1356,7 @@
 	u8 oldflowctrl, linkmesg = 0;
 	u32 mac_mode, lcl_adv, rmt_adv;
 	struct tg3 *tp = netdev_priv(dev);
-	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	spin_lock_bh(&tp->lock);
 
@@ -1363,8 +1371,11 @@
 
 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
 			mac_mode |= MAC_MODE_PORT_MODE_MII;
-		else
+		else if (phydev->speed == SPEED_1000 ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
+		else
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
 
 		if (phydev->duplex == DUPLEX_HALF)
 			mac_mode |= MAC_MODE_HALF_DUPLEX;
@@ -1434,7 +1445,7 @@
 	/* Bring the PHY back to a known state. */
 	tg3_bmcr_reset(tp);
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	/* Attach the MAC to the PHY. */
 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
@@ -1461,7 +1472,7 @@
 				      SUPPORTED_Asym_Pause);
 		break;
 	default:
-		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
+		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 		return -EINVAL;
 	}
 
@@ -1479,7 +1490,7 @@
 	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 		return;
 
-	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 	if (tp->link_config.phy_is_low_power) {
 		tp->link_config.phy_is_low_power = 0;
@@ -1499,13 +1510,13 @@
 	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 		return;
 
-	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
+	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 }
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
 	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
-		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
+		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
 	}
 }
@@ -2149,6 +2160,26 @@
 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
 		udelay(40);
 		return;
+	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+		u32 phytest;
+		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+			u32 phy;
+
+			tg3_writephy(tp, MII_ADVERTISE, 0);
+			tg3_writephy(tp, MII_BMCR,
+				     BMCR_ANENABLE | BMCR_ANRESTART);
+
+			tg3_writephy(tp, MII_TG3_FET_TEST,
+				     phytest | MII_TG3_FET_SHADOW_EN);
+			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
+				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
+				tg3_writephy(tp,
+					     MII_TG3_FET_SHDW_AUXMODE4,
+					     phy);
+			}
+			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+		}
+		return;
 	} else if (do_low_power) {
 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
@@ -2474,7 +2505,7 @@
 			struct phy_device *phydev;
 			u32 phyid, advertising;
 
-			phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 			tp->link_config.phy_is_low_power = 1;
 
@@ -3243,15 +3274,6 @@
 			pci_write_config_word(tp->pdev,
 					      tp->pcie_cap + PCI_EXP_LNKCTL,
 					      newlnkctl);
-	} else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
-		u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
-		if (tp->link_config.active_speed == SPEED_100 ||
-		    tp->link_config.active_speed == SPEED_10)
-			newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
-		else
-			newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
-		if (newreg != oldreg)
-			tw32(TG3_PCIE_LNKCTL, newreg);
 	}
 
 	if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -4435,6 +4457,10 @@
 
 	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
 				 PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
 
 	map->skb = skb;
 	pci_unmap_addr_set(map, mapping, mapping);
@@ -5124,7 +5150,8 @@
 		/* Make sure new skb does not cross any 4G boundaries.
 		 * Drop the packet if it does.
 		 */
-		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
+		if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+			    tg3_4g_overflow_test(new_addr, new_skb->len))) {
 			if (!ret)
 				skb_dma_unmap(&tp->pdev->dev, new_skb,
 					      DMA_TO_DEVICE);
@@ -5392,7 +5419,7 @@
 	mss = 0;
 	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
 		struct iphdr *iph;
-		int tcp_opt_len, ip_tcp_len, hdr_len;
+		u32 tcp_opt_len, ip_tcp_len, hdr_len;
 
 		if (skb_header_cloned(skb) &&
 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5423,8 +5450,10 @@
 								 IPPROTO_TCP,
 								 0);
 
-		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
-		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
+		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
+			mss |= hdr_len << 9;
+		else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 			if (tcp_opt_len || iph->ihl > 5) {
 				int tsflags;
 
@@ -5459,9 +5488,18 @@
 
 	would_hit_hwbug = 0;
 
-	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
+	if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
 		would_hit_hwbug = 1;
-	else if (tg3_4g_overflow_test(mapping, len))
+
+	if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+	    tg3_4g_overflow_test(mapping, len))
+		would_hit_hwbug = 1;
+
+	if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
+	    tg3_40bit_overflow_test(tp, mapping, len))
+		would_hit_hwbug = 1;
+
+	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
 		would_hit_hwbug = 1;
 
 	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5482,10 +5520,16 @@
 
 			tnapi->tx_buffers[entry].skb = NULL;
 
-			if (tg3_4g_overflow_test(mapping, len))
+			if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
+			    len <= 8)
 				would_hit_hwbug = 1;
 
-			if (tg3_40bit_overflow_test(tp, mapping, len))
+			if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+			    tg3_4g_overflow_test(mapping, len))
+				would_hit_hwbug = 1;
+
+			if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
+			    tg3_40bit_overflow_test(tp, mapping, len))
 				would_hit_hwbug = 1;
 
 			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
@@ -6580,6 +6624,30 @@
 
 	tg3_mdio_start(tp);
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+		u8 phy_addr;
+
+		phy_addr = tp->phy_addr;
+		tp->phy_addr = TG3_PHY_PCIE_ADDR;
+
+		tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
+			     TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
+		val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
+		      TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
+		      TG3_PCIEPHY_TX0CTRL1_NB_EN;
+		tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
+		udelay(10);
+
+		tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
+			     TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
+		val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
+		      TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
+		tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
+		udelay(10);
+
+		tp->phy_addr = phy_addr;
+	}
+
 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
@@ -7162,15 +7230,9 @@
 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
 
 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
-	}
 
-	if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
-		val = tr32(TG3_PCIE_LNKCTL);
-		if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
-			val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
-		else
-			val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
-		tw32(TG3_PCIE_LNKCTL, val);
+		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
 	}
 
 	/* This works around an issue with Athlon chipsets on
@@ -7602,6 +7664,9 @@
 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
 		val |= WDMAC_MODE_STATUS_TAG_FIX;
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+		val |= WDMAC_MODE_BURST_ALL_DATA;
+
 	tw32_f(WDMAC_MODE, val);
 	udelay(40);
 
@@ -9240,9 +9305,11 @@
 	struct tg3 *tp = netdev_priv(dev);
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+		struct phy_device *phydev;
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_ethtool_gset(phydev, cmd);
 	}
 
 	cmd->supported = (SUPPORTED_Autoneg);
@@ -9281,9 +9348,11 @@
 	struct tg3 *tp = netdev_priv(dev);
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+		struct phy_device *phydev;
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_ethtool_sset(phydev, cmd);
 	}
 
 	if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -9466,7 +9535,7 @@
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
+		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 	} else {
 		u32 bmcr;
 
@@ -9585,7 +9654,7 @@
 			u32 newadv;
 			struct phy_device *phydev;
 
-			phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
 			if (epause->rx_pause) {
 				if (epause->tx_pause)
@@ -10338,7 +10407,10 @@
 	for (i = 14; i < tx_len; i++)
 		tx_data[i] = (u8) (i & 0xff);
 
-	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
 
 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 	       rnapi->coal_now);
@@ -10349,7 +10421,8 @@
 
 	num_pkts = 0;
 
-	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
+	tg3_set_txd(tnapi, tnapi->tx_prod,
+		    skb_shinfo(skb)->dma_head, tx_len, 0, 1);
 
 	tnapi->tx_prod++;
 	num_pkts++;
@@ -10359,8 +10432,8 @@
 
 	udelay(10);
 
-	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
-	for (i = 0; i < 25; i++) {
+	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
+	for (i = 0; i < 35; i++) {
 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 		       coal_now);
 
@@ -10373,7 +10446,7 @@
 			break;
 	}
 
-	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	if (tx_idx != tnapi->tx_prod)
@@ -10565,9 +10638,11 @@
 	int err;
 
 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+		struct phy_device *phydev;
 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
 			return -EAGAIN;
-		return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_mii_ioctl(phydev, data, cmd);
 	}
 
 	switch(cmd) {
@@ -12610,12 +12685,19 @@
 
 	tp->irq_max = 1;
 
-#ifdef TG3_NAPI
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
 		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
 		tp->irq_max = TG3_IRQ_MAX_VECS;
 	}
-#endif
+
+	if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+			tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
+		else {
+			tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
+			tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
+		}
+	}
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
 	     (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
@@ -12926,11 +13008,6 @@
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
 
-	if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
-	     tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
-	    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
-		tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
-
 	err = tg3_mdio_init(tp);
 	if (err)
 		return err;
@@ -13975,8 +14052,7 @@
 		goto err_out_iounmap;
 	}
 
-	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
 		dev->netdev_ops = &tg3_netdev_ops;
 	else
 		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14131,13 +14207,14 @@
 	       tg3_bus_string(tp, str),
 	       dev->dev_addr);
 
-	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
+	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+		struct phy_device *phydev;
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 		printk(KERN_INFO
 		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
-		       tp->dev->name,
-		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
-		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
-	else
+		       tp->dev->name, phydev->drv->name,
+		       dev_name(&phydev->dev));
+	} else
 		printk(KERN_INFO
 		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
 		       tp->dev->name, tg3_phy_string(tp),
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index bab7940..d770da1 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1264,8 +1264,9 @@
 #define  WDMAC_MODE_FIFOURUN_ENAB	 0x00000080
 #define  WDMAC_MODE_FIFOOREAD_ENAB	 0x00000100
 #define  WDMAC_MODE_LNGREAD_ENAB	 0x00000200
-#define  WDMAC_MODE_RX_ACCEL	 	 0x00000400
+#define  WDMAC_MODE_RX_ACCEL		 0x00000400
 #define  WDMAC_MODE_STATUS_TAG_FIX	 0x20000000
+#define  WDMAC_MODE_BURST_ALL_DATA	 0xc0000000
 #define WDMAC_STATUS			0x00004c04
 #define  WDMAC_STATUS_TGTABORT		 0x00000004
 #define  WDMAC_STATUS_MSTABORT		 0x00000008
@@ -1953,10 +1954,34 @@
 #define  NIC_SRAM_MBUF_POOL_BASE5705	0x00010000
 #define  NIC_SRAM_MBUF_POOL_SIZE5705	0x0000e000
 
-/* Currently this is fixed. */
-#define PHY_ADDR		0x01
 
-/* Tigon3 specific PHY MII registers. */
+/* Currently this is fixed. */
+#define TG3_PHY_PCIE_ADDR		0x00
+#define TG3_PHY_MII_ADDR		0x01
+
+
+/*** Tigon3 specific PHY PCIE registers. ***/
+
+#define TG3_PCIEPHY_BLOCK_ADDR		0x1f
+#define  TG3_PCIEPHY_XGXS_BLK1		0x0801
+#define  TG3_PCIEPHY_TXB_BLK		0x0861
+#define  TG3_PCIEPHY_BLOCK_SHIFT	4
+
+/* TG3_PCIEPHY_TXB_BLK */
+#define TG3_PCIEPHY_TX0CTRL1		0x15
+#define  TG3_PCIEPHY_TX0CTRL1_TXOCM	0x0003
+#define  TG3_PCIEPHY_TX0CTRL1_RDCTL	0x0008
+#define  TG3_PCIEPHY_TX0CTRL1_TXCMV	0x0030
+#define  TG3_PCIEPHY_TX0CTRL1_TKSEL	0x0040
+#define  TG3_PCIEPHY_TX0CTRL1_NB_EN	0x0400
+
+/* TG3_PCIEPHY_XGXS_BLK1 */
+#define TG3_PCIEPHY_PWRMGMT4		0x1a
+#define  TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN	0x0038
+#define  TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN	0x4000
+
+
+/*** Tigon3 specific PHY MII registers. ***/
 #define  TG3_BMCR_SPEED1000		0x0040
 
 #define MII_TG3_CTRL			0x09 /* 1000-baseT control register */
@@ -2055,6 +2080,9 @@
 #define MII_TG3_FET_SHDW_MISCCTRL	0x10
 #define  MII_TG3_FET_SHDW_MISCCTRL_MDIX	0x4000
 
+#define MII_TG3_FET_SHDW_AUXMODE4	0x1a
+#define MII_TG3_FET_SHDW_AUXMODE4_SBPD	0x0008
+
 #define MII_TG3_FET_SHDW_AUXSTAT2	0x1b
 #define  MII_TG3_FET_SHDW_AUXSTAT2_APD	0x0020
 
@@ -2756,9 +2784,11 @@
 #define TG3_FLG3_PHY_ENABLE_APD		0x00001000
 #define TG3_FLG3_5755_PLUS		0x00002000
 #define TG3_FLG3_NO_NVRAM		0x00004000
-#define TG3_FLG3_TOGGLE_10_100_L1PLLPD	0x00008000
 #define TG3_FLG3_PHY_IS_FET		0x00010000
 #define TG3_FLG3_ENABLE_RSS		0x00020000
+#define TG3_FLG3_4G_DMA_BNDRY_BUG	0x00080000
+#define TG3_FLG3_40BIT_DMA_LIMIT_BUG	0x00100000
+#define TG3_FLG3_SHORT_DMA_BUG		0x00200000
 
 	struct timer_list		timer;
 	u16				timer_counter;
@@ -2834,6 +2864,7 @@
 #define PHY_REV_BCM5401_C0		0x6
 #define PHY_REV_BCM5411_X0		0x1 /* Found on Netgear GA302T */
 #define TG3_PHY_ID_BCM50610		0x143bd60
+#define TG3_PHY_ID_BCM50610M		0x143bd70
 #define TG3_PHY_ID_BCMAC131		0x143bc70
 #define TG3_PHY_ID_RTL8211C		0x001cc910
 #define TG3_PHY_ID_RTL8201E		0x00008200
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 525bbc5..6a3c751 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -1143,9 +1143,16 @@
                 } else {
 			char **prphase = printphase;
 			char **prerror = printerror;
+			int pnr = err / 16 - 1;
+			int enr = err % 16 - 1;
 			DPRINTK("TR Adapter misc open failure, error code = ");
-			printk("0x%x, Phase: %s, Error: %s\n",
-				err, prphase[err/16 -1], prerror[err%16 -1]);
+			if (pnr < 0 || pnr >= ARRAY_SIZE(printphase) ||
+					enr < 0 ||
+					enr >= ARRAY_SIZE(printerror))
+				printk("0x%x, invalid Phase/Error.", err);
+			else
+				printk("0x%x, Phase: %s, Error: %s\n", err,
+						prphase[pnr], prerror[enr]);
 			printk(" retrying after %ds delay...\n",
 					TR_RETRY_INTERVAL/HZ);
                 }
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index c47237c..32d9356 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -174,7 +174,7 @@
 	    * Ericsson Mobile Broadband Module (all variants)
  	    * Motorola (DM100 and SB4100)
  	    * Broadcom Cable Modem (reference design)
-	    * Toshiba (PCX1100U and F3507g)
+	    * Toshiba (PCX1100U and F3507g/F3607gw)
 	    * ...
 
 	  This driver creates an interface named "ethX", where X depends on
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 71e65fc..71d7ff3 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -552,20 +552,60 @@
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &mbm_info,
 }, {
-	/* Ericsson F3307 */
+	/* Ericsson F3607gw ver 2 */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &mbm_info,
+}, {
+	/* Ericsson F3607gw ver 3 */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &mbm_info,
 }, {
+	/* Ericsson F3307 */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &mbm_info,
+}, {
+	/* Ericsson F3307 ver 2 */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &mbm_info,
+}, {
+	/* Ericsson C3607w */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &mbm_info,
+}, {
 	/* Toshiba F3507g */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &mbm_info,
 }, {
+	/* Toshiba F3607gw */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &mbm_info,
+}, {
+	/* Toshiba F3607gw ver 2 */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &mbm_info,
+}, {
 	/* Dell F3507g */
 	USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &mbm_info,
+}, {
+	/* Dell F3607gw */
+	USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &mbm_info,
+}, {
+	/* Dell F3607gw ver 2 */
+	USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &mbm_info,
 },
 	{ },		// END
 };
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 72470f7..a2b30a1 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -649,6 +649,10 @@
 	USB_DEVICE(0x0fe6, 0x8101),	/* DM9601 USB to Fast Ethernet Adapter */
 	.driver_info = (unsigned long)&dm9601_info,
 	 },
+	{
+	 USB_DEVICE(0x0a46, 0x9000),	/* DM9000E */
+	 .driver_info = (unsigned long)&dm9601_info,
+	 },
 	{},			// END
 };
 
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index e391ef9..3b80e8d 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -471,16 +471,7 @@
 	int result;
 
 	dbg("kaweth_reset(%p)", kaweth);
-	result = kaweth_control(kaweth,
-				usb_sndctrlpipe(kaweth->dev, 0),
-				USB_REQ_SET_CONFIGURATION,
-				0,
-				kaweth->dev->config[0].desc.bConfigurationValue,
-				0,
-				NULL,
-				0,
-				KAWETH_CONTROL_TIMEOUT);
-
+	result = usb_reset_configuration(kaweth->dev);
 	mdelay(10);
 
 	dbg("kaweth_reset() returns %d.",result);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ade5b34..9bed694cd 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -442,7 +442,7 @@
 	return err;
 }
 
-static void veth_dellink(struct net_device *dev)
+static void veth_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct veth_priv *priv;
 	struct net_device *peer;
@@ -450,8 +450,8 @@
 	priv = netdev_priv(dev);
 	peer = priv->peer;
 
-	unregister_netdevice(dev);
-	unregister_netdevice(peer);
+	unregister_netdevice_queue(dev, head);
+	unregister_netdevice_queue(peer, head);
 }
 
 static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 144db63..158f411 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -364,11 +364,6 @@
 module_param(rx_copybreak, int, 0644);
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 
-#ifdef CONFIG_PM
-static DEFINE_SPINLOCK(velocity_dev_list_lock);
-static LIST_HEAD(velocity_dev_list);
-#endif
-
 /*
  *	Internal board variants. At the moment we have only one
  */
@@ -417,14 +412,6 @@
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct velocity_info *vptr = netdev_priv(dev);
 
-#ifdef CONFIG_PM
-	unsigned long flags;
-
-	spin_lock_irqsave(&velocity_dev_list_lock, flags);
-	if (!list_empty(&velocity_dev_list))
-		list_del(&vptr->list);
-	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
-#endif
 	unregister_netdev(dev);
 	iounmap(vptr->mac_regs);
 	pci_release_regions(pdev);
@@ -2577,7 +2564,6 @@
 	vptr->tx.numq = info->txqueue;
 	vptr->multicast_limit = MCAM_SIZE;
 	spin_lock_init(&vptr->lock);
-	INIT_LIST_HEAD(&vptr->list);
 }
 
 /**
@@ -2776,15 +2762,6 @@
 	/* and leave the chip powered down */
 
 	pci_set_power_state(pdev, PCI_D3hot);
-#ifdef CONFIG_PM
-	{
-		unsigned long flags;
-
-		spin_lock_irqsave(&velocity_dev_list_lock, flags);
-		list_add(&vptr->list, &velocity_dev_list);
-		spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
-	}
-#endif
 	velocity_nics++;
 out:
 	return ret;
@@ -3240,20 +3217,10 @@
 {
 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
 	struct net_device *dev = ifa->ifa_dev->dev;
-	struct velocity_info *vptr;
-	unsigned long flags;
 
-	if (dev_net(dev) != &init_net)
-		return NOTIFY_DONE;
-
-	spin_lock_irqsave(&velocity_dev_list_lock, flags);
-	list_for_each_entry(vptr, &velocity_dev_list, list) {
-		if (vptr->dev == dev) {
-			velocity_get_ip(vptr);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
+	if (dev_net(dev) == &init_net &&
+	    dev->netdev_ops == &velocity_netdev_ops)
+		velocity_get_ip(netdev_priv(dev));
 
 	return NOTIFY_DONE;
 }
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 2f00c13..ce894ff 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1499,8 +1499,6 @@
 #define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])
 
 struct velocity_info {
-	struct list_head list;
-
 	struct pci_dev *pdev;
 	struct net_device *dev;
 
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 556512d..22a8ca5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -451,7 +451,7 @@
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
 		tot_sgs += skb_vnet_hdr(skb)->num_sg;
-		kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 	}
 	return tot_sgs;
 }
@@ -514,8 +514,7 @@
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(vi);
 
-	/* Put new one in send queue and do transmit */
-	__skb_queue_head(&vi->send, skb);
+	/* Try to transmit */
 	capacity = xmit_skb(vi, skb);
 
 	/* This can happen with OOM and indirect buffers. */
@@ -529,8 +528,17 @@
 		}
 		return NETDEV_TX_BUSY;
 	}
-
 	vi->svq->vq_ops->kick(vi->svq);
+
+	/*
+	 * Put new one in send queue.  You'd expect we'd need this before
+	 * xmit_skb calls add_buf(), since the callback can be triggered
+	 * immediately after that.  But since the callback just triggers
+	 * another call back here, normal network xmit locking prevents the
+	 * race.
+	 */
+	__skb_queue_head(&vi->send, skb);
+
 	/* Don't wait up for transmitted skbs to be freed. */
 	skb_orphan(skb);
 	nf_reset(skb);
@@ -988,7 +996,7 @@
 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
 };
 
-static struct virtio_driver virtio_net = {
+static struct virtio_driver virtio_net_driver = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
 	.driver.name =	KBUILD_MODNAME,
@@ -1001,12 +1009,12 @@
 
 static int __init init(void)
 {
-	return register_virtio_driver(&virtio_net);
+	return register_virtio_driver(&virtio_net_driver);
 }
 
 static void __exit fini(void)
 {
-	unregister_virtio_driver(&virtio_net);
+	unregister_virtio_driver(&virtio_net_driver);
 }
 module_init(init);
 module_exit(fini);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 44fb0c5..004353a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -481,7 +481,8 @@
 	}
 	rq->uncommitted[ring_idx] += num_allocated;
 
-	dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
+	dev_dbg(&adapter->netdev->dev,
+		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
 		"%u, uncommited %u\n", num_allocated, ring->next2fill,
 		ring->next2comp, rq->uncommitted[ring_idx]);
 
@@ -539,7 +540,8 @@
 		tbi = tq->buf_info + tq->tx_ring.next2fill;
 		tbi->map_type = VMXNET3_MAP_NONE;
 
-		dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+		dev_dbg(&adapter->netdev->dev,
+			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
 			tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
 			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -572,7 +574,8 @@
 		gdesc->dword[2] = dw2 | buf_size;
 		gdesc->dword[3] = 0;
 
-		dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+		dev_dbg(&adapter->netdev->dev,
+			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
 			tq->tx_ring.next2fill, gdesc->txd.addr,
 			gdesc->dword[2], gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -600,7 +603,8 @@
 		gdesc->dword[2] = dw2 | frag->size;
 		gdesc->dword[3] = 0;
 
-		dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
+		dev_dbg(&adapter->netdev->dev,
+			"txd[%u]: 0x%llu %u %u\n",
 			tq->tx_ring.next2fill, gdesc->txd.addr,
 			gdesc->dword[2], gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -697,7 +701,8 @@
 	tdd = tq->data_ring.base + tq->tx_ring.next2fill;
 
 	memcpy(tdd->data, skb->data, ctx->copy_size);
-	dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
+	dev_dbg(&adapter->netdev->dev,
+		"copy %u bytes to dataRing[%u]\n",
 		ctx->copy_size, tq->tx_ring.next2fill);
 	return 1;
 
@@ -808,7 +813,8 @@
 
 	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
 		tq->stats.tx_ring_full++;
-		dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
+		dev_dbg(&adapter->netdev->dev,
+			"tx queue stopped on %s, next2comp %u"
 			" next2fill %u\n", adapter->netdev->name,
 			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
 
@@ -853,7 +859,8 @@
 
 	/* finally flips the GEN bit of the SOP desc */
 	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
-	dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
+	dev_dbg(&adapter->netdev->dev,
+		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
 		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
 		tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
 		gdesc->dword[3]);
@@ -990,7 +997,8 @@
 			if (unlikely(rcd->len == 0)) {
 				/* Pretend the rx buffer is skipped. */
 				BUG_ON(!(rcd->sop && rcd->eop));
-				dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
+				dev_dbg(&adapter->netdev->dev,
+					"rxRing[%u][%u] 0 length\n",
 					ring_idx, idx);
 				goto rcd_done;
 			}
@@ -1314,9 +1322,11 @@
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	int irq;
 
+#ifdef CONFIG_PCI_MSI
 	if (adapter->intr.type == VMXNET3_IT_MSIX)
 		irq = adapter->intr.msix_entries[0].vector;
 	else
+#endif
 		irq = adapter->pdev->irq;
 
 	disable_irq(irq);
@@ -1330,12 +1340,15 @@
 {
 	int err;
 
+#ifdef CONFIG_PCI_MSI
 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
 		/* we only use 1 MSI-X vector */
 		err = request_irq(adapter->intr.msix_entries[0].vector,
 				  vmxnet3_intr, 0, adapter->netdev->name,
 				  adapter->netdev);
-	} else if (adapter->intr.type == VMXNET3_IT_MSI) {
+	} else
+#endif
+	if (adapter->intr.type == VMXNET3_IT_MSI) {
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
 				  adapter->netdev->name, adapter->netdev);
 	} else {
@@ -1376,6 +1389,7 @@
 	       adapter->intr.num_intrs <= 0);
 
 	switch (adapter->intr.type) {
+#ifdef CONFIG_PCI_MSI
 	case VMXNET3_IT_MSIX:
 	{
 		int i;
@@ -1385,6 +1399,7 @@
 				 adapter->netdev);
 		break;
 	}
+#endif
 	case VMXNET3_IT_MSI:
 		free_irq(adapter->pdev->irq, adapter->netdev);
 		break;
@@ -1676,7 +1691,8 @@
 	int err;
 	u32 ret;
 
-	dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
+	dev_dbg(&adapter->netdev->dev,
+		"%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
 		" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
 		adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
 		adapter->rx_queue.rx_ring[0].size,
@@ -2134,6 +2150,7 @@
 	if (adapter->intr.type == VMXNET3_IT_AUTO) {
 		int err;
 
+#ifdef CONFIG_PCI_MSI
 		adapter->intr.msix_entries[0].entry = 0;
 		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
 				      VMXNET3_LINUX_MAX_MSIX_VECT);
@@ -2142,6 +2159,7 @@
 			adapter->intr.type = VMXNET3_IT_MSIX;
 			return;
 		}
+#endif
 
 		err = pci_enable_msi(adapter->pdev);
 		if (!err) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 6bb9157..4450816 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -27,15 +27,11 @@
 #ifndef _VMXNET3_INT_H
 #define _VMXNET3_INT_H
 
-#include <linux/types.h>
 #include <linux/ethtool.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
-#include <linux/ethtool.h>
 #include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/ioport.h>
@@ -59,7 +55,6 @@
 #include <linux/if_vlan.h>
 #include <linux/if_arp.h>
 #include <linux/inetdevice.h>
-#include <linux/dst.h>
 
 #include "vmxnet3_defs.h"
 
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 63d0f89..e21358e 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -3612,11 +3612,12 @@
 		device_config->vp_config[i].fifo.enable =
 						VXGE_HW_FIFO_ENABLE;
 		device_config->vp_config[i].fifo.max_frags =
-				MAX_SKB_FRAGS;
+				MAX_SKB_FRAGS + 1;
 		device_config->vp_config[i].fifo.memblock_size =
 			VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
 
-		txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd);
+		txdl_size = device_config->vp_config[i].fifo.max_frags *
+				sizeof(struct vxge_hw_fifo_txd);
 		txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
 
 		device_config->vp_config[i].fifo.fifo_blocks =
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index fa66248..77c2a75 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -18,6 +18,6 @@
 #define VXGE_VERSION_MAJOR	"2"
 #define VXGE_VERSION_MINOR	"0"
 #define VXGE_VERSION_FIX	"6"
-#define VXGE_VERSION_BUILD	"18707"
+#define VXGE_VERSION_BUILD	"18937"
 #define VXGE_VERSION_FOR	"k"
 #endif
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index d623b3d..3f70338 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -31,6 +31,14 @@
 
 	  If unsure, it is safe to select M (module).
 
+config WIMAX_IWMC3200_SDIO
+	bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO"
+	depends on WIMAX_I2400M_SDIO
+	select IWMC3200TOP
+	help
+	  Select if you have a device based on the Intel Multicom WiMAX
+	  Connection 3200 over SDIO.
+
 config WIMAX_I2400M_DEBUG_LEVEL
 	int "WiMAX i2400m debug level"
 	depends on WIMAX_I2400M
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7116a1a..4eec87c 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -4790,9 +4790,8 @@
 static int get_dec_u16( char *buffer, int *start, int limit ) {
 	u16 value;
 	int valid = 0;
-	for( value = 0; buffer[*start] >= '0' &&
-		     buffer[*start] <= '9' &&
-		     *start < limit; (*start)++ ) {
+	for (value = 0; *start < limit && buffer[*start] >= '0' &&
+			buffer[*start] <= '9'; (*start)++) {
 		valid = 1;
 		value *= 10;
 		value += buffer[*start] - '0';
@@ -5660,7 +5659,8 @@
 
 	pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
 	pci_save_state(pdev);
-	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
 }
 
 static int airo_pci_resume(struct pci_dev *pdev)
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index ec034af..9f94598 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -231,7 +231,7 @@
 	struct sk_buff_head tx_status_ampdu;
 	spinlock_t tx_ampdu_list_lock;
 	struct list_head tx_ampdu_list;
-	unsigned int tx_ampdu_pending;
+	atomic_t tx_ampdu_pending;
 
 	/* rxstream mpdu merge */
 	struct ar9170_rxstream_mpdu_merge rx_mpdu;
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 8811314..701ddb7 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -152,14 +152,14 @@
 #define		AR9170_MAC_REG_FTF_BIT14		BIT(14)
 #define		AR9170_MAC_REG_FTF_BIT15		BIT(15)
 #define		AR9170_MAC_REG_FTF_BAR			BIT(24)
-#define		AR9170_MAC_REG_FTF_BIT25		BIT(25)
+#define		AR9170_MAC_REG_FTF_BA			BIT(25)
 #define		AR9170_MAC_REG_FTF_PSPOLL		BIT(26)
 #define		AR9170_MAC_REG_FTF_RTS			BIT(27)
 #define		AR9170_MAC_REG_FTF_CTS			BIT(28)
 #define		AR9170_MAC_REG_FTF_ACK			BIT(29)
 #define		AR9170_MAC_REG_FTF_CFE			BIT(30)
 #define		AR9170_MAC_REG_FTF_CFE_ACK		BIT(31)
-#define		AR9170_MAC_REG_FTF_DEFAULTS		0x0500ffff
+#define		AR9170_MAC_REG_FTF_DEFAULTS		0x0700ffff
 #define		AR9170_MAC_REG_FTF_MONITOR		0xfd00ffff
 
 #define AR9170_MAC_REG_RX_TOTAL			(AR9170_MAC_REG_BASE + 0x6A0)
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index de0ba2b..7e59b82 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -414,9 +414,9 @@
 
 	skb_queue_tail(&ar->tx_status_ampdu, skb);
 	ar9170_tx_fake_ampdu_status(ar);
-	ar->tx_ampdu_pending--;
 
-	if (!list_empty(&ar->tx_ampdu_list) && !ar->tx_ampdu_pending)
+	if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
+	    !list_empty(&ar->tx_ampdu_list))
 		ar9170_tx_ampdu(ar);
 }
 
@@ -1248,6 +1248,7 @@
 	ar->global_ampdu_density = 6;
 	ar->global_ampdu_factor = 3;
 
+	atomic_set(&ar->tx_ampdu_pending, 0);
 	ar->bad_hw_nagger = jiffies;
 
 	err = ar->open(ar);
@@ -1773,7 +1774,7 @@
 					  msecs_to_jiffies(AR9170_TX_TIMEOUT);
 
 			if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
-				ar->tx_ampdu_pending++;
+				atomic_inc(&ar->tx_ampdu_pending);
 
 #ifdef AR9170_QUEUE_DEBUG
 			printk(KERN_DEBUG "%s: send frame q:%d =>\n",
@@ -1784,7 +1785,7 @@
 			err = ar->tx(ar, skb);
 			if (unlikely(err)) {
 				if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
-					ar->tx_ampdu_pending--;
+					atomic_dec(&ar->tx_ampdu_pending);
 
 				frames_failed++;
 				dev_kfree_skb_any(skb);
@@ -1931,7 +1932,7 @@
 	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
 		bool run = ar9170_tx_ampdu_queue(ar, skb);
 
-		if (run || !ar->tx_ampdu_pending)
+		if (run || !atomic_read(&ar->tx_ampdu_pending))
 			ar9170_tx_ampdu(ar);
 	} else {
 		unsigned int queue = skb_get_queue_mapping(skb);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e974e58..6bdcdf6 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -108,15 +108,15 @@
 		return ;
 
 	spin_lock_irqsave(&aru->tx_urb_lock, flags);
-	if (aru->tx_submitted_urbs >= AR9170_NUM_TX_URBS) {
+	if (atomic_read(&aru->tx_submitted_urbs) >= AR9170_NUM_TX_URBS) {
 		spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
 		return ;
 	}
-	aru->tx_submitted_urbs++;
+	atomic_inc(&aru->tx_submitted_urbs);
 
 	urb = usb_get_from_anchor(&aru->tx_pending);
 	if (!urb) {
-		aru->tx_submitted_urbs--;
+		atomic_dec(&aru->tx_submitted_urbs);
 		spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
 
 		return ;
@@ -133,7 +133,7 @@
 				err);
 
 		usb_unanchor_urb(urb);
-		aru->tx_submitted_urbs--;
+		atomic_dec(&aru->tx_submitted_urbs);
 		ar9170_tx_callback(&aru->common, urb->context);
 	}
 
@@ -151,7 +151,7 @@
 		return ;
 	}
 
-	aru->tx_submitted_urbs--;
+	atomic_dec(&aru->tx_submitted_urbs);
 
 	ar9170_tx_callback(&aru->common, skb);
 
@@ -794,7 +794,7 @@
 	spin_lock_init(&aru->tx_urb_lock);
 
 	aru->tx_pending_urbs = 0;
-	aru->tx_submitted_urbs = 0;
+	atomic_set(&aru->tx_submitted_urbs, 0);
 
 	aru->common.stop = ar9170_usb_stop;
 	aru->common.flush = ar9170_usb_flush;
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index d098f4d..a2ce3b1 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -67,7 +67,7 @@
 	bool req_one_stage_fw;
 
 	spinlock_t tx_urb_lock;
-	unsigned int tx_submitted_urbs;
+	atomic_t tx_submitted_urbs;
 	unsigned int tx_pending_urbs;
 
 	struct completion cmd_wait;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 647d826..6a2a967 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -198,6 +198,7 @@
 #define AR5K_TUNE_CWMAX_11B			1023
 #define AR5K_TUNE_CWMAX_XR			7
 #define AR5K_TUNE_NOISE_FLOOR			-72
+#define AR5K_TUNE_CCA_MAX_GOOD_VALUE		-95
 #define AR5K_TUNE_MAX_TXPOWER			63
 #define AR5K_TUNE_DEFAULT_TXPOWER		25
 #define AR5K_TUNE_TPC_TXPOWER			false
@@ -1006,6 +1007,14 @@
 	} cap_queues;
 };
 
+/* size of noise floor history (keep it a power of two) */
+#define ATH5K_NF_CAL_HIST_MAX	8
+struct ath5k_nfcal_hist
+{
+	s16 index;				/* current index into nfval */
+	s16 nfval[ATH5K_NF_CAL_HIST_MAX];	/* last few noise floors */
+};
+
 
 /***************************************\
   HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1112,6 +1121,8 @@
 		struct ieee80211_channel r_last_channel;
 	} ah_radar;
 
+	struct ath5k_nfcal_hist ah_nfcal_hist;
+
 	/* noise floor from last periodic calibration */
 	s32			ah_noise_floor;
 
@@ -1274,8 +1285,10 @@
 extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
 extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
 /* PHY calibration */
+void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
 extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
 extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
+extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
 extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
 /* Spur mitigation */
 bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 92995ad..4228444 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -331,6 +331,8 @@
 
 	ath5k_hw_rfgain_opt_init(ah);
 
+	ath5k_hw_init_nfcal_hist(ah);
+
 	/* turn on HW LEDs */
 	ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
 
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 1a039f2..8959907 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1124,77 +1124,148 @@
 		ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
 		AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
 	}
-
 }
 
-/**
- * ath5k_hw_noise_floor_calibration - perform PHY noise floor calibration
- *
- * @ah: struct ath5k_hw pointer we are operating on
- * @freq: the channel frequency, just used for error logging
- *
- * This function performs a noise floor calibration of the PHY and waits for
- * it to complete. Then the noise floor value is compared to some maximum
- * noise floor we consider valid.
- *
- * Note that this is different from what the madwifi HAL does: it reads the
- * noise floor and afterwards initiates the calibration. Since the noise floor
- * calibration can take some time to finish, depending on the current channel
- * use, that avoids the occasional timeout warnings we are seeing now.
- *
- * See the following link for an Atheros patent on noise floor calibration:
- * http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL \
- * &p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=7245893.PN.&OS=PN/7
- *
- * XXX: Since during noise floor calibration antennas are detached according to
- * the patent, we should stop tx queues here.
- */
-int
-ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
+static int sign_extend(int val, const int nbits)
 {
-	int ret;
-	unsigned int i;
-	s32 noise_floor;
+	int order = BIT(nbits-1);
+	return (val ^ order) - order;
+}
 
-	/*
-	 * Enable noise floor calibration
-	 */
-	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
-				AR5K_PHY_AGCCTL_NF);
+static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
+{
+	s32 val;
 
-	ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
-			AR5K_PHY_AGCCTL_NF, 0, false);
-	if (ret) {
-		ATH5K_ERR(ah->ah_sc,
-			"noise floor calibration timeout (%uMHz)\n", freq);
-		return -EAGAIN;
-	}
+	val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
+	return sign_extend(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 9);
+}
 
-	/* Wait until the noise floor is calibrated and read the value */
-	for (i = 20; i > 0; i--) {
-		mdelay(1);
-		noise_floor = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
-		noise_floor = AR5K_PHY_NF_RVAL(noise_floor);
-		if (noise_floor & AR5K_PHY_NF_ACTIVE) {
-			noise_floor = AR5K_PHY_NF_AVAL(noise_floor);
+void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
+{
+	int i;
 
-			if (noise_floor <= AR5K_TUNE_NOISE_FLOOR)
-				break;
+	ah->ah_nfcal_hist.index = 0;
+	for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++)
+		ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
+}
+
+static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
+{
+	struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
+	hist->index = (hist->index + 1) & (ATH5K_NF_CAL_HIST_MAX-1);
+	hist->nfval[hist->index] = noise_floor;
+}
+
+static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
+{
+	s16 sort[ATH5K_NF_CAL_HIST_MAX];
+	s16 tmp;
+	int i, j;
+
+	memcpy(sort, ah->ah_nfcal_hist.nfval, sizeof(sort));
+	for (i = 0; i < ATH5K_NF_CAL_HIST_MAX - 1; i++) {
+		for (j = 1; j < ATH5K_NF_CAL_HIST_MAX - i; j++) {
+			if (sort[j] > sort[j-1]) {
+				tmp = sort[j];
+				sort[j] = sort[j-1];
+				sort[j-1] = tmp;
+			}
 		}
 	}
+	for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) {
+		ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+			"cal %d:%d\n", i, sort[i]);
+	}
+	return sort[(ATH5K_NF_CAL_HIST_MAX-1) / 2];
+}
 
-	ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
-		"noise floor %d\n", noise_floor);
+/*
+ * When we tell the hardware to perform a noise floor calibration
+ * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
+ * sample-and-hold the minimum noise level seen at the antennas.
+ * This value is then stored in a ring buffer of recently measured
+ * noise floor values so we have a moving window of the last few
+ * samples.
+ *
+ * The median of the values in the history is then loaded into the
+ * hardware for its own use for RSSI and CCA measurements.
+ */
+void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+{
+	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+	u32 val;
+	s16 nf, threshold;
+	u8 ee_mode;
 
-	if (noise_floor > AR5K_TUNE_NOISE_FLOOR) {
-		ATH5K_ERR(ah->ah_sc,
-			"noise floor calibration failed (%uMHz)\n", freq);
-		return -EAGAIN;
+	/* keep last value if calibration hasn't completed */
+	if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) {
+		ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+			"NF did not complete in calibration window\n");
+
+		return;
 	}
 
-	ah->ah_noise_floor = noise_floor;
+	switch (ah->ah_current_channel->hw_value & CHANNEL_MODES) {
+	case CHANNEL_A:
+	case CHANNEL_T:
+	case CHANNEL_XR:
+		ee_mode = AR5K_EEPROM_MODE_11A;
+		break;
+	case CHANNEL_G:
+	case CHANNEL_TG:
+		ee_mode = AR5K_EEPROM_MODE_11G;
+		break;
+	default:
+	case CHANNEL_B:
+		ee_mode = AR5K_EEPROM_MODE_11B;
+		break;
+	}
 
-	return 0;
+
+	/* completed NF calibration, test threshold */
+	nf = ath5k_hw_read_measured_noise_floor(ah);
+	threshold = ee->ee_noise_floor_thr[ee_mode];
+
+	if (nf > threshold) {
+		ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+			"noise floor failure detected; "
+			"read %d, threshold %d\n",
+			nf, threshold);
+
+		nf = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
+	}
+
+	ath5k_hw_update_nfcal_hist(ah, nf);
+	nf = ath5k_hw_get_median_noise_floor(ah);
+
+	/* load noise floor (in .5 dBm) so the hardware will use it */
+	val = ath5k_hw_reg_read(ah, AR5K_PHY_NF) & ~AR5K_PHY_NF_M;
+	val |= (nf * 2) & AR5K_PHY_NF_M;
+	ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
+
+	AR5K_REG_MASKED_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
+		~(AR5K_PHY_AGCCTL_NF_EN | AR5K_PHY_AGCCTL_NF_NOUPDATE));
+
+	ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
+		0, false);
+
+	/*
+	 * Load a high max CCA Power value (-50 dBm in .5 dBm units)
+	 * so that we're not capped by the median we just loaded.
+	 * This will be used as the initial value for the next noise
+	 * floor calibration.
+	 */
+	val = (val & ~AR5K_PHY_NF_M) | ((-50 * 2) & AR5K_PHY_NF_M);
+	ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
+	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+		AR5K_PHY_AGCCTL_NF_EN |
+		AR5K_PHY_AGCCTL_NF_NOUPDATE |
+		AR5K_PHY_AGCCTL_NF);
+
+	ah->ah_noise_floor = nf;
+
+	ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+		"noise floor calibrated: %d\n", nf);
 }
 
 /*
@@ -1287,7 +1358,7 @@
 		return ret;
 	}
 
-	ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
+	ath5k_hw_update_noise_floor(ah);
 
 	/*
 	 * Re-enable RX/TX and beacons
@@ -1360,7 +1431,7 @@
 	 * since noise floor calibration interrupts rx path while I/Q
 	 * calibration doesn't. We don't need to run noise floor calibration
 	 * as often as I/Q calibration.*/
-	ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
+	ath5k_hw_update_noise_floor(ah);
 
 	/* Initiate a gain_F calibration */
 	ath5k_hw_request_rfgain_probe(ah);
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 64227ab..4cb9c5d 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -2033,17 +2033,14 @@
 #define	AR5K_PHY_AGCCTL_NF_NOUPDATE	0x00020000	/* Don't update nf automaticaly */
 
 /*
- * PHY noise floor status register
+ * PHY noise floor status register (CCA = Clear Channel Assessment)
  */
 #define AR5K_PHY_NF			0x9864			/* Register address */
-#define AR5K_PHY_NF_M			0x000001ff	/* Noise floor mask */
-#define AR5K_PHY_NF_ACTIVE		0x00000100	/* Noise floor calibration still active */
-#define AR5K_PHY_NF_RVAL(_n)		(((_n) >> 19) & AR5K_PHY_NF_M)
-#define AR5K_PHY_NF_AVAL(_n)		(-((_n) ^ AR5K_PHY_NF_M) + 1)
-#define AR5K_PHY_NF_SVAL(_n)		(((_n) & AR5K_PHY_NF_M) | (1 << 9))
+#define AR5K_PHY_NF_M			0x000001ff	/* Noise floor, written to hardware in 1/2 dBm units */
+#define AR5K_PHY_NF_SVAL(_n)		(((_n) & AR5K_PHY_NF_M) | (1 << 9))
 #define	AR5K_PHY_NF_THRESH62		0x0007f000	/* Thresh62 -check ANI patent- (field) */
 #define	AR5K_PHY_NF_THRESH62_S		12
-#define	AR5K_PHY_NF_MINCCA_PWR		0x0ff80000	/* ??? */
+#define	AR5K_PHY_NF_MINCCA_PWR		0x0ff80000	/* Minimum measured noise level, read from hardware in 1 dBm units */
 #define	AR5K_PHY_NF_MINCCA_PWR_S	19
 
 /*
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 3dab3d8..62954fc 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -1293,7 +1293,7 @@
 	 * out and/or noise floor calibration might timeout.
 	 */
 	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
-				AR5K_PHY_AGCCTL_CAL);
+				AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
 
 	/* At the same time start I/Q calibration for QAM constellation
 	 * -no need for CCK- */
@@ -1314,21 +1314,6 @@
 			channel->center_freq);
 	}
 
-	/*
-	 * If we run NF calibration before AGC, it always times out.
-	 * Binary HAL starts NF and AGC calibration at the same time
-	 * and only waits for AGC to finish. Also if AGC or NF cal.
-	 * times out, reset doesn't fail on binary HAL. I believe
-	 * that's wrong because since rx path is routed to a detector,
-	 * if cal. doesn't finish we won't have RX. Sam's HAL for AR5210/5211
-	 * enables noise floor calibration after offset calibration and if noise
-	 * floor calibration fails, reset fails. I believe that's
-	 * a better approach, we just need to find a polling interval
-	 * that suits best, even if reset continues we need to make
-	 * sure that rx path is ready.
-	 */
-	ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
-
 	/* Restore antenna mode */
 	ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
 
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 33c9e816..25531f2 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -29,15 +29,13 @@
 
 static void ath_ahb_cleanup(struct ath_common *common)
 {
-	struct ath_hw *ah = (struct ath_hw *) common->ah;
-	struct ath_softc *sc = ah->ah_sc;
+	struct ath_softc *sc = (struct ath_softc *)common->priv;
 	iounmap(sc->mem);
 }
 
 static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
 {
-	struct ath_hw *ah = (struct ath_hw *) common->ah;
-	struct ath_softc *sc = ah->ah_sc;
+	struct ath_softc *sc = (struct ath_softc *)common->priv;
 	struct platform_device *pdev = to_platform_device(sc->dev);
 	struct ath9k_platform_data *pdata;
 
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index f46bd05d..551f880 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -813,7 +813,7 @@
 	}
 }
 
-static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
+static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
 {
 	u32 regVal;
 	unsigned int i;
@@ -889,10 +889,19 @@
 		REG_WRITE(ah, 0x7834, regVal);
 	}
 
-	/*  Empirical offset correction  */
-#if 0
-	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0x20);
-#endif
+	regVal = (regVal >> 20) & 0x7f;
+
+	/* Update PA cal info */
+	if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
+		if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+			ah->pacal_info.max_skipcount =
+				2 * ah->pacal_info.max_skipcount;
+		ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+	} else {
+		ah->pacal_info.max_skipcount = 1;
+		ah->pacal_info.skipcount = 0;
+		ah->pacal_info.prev_offset = regVal;
+	}
 
 	regVal = REG_READ(ah, 0x7834);
 	regVal |= 0x1;
@@ -1043,7 +1052,7 @@
 	if (longcal) {
 		/* Do periodic PAOffset Cal */
 		if (AR_SREV_9271(ah))
-			ath9k_hw_9271_pa_cal(ah);
+			ath9k_hw_9271_pa_cal(ah, false);
 		else if (AR_SREV_9285_11_OR_LATER(ah)) {
 			if (!ah->pacal_info.skipcount)
 				ath9k_hw_9285_pa_cal(ah, false);
@@ -1070,6 +1079,7 @@
 }
 EXPORT_SYMBOL(ath9k_hw_calibrate);
 
+/* Carrier leakage Calibration fix */
 static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -1115,7 +1125,7 @@
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	if (AR_SREV_9285_12_OR_LATER(ah)) {
+	if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
 		if (!ar9285_clc(ah, chan))
 			return false;
 	} else {
@@ -1151,7 +1161,9 @@
 	}
 
 	/* Do PA Calibration */
-	if (AR_SREV_9285_11_OR_LATER(ah))
+	if (AR_SREV_9271(ah))
+		ath9k_hw_9271_pa_cal(ah, true);
+	else if (AR_SREV_9285_11_OR_LATER(ah))
 		ath9k_hw_9285_pa_cal(ah, true);
 
 	/* Do NF Calibration after DC offset and other calibrations */
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 0639364..bb72b46 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -679,7 +679,7 @@
 		return rate;
 
 	if (rate_table->info[rate].valid_single_stream &&
-	    !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG));
+	    !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG))
 		return rate;
 
 	/* This should not happen */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index a8620b1..2a4efcb 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2079,7 +2079,9 @@
 	if (needreset) {
 		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
 			  "tx hung, resetting the chip\n");
+		ath9k_ps_wakeup(sc);
 		ath_reset(sc, false);
+		ath9k_ps_restore(sc);
 	}
 
 	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index c1dd857..a1c3952 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -65,10 +65,13 @@
 	CTRY_ALGERIA = 12,
 	CTRY_ARGENTINA = 32,
 	CTRY_ARMENIA = 51,
+	CTRY_ARUBA = 533,
 	CTRY_AUSTRALIA = 36,
 	CTRY_AUSTRIA = 40,
 	CTRY_AZERBAIJAN = 31,
 	CTRY_BAHRAIN = 48,
+	CTRY_BANGLADESH = 50,
+	CTRY_BARBADOS = 52,
 	CTRY_BELARUS = 112,
 	CTRY_BELGIUM = 56,
 	CTRY_BELIZE = 84,
@@ -77,6 +80,7 @@
 	CTRY_BRAZIL = 76,
 	CTRY_BRUNEI_DARUSSALAM = 96,
 	CTRY_BULGARIA = 100,
+	CTRY_CAMBODIA = 116,
 	CTRY_CANADA = 124,
 	CTRY_CHILE = 152,
 	CTRY_CHINA = 156,
@@ -97,7 +101,11 @@
 	CTRY_GEORGIA = 268,
 	CTRY_GERMANY = 276,
 	CTRY_GREECE = 300,
+	CTRY_GREENLAND = 304,
+	CTRY_GRENEDA = 308,
+	CTRY_GUAM = 316,
 	CTRY_GUATEMALA = 320,
+	CTRY_HAITI = 332,
 	CTRY_HONDURAS = 340,
 	CTRY_HONG_KONG = 344,
 	CTRY_HUNGARY = 348,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 9847af7..248c670 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -288,13 +288,16 @@
 	{CTRY_DEFAULT, FCC1_FCCA, "CO"},
 	{CTRY_ALBANIA, NULL1_WORLD, "AL"},
 	{CTRY_ALGERIA, NULL1_WORLD, "DZ"},
-	{CTRY_ARGENTINA, APL3_WORLD, "AR"},
+	{CTRY_ARGENTINA, FCC3_WORLD, "AR"},
 	{CTRY_ARMENIA, ETSI4_WORLD, "AM"},
+	{CTRY_ARUBA, ETSI1_WORLD, "AW"},
 	{CTRY_AUSTRALIA, FCC2_WORLD, "AU"},
 	{CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
 	{CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
 	{CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
 	{CTRY_BAHRAIN, APL6_WORLD, "BH"},
+	{CTRY_BANGLADESH, NULL1_WORLD, "BD"},
+	{CTRY_BARBADOS, FCC2_WORLD, "BB"},
 	{CTRY_BELARUS, ETSI1_WORLD, "BY"},
 	{CTRY_BELGIUM, ETSI1_WORLD, "BE"},
 	{CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
@@ -304,13 +307,14 @@
 	{CTRY_BRAZIL, FCC3_WORLD, "BR"},
 	{CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"},
 	{CTRY_BULGARIA, ETSI6_WORLD, "BG"},
-	{CTRY_CANADA, FCC2_FCCA, "CA"},
+	{CTRY_CAMBODIA, ETSI1_WORLD, "KH"},
+	{CTRY_CANADA, FCC3_FCCA, "CA"},
 	{CTRY_CANADA2, FCC6_FCCA, "CA"},
 	{CTRY_CHILE, APL6_WORLD, "CL"},
 	{CTRY_CHINA, APL1_WORLD, "CN"},
 	{CTRY_COLOMBIA, FCC1_FCCA, "CO"},
 	{CTRY_COSTA_RICA, FCC1_WORLD, "CR"},
-	{CTRY_CROATIA, ETSI3_WORLD, "HR"},
+	{CTRY_CROATIA, ETSI1_WORLD, "HR"},
 	{CTRY_CYPRUS, ETSI1_WORLD, "CY"},
 	{CTRY_CZECH, ETSI3_WORLD, "CZ"},
 	{CTRY_DENMARK, ETSI1_WORLD, "DK"},
@@ -324,18 +328,22 @@
 	{CTRY_GEORGIA, ETSI4_WORLD, "GE"},
 	{CTRY_GERMANY, ETSI1_WORLD, "DE"},
 	{CTRY_GREECE, ETSI1_WORLD, "GR"},
+	{CTRY_GREENLAND, ETSI1_WORLD, "GL"},
+	{CTRY_GRENEDA, FCC3_FCCA, "GD"},
+	{CTRY_GUAM, FCC1_FCCA, "GU"},
 	{CTRY_GUATEMALA, FCC1_FCCA, "GT"},
+	{CTRY_HAITI, ETSI1_WORLD, "HT"},
 	{CTRY_HONDURAS, NULL1_WORLD, "HN"},
-	{CTRY_HONG_KONG, FCC2_WORLD, "HK"},
+	{CTRY_HONG_KONG, FCC3_WORLD, "HK"},
 	{CTRY_HUNGARY, ETSI1_WORLD, "HU"},
 	{CTRY_ICELAND, ETSI1_WORLD, "IS"},
 	{CTRY_INDIA, APL6_WORLD, "IN"},
-	{CTRY_INDONESIA, APL1_WORLD, "ID"},
+	{CTRY_INDONESIA, NULL1_WORLD, "ID"},
 	{CTRY_IRAN, APL1_WORLD, "IR"},
 	{CTRY_IRELAND, ETSI1_WORLD, "IE"},
 	{CTRY_ISRAEL, NULL1_WORLD, "IL"},
 	{CTRY_ITALY, ETSI1_WORLD, "IT"},
-	{CTRY_JAMAICA, ETSI1_WORLD, "JM"},
+	{CTRY_JAMAICA, FCC3_WORLD, "JM"},
 
 	{CTRY_JAPAN, MKK1_MKKA, "JP"},
 	{CTRY_JAPAN1, MKK1_MKKB, "JP"},
@@ -402,7 +410,7 @@
 	{CTRY_KOREA_ROC, APL9_WORLD, "KR"},
 	{CTRY_KOREA_ROC2, APL2_WORLD, "K2"},
 	{CTRY_KOREA_ROC3, APL9_WORLD, "K3"},
-	{CTRY_KUWAIT, NULL1_WORLD, "KW"},
+	{CTRY_KUWAIT, ETSI3_WORLD, "KW"},
 	{CTRY_LATVIA, ETSI1_WORLD, "LV"},
 	{CTRY_LEBANON, NULL1_WORLD, "LB"},
 	{CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"},
@@ -414,13 +422,13 @@
 	{CTRY_MALTA, ETSI1_WORLD, "MT"},
 	{CTRY_MEXICO, FCC1_FCCA, "MX"},
 	{CTRY_MONACO, ETSI4_WORLD, "MC"},
-	{CTRY_MOROCCO, NULL1_WORLD, "MA"},
+	{CTRY_MOROCCO, APL4_WORLD, "MA"},
 	{CTRY_NEPAL, APL1_WORLD, "NP"},
 	{CTRY_NETHERLANDS, ETSI1_WORLD, "NL"},
 	{CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"},
 	{CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"},
 	{CTRY_NORWAY, ETSI1_WORLD, "NO"},
-	{CTRY_OMAN, APL6_WORLD, "OM"},
+	{CTRY_OMAN, FCC3_WORLD, "OM"},
 	{CTRY_PAKISTAN, NULL1_WORLD, "PK"},
 	{CTRY_PANAMA, FCC1_FCCA, "PA"},
 	{CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"},
@@ -429,7 +437,7 @@
 	{CTRY_POLAND, ETSI1_WORLD, "PL"},
 	{CTRY_PORTUGAL, ETSI1_WORLD, "PT"},
 	{CTRY_PUERTO_RICO, FCC1_FCCA, "PR"},
-	{CTRY_QATAR, NULL1_WORLD, "QA"},
+	{CTRY_QATAR, APL1_WORLD, "QA"},
 	{CTRY_ROMANIA, NULL1_WORLD, "RO"},
 	{CTRY_RUSSIA, NULL1_WORLD, "RU"},
 	{CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
@@ -445,7 +453,7 @@
 	{CTRY_SYRIA, NULL1_WORLD, "SY"},
 	{CTRY_TAIWAN, APL3_FCCA, "TW"},
 	{CTRY_THAILAND, FCC3_WORLD, "TH"},
-	{CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT"},
+	{CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"},
 	{CTRY_TUNISIA, ETSI3_WORLD, "TN"},
 	{CTRY_TURKEY, ETSI3_WORLD, "TR"},
 	{CTRY_UKRAINE, NULL1_WORLD, "UA"},
@@ -456,7 +464,7 @@
 	 * would need to assign new special alpha2 to CRDA db as with the world
 	 * regdomain and use another alpha2 */
 	{CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"},
-	{CTRY_URUGUAY, APL2_WORLD, "UY"},
+	{CTRY_URUGUAY, FCC3_WORLD, "UY"},
 	{CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"},
 	{CTRY_VENEZUELA, APL2_ETSIC, "VE"},
 	{CTRY_VIET_NAM, NULL1_WORLD, "VN"},
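The table above maps each CTRY_* country code to a regulatory-domain pair plus an ISO alpha2 string. As a rough illustration of how such a table is typically consulted, here is a small userspace sketch; the entries and names below are hypothetical stand-ins, not the driver's actual API.

/* Hypothetical alpha2 -> regulatory-pair lookup over a static country
 * table like the one being updated above. */
#include <stdio.h>
#include <string.h>

struct country_map {
	const char *alpha2;	/* ISO 3166-1 alpha2 code */
	const char *regpair;	/* regulatory domain pair */
};

static const struct country_map countries[] = {
	{ "AR", "FCC3_WORLD"  },
	{ "CA", "FCC3_FCCA"   },
	{ "HK", "FCC3_WORLD"  },
	{ "HT", "ETSI1_WORLD" },
};

static const char *regpair_for(const char *alpha2)
{
	size_t i;

	for (i = 0; i < sizeof(countries) / sizeof(countries[0]); i++)
		if (strcmp(countries[i].alpha2, alpha2) == 0)
			return countries[i].regpair;
	return "WORLD";		/* unknown country: fall back to a default */
}

int main(void)
{
	printf("CA -> %s\n", regpair_for("CA"));
	printf("ZZ -> %s\n", regpair_for("ZZ"));
	return 0;
}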
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 6607162..65b23f72 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -749,12 +749,6 @@
 #endif
 };
 
-/*
- * Include goes here to avoid a dependency problem.
- * A better fix would be to integrate xmit.h into b43.h.
- */
-#include "xmit.h"
-
 /* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
 struct b43_wl {
 	/* Pointer to the active wireless device on this chip */
@@ -830,13 +824,9 @@
 	struct b43_leds leds;
 
 #ifdef CONFIG_B43_PIO
-	/*
-	 * RX/TX header/tail buffers used by the frame transmit functions.
-	 */
-	struct b43_rxhdr_fw4 rxhdr;
-	struct b43_txhdr txhdr;
-	u8 rx_tail[4];
-	u8 tx_tail[4];
+	/* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
+	u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
+	u8 pio_tailspace[4] __attribute__((__aligned__(8)));
 #endif /* CONFIG_B43_PIO */
 };
 
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 8701034..de4e804 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1157,8 +1157,9 @@
 }
 
 static int dma_tx_fragment(struct b43_dmaring *ring,
-			   struct sk_buff *skb)
+			   struct sk_buff **in_skb)
 {
+	struct sk_buff *skb = *in_skb;
 	const struct b43_dma_ops *ops = ring->ops;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	u8 *header;
@@ -1224,8 +1225,14 @@
 		}
 
 		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
+		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
+		bounce_skb->dev = skb->dev;
+		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
+		info = IEEE80211_SKB_CB(bounce_skb);
+
 		dev_kfree_skb_any(skb);
 		skb = bounce_skb;
+		*in_skb = bounce_skb;
 		meta->skb = skb;
 		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
@@ -1355,7 +1362,11 @@
 	 * static, so we don't need to store it per frame. */
 	ring->queue_prio = skb_get_queue_mapping(skb);
 
-	err = dma_tx_fragment(ring, skb);
+	/* dma_tx_fragment might reallocate the skb, so invalidate any
+	 * pointers into its data or cb now. */

+	hdr = NULL;
+	info = NULL;
+	err = dma_tx_fragment(ring, &skb);
 	if (unlikely(err == -ENOKEY)) {
 		/* Drop this packet, as we don't have the encryption key
 		 * anymore and must not transmit it unencrypted. */
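dma_tx_fragment() now takes struct sk_buff ** so that, when it has to substitute a bounce buffer for the original skb, the caller's pointer is updated too and no stale reference survives the call. A minimal userspace sketch of that pointer-to-pointer hand-off follows; the buffer type and function names are hypothetical stand-ins for the skb machinery.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	size_t len;
	unsigned char data[64];
};

/* May replace *bufp with a freshly allocated copy ("bounce buffer");
 * on return the caller's pointer always refers to the live buffer. */
static int maybe_bounce(struct buf **bufp, int need_copy)
{
	struct buf *bounce;

	if (!need_copy)
		return 0;

	bounce = malloc(sizeof(*bounce));
	if (!bounce)
		return -1;
	memcpy(bounce, *bufp, sizeof(**bufp));	/* carry payload + metadata over */
	free(*bufp);
	*bufp = bounce;				/* update the caller's pointer */
	return 0;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	b->len = 4;
	memcpy(b->data, "abcd", b->len);

	/* Any alias of the old pointer taken before this call is stale now. */
	if (maybe_bounce(&b, 1) == 0)
		printf("len=%zu data=%.4s\n", b->len, (const char *)b->data);
	free(b);
	return 0;
}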
diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h
index 4c56187..32b66d5 100644
--- a/drivers/net/wireless/b43/leds.h
+++ b/drivers/net/wireless/b43/leds.h
@@ -1,6 +1,7 @@
 #ifndef B43_LEDS_H_
 #define B43_LEDS_H_
 
+struct b43_wl;
 struct b43_wldev;
 
 #ifdef CONFIG_B43_LEDS
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index df6b26a..ed6e96a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4501,7 +4501,6 @@
 
 	cancel_work_sync(&(wl->beacon_update_trigger));
 
-	wiphy_rfkill_stop_polling(hw->wiphy);
 	mutex_lock(&wl->mutex);
 	if (b43_status(dev) >= B43_STAT_STARTED) {
 		dev = b43_wireless_core_stop(dev);
@@ -4671,7 +4670,7 @@
 {
 	struct b43_wl *wl = dev->wl;
 	struct ssb_bus *bus = dev->dev->bus;
-	struct pci_dev *pdev = bus->host_pci;
+	struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
 	int err;
 	bool have_2ghz_phy = 0, have_5ghz_phy = 0;
 	u32 tmp;
@@ -4804,7 +4803,7 @@
 
 	if (!list_empty(&wl->devlist)) {
 		/* We are not the first core on this chip. */
-		pdev = dev->bus->host_pci;
+		pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
 		/* Only special chips support more than one wireless
 		 * core, although some of the other chips have more than
 		 * one wireless core as well. Check for this and
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index dbbf0d1..3105f235 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -341,12 +341,15 @@
 			q->mmio_base + B43_PIO_TXDATA,
 			sizeof(u16));
 	if (data_len & 1) {
+		u8 *tail = wl->pio_tailspace;
+		BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
+
 		/* Write the last byte. */
 		ctl &= ~B43_PIO_TXCTL_WRITEHI;
 		b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
-		wl->tx_tail[0] = data[data_len - 1];
-		wl->tx_tail[1] = 0;
-		ssb_block_write(dev->dev, wl->tx_tail, 2,
+		tail[0] = data[data_len - 1];
+		tail[1] = 0;
+		ssb_block_write(dev->dev, tail, 2,
 				q->mmio_base + B43_PIO_TXDATA,
 				sizeof(u16));
 	}
@@ -392,31 +395,31 @@
 			q->mmio_base + B43_PIO8_TXDATA,
 			sizeof(u32));
 	if (data_len & 3) {
-		wl->tx_tail[3] = 0;
+		u8 *tail = wl->pio_tailspace;
+		BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
+
+		memset(tail, 0, 4);
 		/* Write the last few bytes. */
 		ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
 			 B43_PIO8_TXCTL_24_31);
 		switch (data_len & 3) {
 		case 3:
 			ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
-			wl->tx_tail[0] = data[data_len - 3];
-			wl->tx_tail[1] = data[data_len - 2];
-			wl->tx_tail[2] = data[data_len - 1];
+			tail[0] = data[data_len - 3];
+			tail[1] = data[data_len - 2];
+			tail[2] = data[data_len - 1];
 			break;
 		case 2:
 			ctl |= B43_PIO8_TXCTL_8_15;
-			wl->tx_tail[0] = data[data_len - 2];
-			wl->tx_tail[1] = data[data_len - 1];
-			wl->tx_tail[2] = 0;
+			tail[0] = data[data_len - 2];
+			tail[1] = data[data_len - 1];
 			break;
 		case 1:
-			wl->tx_tail[0] = data[data_len - 1];
-			wl->tx_tail[1] = 0;
-			wl->tx_tail[2] = 0;
+			tail[0] = data[data_len - 1];
 			break;
 		}
 		b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
-		ssb_block_write(dev->dev, wl->tx_tail, 4,
+		ssb_block_write(dev->dev, tail, 4,
 				q->mmio_base + B43_PIO8_TXDATA,
 				sizeof(u32));
 	}
@@ -455,6 +458,7 @@
 	int err;
 	unsigned int hdrlen;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;
 
 	B43_WARN_ON(list_empty(&q->packets_list));
 	pack = list_entry(q->packets_list.next,
@@ -462,7 +466,9 @@
 
 	cookie = generate_cookie(q, pack);
 	hdrlen = b43_txhdr_size(dev);
-	err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb,
+	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
+	B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
+	err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
 				 info, cookie);
 	if (err)
 		return err;
@@ -476,9 +482,9 @@
 
 	pack->skb = skb;
 	if (q->rev >= 8)
-		pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
+		pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
 	else
-		pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
+		pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);
 
 	/* Remove it from the list of available packet slots.
 	 * It will be put back when we receive the status report. */
@@ -624,8 +630,11 @@
 	unsigned int i, padding;
 	struct sk_buff *skb;
 	const char *err_msg = NULL;
+	struct b43_rxhdr_fw4 *rxhdr =
+		(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
 
-	memset(&wl->rxhdr, 0, sizeof(wl->rxhdr));
+	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
+	memset(rxhdr, 0, sizeof(*rxhdr));
 
 	/* Check if we have data and wait for it to get ready. */
 	if (q->rev >= 8) {
@@ -663,16 +672,16 @@
 
 	/* Get the preamble (RX header) */
 	if (q->rev >= 8) {
-		ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
+		ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
 			       q->mmio_base + B43_PIO8_RXDATA,
 			       sizeof(u32));
 	} else {
-		ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
+		ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
 			       q->mmio_base + B43_PIO_RXDATA,
 			       sizeof(u16));
 	}
 	/* Sanity checks. */
-	len = le16_to_cpu(wl->rxhdr.frame_len);
+	len = le16_to_cpu(rxhdr->frame_len);
 	if (unlikely(len > 0x700)) {
 		err_msg = "len > 0x700";
 		goto rx_error;
@@ -682,7 +691,7 @@
 		goto rx_error;
 	}
 
-	macstat = le32_to_cpu(wl->rxhdr.mac_status);
+	macstat = le32_to_cpu(rxhdr->mac_status);
 	if (macstat & B43_RX_MAC_FCSERR) {
 		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
 			/* Drop frames with failed FCS. */
@@ -707,22 +716,25 @@
 			       q->mmio_base + B43_PIO8_RXDATA,
 			       sizeof(u32));
 		if (len & 3) {
+			u8 *tail = wl->pio_tailspace;
+			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
+
 			/* Read the last few bytes. */
-			ssb_block_read(dev->dev, wl->rx_tail, 4,
+			ssb_block_read(dev->dev, tail, 4,
 				       q->mmio_base + B43_PIO8_RXDATA,
 				       sizeof(u32));
 			switch (len & 3) {
 			case 3:
-				skb->data[len + padding - 3] = wl->rx_tail[0];
-				skb->data[len + padding - 2] = wl->rx_tail[1];
-				skb->data[len + padding - 1] = wl->rx_tail[2];
+				skb->data[len + padding - 3] = tail[0];
+				skb->data[len + padding - 2] = tail[1];
+				skb->data[len + padding - 1] = tail[2];
 				break;
 			case 2:
-				skb->data[len + padding - 2] = wl->rx_tail[0];
-				skb->data[len + padding - 1] = wl->rx_tail[1];
+				skb->data[len + padding - 2] = tail[0];
+				skb->data[len + padding - 1] = tail[1];
 				break;
 			case 1:
-				skb->data[len + padding - 1] = wl->rx_tail[0];
+				skb->data[len + padding - 1] = tail[0];
 				break;
 			}
 		}
@@ -731,15 +743,18 @@
 			       q->mmio_base + B43_PIO_RXDATA,
 			       sizeof(u16));
 		if (len & 1) {
+			u8 *tail = wl->pio_tailspace;
+			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
+
 			/* Read the last byte. */
-			ssb_block_read(dev->dev, wl->rx_tail, 2,
+			ssb_block_read(dev->dev, tail, 2,
 				       q->mmio_base + B43_PIO_RXDATA,
 				       sizeof(u16));
-			skb->data[len + padding - 1] = wl->rx_tail[0];
+			skb->data[len + padding - 1] = tail[0];
 		}
 	}
 
-	b43_rx(q->dev, skb, &wl->rxhdr);
+	b43_rx(q->dev, skb, rxhdr);
 
 	return 1;
 
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 7a3218c..ffdce6f 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -33,7 +33,8 @@
 		      & B43_MMIO_RADIO_HWENABLED_HI_MASK))
 			return 1;
 	} else {
-		if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
+		if (b43_status(dev) >= B43_STAT_STARTED &&
+		    b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
 		    & B43_MMIO_RADIO_HWENABLED_LO_MASK)
 			return 1;
 	}
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index f4e9695..7a5e294 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -27,7 +27,7 @@
 
 */
 
-#include "b43.h"
+#include "xmit.h"
 #include "phy_common.h"
 #include "dma.h"
 #include "pio.h"
@@ -690,10 +690,7 @@
 	}
 
 	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
-
-	local_bh_disable();
-	ieee80211_rx(dev->wl->hw, skb);
-	local_bh_enable();
+	ieee80211_rx_ni(dev->wl->hw, skb);
 
 #if B43_DEBUG
 	dev->rx_count++;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1d9223b..0983406 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3592,7 +3592,7 @@
 {
 	struct b43legacy_wl *wl = dev->wl;
 	struct ssb_bus *bus = dev->dev->bus;
-	struct pci_dev *pdev = bus->host_pci;
+	struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
 	int err;
 	int have_bphy = 0;
 	int have_gphy = 0;
@@ -3706,7 +3706,7 @@
 
 	if (!list_empty(&wl->devlist)) {
 		/* We are not the first core on this chip. */
-		pdev = dev->bus->host_pci;
+		pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
 		/* Only special chips support more than one wireless
 		 * core, although some of the other chips have more than
 		 * one wireless core as well. Check for this and
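Both b43 and b43legacy now read host_pci only after checking that the core actually sits on a PCI SSB bus; on other bus types the field is not meaningful. A tiny sketch of that guard pattern, using hypothetical userspace types in place of the ssb structures:

#include <stdio.h>

enum bus_type { BUS_PCI, BUS_SDIO };

struct pci_info { int vendor; };

struct bus {
	enum bus_type type;
	struct pci_info *host_pci;	/* valid only when type == BUS_PCI */
};

/* Return the PCI device info, or NULL when the bus is not PCI. */
static struct pci_info *bus_pci_info(const struct bus *b)
{
	return (b->type == BUS_PCI) ? b->host_pci : NULL;
}

int main(void)
{
	struct pci_info pci = { .vendor = 0x14e4 };
	struct bus pcibus  = { .type = BUS_PCI,  .host_pci = &pci };
	struct bus sdiobus = { .type = BUS_SDIO, .host_pci = NULL };

	printf("pci: %p  sdio: %p\n",
	       (void *)bus_pci_info(&pcibus),
	       (void *)bus_pci_info(&sdiobus));
	return 0;
}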
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 240cff1..a741d37 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6325,8 +6325,10 @@
 
       fail:
 	if (dev) {
-		if (registered)
+		if (registered) {
+			unregister_ieee80211(priv->ieee);
 			unregister_netdev(dev);
+		}
 
 		ipw2100_hw_stop_adapter(priv);
 
@@ -6383,6 +6385,7 @@
 		/* Unregister the device first - this results in close()
 		 * being called if the device is open.  If we free storage
 		 * first, then close() will crash. */
+		unregister_ieee80211(priv->ieee);
 		unregister_netdev(dev);
 
 		/* ipw2100_down will ensure that there is no more pending work
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 61ef890..4539e63 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11822,6 +11822,7 @@
 		if (err) {
 			IPW_ERROR("Failed to register promiscuous network "
 				  "device (error %d).\n", err);
+			unregister_ieee80211(priv->ieee);
 			unregister_netdev(priv->net_dev);
 			goto out_remove_sysfs;
 		}
@@ -11872,6 +11873,7 @@
 
 	mutex_unlock(&priv->mutex);
 
+	unregister_ieee80211(priv->ieee);
 	unregister_netdev(priv->net_dev);
 
 	if (priv->rxq) {
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index bf45391..f42ade6 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -1020,6 +1020,7 @@
 /* ieee80211.c */
 extern void free_ieee80211(struct net_device *dev, int monitor);
 extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor);
+extern void unregister_ieee80211(struct libipw_device *ieee);
 extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
 
 extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index a0e9f6a..be5b809 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -235,16 +235,19 @@
 	libipw_networks_free(ieee);
 
 	/* free cfg80211 resources */
-	if (!monitor) {
-		wiphy_unregister(ieee->wdev.wiphy);
-		kfree(ieee->a_band.channels);
-		kfree(ieee->bg_band.channels);
+	if (!monitor)
 		wiphy_free(ieee->wdev.wiphy);
-	}
 
 	free_netdev(dev);
 }
 
+void unregister_ieee80211(struct libipw_device *ieee)
+{
+	wiphy_unregister(ieee->wdev.wiphy);
+	kfree(ieee->a_band.channels);
+	kfree(ieee->bg_band.channels);
+}
+
 #ifdef CONFIG_LIBIPW_DEBUG
 
 static int debug = 0;
@@ -330,3 +333,4 @@
 
 EXPORT_SYMBOL(alloc_ieee80211);
 EXPORT_SYMBOL(free_ieee80211);
+EXPORT_SYMBOL(unregister_ieee80211);
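free_ieee80211() used to both unregister the wiphy and free it; the patch splits the unregister step into unregister_ieee80211() so callers can tear down in the usual order: make the object unreachable first, then release its memory. A rough userspace sketch of that two-phase teardown (hypothetical names, not the cfg80211 API):

#include <stdio.h>
#include <stdlib.h>

struct dev { int id; };

static struct dev *registry;	/* "registered" device visible to other code */

static void dev_unregister(struct dev *d)
{
	if (registry == d)
		registry = NULL;	/* step 1: make it unreachable */
}

static void dev_free(struct dev *d)
{
	free(d);			/* step 2: release the memory */
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	registry = d;

	/* Teardown in the safe order: unregister, then free. */
	dev_unregister(d);
	dev_free(d);
	printf("registry=%p\n", (void *)registry);
	return 0;
}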
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 679a67f..3a645e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -110,8 +110,7 @@
 	.send_tx_power = iwl5000_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
 	.apm_ops = {
-		.init =	iwl5000_apm_init,
-		.reset = iwl5000_apm_reset,
+		.init = iwl_apm_init,
 		.stop = iwl_apm_stop,
 		.config = iwl1000_nic_config,
 		.set_pwr_src = iwl_set_pwr_src,
@@ -159,15 +158,20 @@
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_A,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = false,
+	.use_bsm = false,
 	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
 	.shadow_ram_support = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.support_ct_kill_exit = true,
 };
 
 struct iwl_cfg iwl1000_bg_cfg = {
@@ -180,15 +184,20 @@
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_A,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = false,
+	.use_bsm = false,
 	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
 	.shadow_ram_support = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+	.support_ct_kill_exit = true,
 };
 
 MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 1677278..6fd10d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,12 +71,6 @@
 
 #include "iwl-eeprom.h"
 
-/*
- * uCode queue management definitions ...
- * Queue #4 is the command queue for 3945 and 4965.
- */
-#define IWL_CMD_QUEUE_NUM	4
-
 /* Time constants */
 #define SHORT_SLOT_TIME 9
 #define LONG_SLOT_TIME 20
@@ -254,12 +248,6 @@
 #define TFD_CTL_PAD_SET(n)         (n << 28)
 #define TFD_CTL_PAD_GET(ctl)       (ctl >> 28)
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 /* Sizes and addresses for instruction and data memory (SRAM) in
  * 3945's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
 #define IWL39_RTC_INST_LOWER_BOUND		(0x000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 4115672..09a7bd2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -293,7 +293,7 @@
 static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
 			    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -353,16 +353,12 @@
 void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
 		struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
 		     (int)sizeof(struct iwl3945_notif_statistics),
 		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
 
 	memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39));
-
-	iwl_leds_background(priv);
-
-	priv->last_statistics_time = jiffies;
 }
 
 /******************************************************************************
@@ -545,14 +541,18 @@
 				   struct iwl_rx_mem_buffer *rxb,
 				   struct ieee80211_rx_status *stats)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
 	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
 	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
-	short len = le16_to_cpu(rx_hdr->len);
+	u16 len = le16_to_cpu(rx_hdr->len);
+	struct sk_buff *skb;
+	int ret;
+	__le16 fc = hdr->frame_control;
 
 	/* We received data from the HW, so stop the watchdog */
-	if (unlikely((len + IWL39_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
+	if (unlikely(len + IWL39_RX_FRAME_SIZE >
+		     PAGE_SIZE << priv->hw_params.rx_page_order)) {
 		IWL_DEBUG_DROP(priv, "Corruption detected!\n");
 		return;
 	}
@@ -564,20 +564,49 @@
 		return;
 	}
 
-	skb_reserve(rxb->skb, (void *)rx_hdr->payload - (void *)pkt);
-	/* Set the size of the skb to the size of the frame */
-	skb_put(rxb->skb, le16_to_cpu(rx_hdr->len));
+	skb = alloc_skb(IWL_LINK_HDR_MAX, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(priv, "alloc_skb failed\n");
+		return;
+	}
 
 	if (!iwl3945_mod_params.sw_crypto)
 		iwl_set_decrypted_flag(priv,
-				       (struct ieee80211_hdr *)rxb->skb->data,
+				       (struct ieee80211_hdr *)rxb_addr(rxb),
 				       le32_to_cpu(rx_end->status), stats);
 
-	iwl_update_stats(priv, false, hdr->frame_control, len);
+	skb_add_rx_frag(skb, 0, rxb->page,
+			(void *)rx_hdr->payload - (void *)pkt, len);
 
-	memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats));
-	ieee80211_rx_irqsafe(priv->hw, rxb->skb);
-	rxb->skb = NULL;
+	/* mac80211 currently doesn't support paged SKBs. Convert to a
+	 * linear SKB for management frames and for data frames that
+	 * require software decryption or software defragmentation. */
+	if (ieee80211_is_mgmt(fc) ||
+	    ieee80211_has_protected(fc) ||
+	    ieee80211_has_morefrags(fc) ||
+	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+		ret = skb_linearize(skb);
+	else
+		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
+			0 : -ENOMEM;
+
+	if (ret) {
+		kfree_skb(skb);
+		goto out;
+	}
+
+	/*
+	 * XXX: We cannot touch the page and its virtual memory (pkt) after
+	 * here. It might have already been freed by the above skb change.
+	 */
+
+	iwl_update_stats(priv, false, fc, len);
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+	ieee80211_rx(priv->hw, skb);
+ out:
+	priv->alloc_rxb_page--;
+	rxb->page = NULL;
 }
 
 #define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
@@ -587,7 +616,7 @@
 {
 	struct ieee80211_hdr *header;
 	struct ieee80211_rx_status rx_status;
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
 	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
 	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
@@ -787,29 +816,31 @@
 	u8 data_retry_limit;
 	__le32 tx_flags;
 	__le16 fc = hdr->frame_control;
-	struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
 
 	rate = iwl3945_rates[rate_index].plcp;
-	tx_flags = tx->tx_flags;
+	tx_flags = tx_cmd->tx_flags;
 
 	/* We need to figure out how to get the sta->supp_rates while
 	 * in this running context */
 	rate_mask = IWL_RATES_MASK;
 
+
+	/* Set retry limit on DATA packets and Probe Responses */
+	if (ieee80211_is_probe_resp(fc))
+		data_retry_limit = 3;
+	else
+		data_retry_limit = IWL_DEFAULT_TX_RETRY;
+	tx_cmd->data_retry_limit = data_retry_limit;
+
 	if (tx_id >= IWL_CMD_QUEUE_NUM)
 		rts_retry_limit = 3;
 	else
 		rts_retry_limit = 7;
 
-	if (ieee80211_is_probe_resp(fc)) {
-		data_retry_limit = 3;
-		if (data_retry_limit < rts_retry_limit)
-			rts_retry_limit = data_retry_limit;
-	} else
-		data_retry_limit = IWL_DEFAULT_TX_RETRY;
-
-	if (priv->data_retry_limit != -1)
-		data_retry_limit = priv->data_retry_limit;
+	if (data_retry_limit < rts_retry_limit)
+		rts_retry_limit = data_retry_limit;
+	tx_cmd->rts_retry_limit = rts_retry_limit;
 
 	if (ieee80211_is_mgmt(fc)) {
 		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
@@ -827,22 +858,20 @@
 		}
 	}
 
-	tx->rts_retry_limit = rts_retry_limit;
-	tx->data_retry_limit = data_retry_limit;
-	tx->rate = rate;
-	tx->tx_flags = tx_flags;
+	tx_cmd->rate = rate;
+	tx_cmd->tx_flags = tx_flags;
 
 	/* OFDM */
-	tx->supp_rates[0] =
+	tx_cmd->supp_rates[0] =
 	   ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
 
 	/* CCK */
-	tx->supp_rates[1] = (rate_mask & 0xF);
+	tx_cmd->supp_rates[1] = (rate_mask & 0xF);
 
 	IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
 		       "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
-		       tx->rate, le32_to_cpu(tx->tx_flags),
-		       tx->supp_rates[1], tx->supp_rates[0]);
+		       tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
+		       tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
 }
 
 u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
@@ -958,6 +987,11 @@
 
 	iwl3945_hw_txq_ctx_free(priv);
 
+	/* allocate tx queue structure */
+	rc = iwl_alloc_txq_mem(priv);
+	if (rc)
+		return rc;
+
 	/* Tx CMD queue */
 	rc = iwl3945_tx_reset(priv);
 	if (rc)
@@ -982,42 +1016,25 @@
 	return rc;
 }
 
+
+/*
+ * Start up 3945's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
 static int iwl3945_apm_init(struct iwl_priv *priv)
 {
-	int ret;
+	int ret = iwl_apm_init(priv);
 
-	iwl_power_initialize(priv);
+	/* Clear APMG (NIC's internal power management) interrupts */
+	iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
+	iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
 
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+	/* Reset radio chip */
+	iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+	udelay(5);
+	iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
 
-	/* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
-	/* set "initialization complete" bit to move adapter
-	* D0U* --> D0A* state */
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		goto out;
-	}
-
-	/* enable DMA */
-	iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
-						APMG_CLK_VAL_BSM_CLK_RQT);
-
-	udelay(20);
-
-	/* disable L1-Active */
-	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-out:
 	return ret;
 }
 
@@ -1142,12 +1159,16 @@
 	int txq_id;
 
 	/* Tx queues */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (txq_id == IWL_CMD_QUEUE_NUM)
-			iwl_cmd_queue_free(priv);
-		else
-			iwl_tx_queue_free(priv, txq_id);
+	if (priv->txq)
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
+		     txq_id++)
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				iwl_cmd_queue_free(priv);
+			else
+				iwl_tx_queue_free(priv, txq_id);
 
+	/* free tx queue structure */
+	iwl_free_txq_mem(priv);
 }
 
 void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1156,6 +1177,7 @@
 
 	/* stop SCD */
 	iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
+	iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
 
 	/* reset TFD queues */
 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
@@ -1168,47 +1190,6 @@
 	iwl3945_hw_txq_ctx_free(priv);
 }
 
-static int iwl3945_apm_reset(struct iwl_priv *priv)
-{
-	iwl_apm_stop_master(priv);
-
-
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-	udelay(10);
-
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	iwl_poll_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-
-	iwl_write_prph(priv, APMG_CLK_CTRL_REG,
-				APMG_CLK_VAL_BSM_CLK_RQT);
-
-	iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
-	iwl_write_prph(priv, APMG_RTC_INT_STT_REG,
-					0xFFFFFFFF);
-
-	/* enable DMA */
-	iwl_write_prph(priv, APMG_CLK_EN_REG,
-				APMG_CLK_VAL_DMA_CLK_RQT |
-				APMG_CLK_VAL_BSM_CLK_RQT);
-	udelay(10);
-
-	iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_VAL_RESET_REQ);
-	udelay(5);
-	iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_VAL_RESET_REQ);
-
-	/* Clear the 'host command active' bit... */
-	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-
-	wake_up_interruptible(&priv->wait_command_queue);
-
-	return 0;
-}
-
 /**
  * iwl3945_hw_reg_adjust_power_by_temp
  * return index delta into power gain settings table
@@ -1817,7 +1798,7 @@
 static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
 {
 	int rc = 0;
-	struct iwl_rx_packet *res = NULL;
+	struct iwl_rx_packet *pkt;
 	struct iwl3945_rxon_assoc_cmd rxon_assoc;
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_RXON_ASSOC,
@@ -1846,14 +1827,14 @@
 	if (rc)
 		return rc;
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
 		rc = -EIO;
 	}
 
-	priv->alloc_rxb_skb--;
-	dev_kfree_skb_any(cmd.reply_skb);
+	priv->alloc_rxb_page--;
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return rc;
 }
@@ -2001,12 +1982,6 @@
 	return 0;
 }
 
-/* will add 3945 channel switch cmd handling later */
-int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel)
-{
-	return 0;
-}
-
 /**
  * iwl3945_reg_txpower_periodic -  called when time to check our temperature.
  *
@@ -2516,11 +2491,10 @@
 	}
 
 	/* Assign number of Usable TX queues */
-	priv->hw_params.max_txq_num = IWL39_NUM_QUEUES;
+	priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
 
 	priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
-	priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K;
-	priv->hw_params.max_pkt_size = 2342;
+	priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
 	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
 	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
 	priv->hw_params.max_stations = IWL3945_STATION_COUNT;
@@ -2803,7 +2777,6 @@
 	.dump_nic_error_log = iwl3945_dump_nic_error_log,
 	.apm_ops = {
 		.init = iwl3945_apm_init,
-		.reset = iwl3945_apm_reset,
 		.stop = iwl_apm_stop,
 		.config = iwl3945_nic_config,
 		.set_pwr_src = iwl3945_set_pwr_src,
@@ -2833,6 +2806,7 @@
 static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
 	.get_hcmd_size = iwl3945_get_hcmd_size,
 	.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
+	.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
 };
 
 static struct iwl_ops iwl3945_ops = {
@@ -2852,7 +2826,11 @@
 	.eeprom_size = IWL3945_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
 	.ops = &iwl3945_ops,
+	.num_of_queues = IWL39_NUM_QUEUES,
 	.mod_params = &iwl3945_mod_params,
+	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+	.set_l0s = false,
+	.use_bsm = true,
 	.use_isr_legacy = true,
 	.ht_greenfield_support = false,
 	.led_compensation = 64,
@@ -2867,6 +2845,7 @@
 	.eeprom_size = IWL3945_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
 	.ops = &iwl3945_ops,
+	.num_of_queues = IWL39_NUM_QUEUES,
 	.mod_params = &iwl3945_mod_params,
 	.use_isr_legacy = true,
 	.ht_greenfield_support = false,
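The reworked 3945 RX path above now validates the length advertised in the frame header against the size of the backing receive page before touching the payload, and drops the frame when it cannot fit. A small sketch of that bounds check, with made-up sizes standing in for IWL39_RX_FRAME_SIZE and the page order:

#include <stdio.h>
#include <string.h>

#define FRAME_OVERHEAD	48	/* stand-in for the fixed RX frame header */
#define BUF_SIZE	4096	/* stand-in for PAGE_SIZE << rx_page_order */

/* Copy the advertised payload out of the receive buffer, or reject it
 * when the advertised length would run past the buffer's end. */
static int copy_frame(unsigned char *dst, const unsigned char *buf,
		      size_t advertised_len)
{
	if (advertised_len + FRAME_OVERHEAD > BUF_SIZE)
		return -1;	/* corrupted header: drop the frame */
	memcpy(dst, buf + FRAME_OVERHEAD, advertised_len);
	return 0;
}

int main(void)
{
	unsigned char buf[BUF_SIZE] = { 0 };
	unsigned char dst[BUF_SIZE];

	printf("%d\n", copy_frame(dst, buf, 100));	/* accepted */
	printf("%d\n", copy_frame(dst, buf, 60000));	/* rejected */
	return 0;
}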
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index f3907c1..ebb999a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -130,12 +130,6 @@
 #define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
 #define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 #define SUP_RATE_11A_MAX_NUM_CHANNELS  8
 #define SUP_RATE_11B_MAX_NUM_CHANNELS  4
 #define SUP_RATE_11G_MAX_NUM_CHANNELS  12
@@ -280,8 +274,6 @@
  */
 extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
 
-extern int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel);
-
 /*
  * Forward declare iwl-3945.c functions for iwl-base.c
  */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index b34322a..c606366 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -76,12 +76,9 @@
 
 /*
  * uCode queue management definitions ...
- * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4.
  * The first queue used for block-ack aggregation is #7 (4965 only).
  * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
  */
-#define IWL_CMD_QUEUE_NUM       4
-#define IWL_CMD_FIFO_NUM        4
 #define IWL49_FIRST_AMPDU_QUEUE	7
 
 /* Time constants */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index f8eed9a..1ff465a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -62,8 +62,6 @@
 
 /* module parameters */
 static struct iwl_mod_params iwl4965_mod_params = {
-	.num_of_queues = IWL49_NUM_QUEUES,
-	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
 	.amsdu_size_8K = 1,
 	.restart_fw = 1,
 	/* the rest are 0 by default */
@@ -319,64 +317,13 @@
 	iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
 }
 
-static int iwl4965_apm_init(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
-	/* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
-	/* set "initialization complete" bit to move adapter
-	 * D0U* --> D0A* state */
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	/* wait for clock stabilization */
-	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		goto out;
-	}
-
-	/* enable DMA */
-	iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
-						APMG_CLK_VAL_BSM_CLK_RQT);
-
-	udelay(20);
-
-	/* disable L1-Active */
-	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-out:
-	return ret;
-}
-
-
 static void iwl4965_nic_config(struct iwl_priv *priv)
 {
 	unsigned long flags;
 	u16 radio_cfg;
-	u16 lctl;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	lctl = iwl_pcie_link_ctl(priv);
-
-	/* HW bug W/A - negligible power consumption */
-	/* L1-ASPM is enabled by BIOS */
-	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
-		/* L1-ASPM enabled: disable L0S  */
-		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-	else
-		/* L1-ASPM disabled: enable L0S */
-		iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-
 	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
 
 	/* write radio config values to register */
@@ -397,46 +344,6 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static int iwl4965_apm_reset(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	iwl_apm_stop_master(priv);
-
-
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
-	udelay(10);
-
-	/* FIXME: put here L1A -L0S w/a */
-
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0)
-		goto out;
-
-	udelay(10);
-
-	/* Enable DMA and BSM Clock */
-	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
-					      APMG_CLK_VAL_BSM_CLK_RQT);
-
-	udelay(10);
-
-	/* disable L1A */
-	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-	wake_up_interruptible(&priv->wait_command_queue);
-
-out:
-	return ret;
-}
-
 /* Reset differential Rx gains in NIC to prepare for chain noise calibration.
  * Called after every association, but this runs only once!
  *  ... once chain noise is calibrated the first time, it's good forever.  */
@@ -526,18 +433,6 @@
 	data->beacon_count = 0;
 }
 
-static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
-			__le32 *tx_flags)
-{
-	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
-		*tx_flags |= TX_CMD_FLG_RTS_MSK;
-		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
-	} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
-		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-		*tx_flags |= TX_CMD_FLG_CTS_MSK;
-	}
-}
-
 static void iwl4965_bg_txpower_work(struct work_struct *work)
 {
 	struct iwl_priv *priv = container_of(work, struct iwl_priv,
@@ -718,6 +613,10 @@
 
 	.nrg_th_cck = 100,
 	.nrg_th_ofdm = 100,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
 };
 
 static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
@@ -734,19 +633,16 @@
  */
 static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
 {
+	if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+	    priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
+		priv->cfg->num_of_queues =
+			priv->cfg->mod_params->num_of_queues;
 
-	if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
-	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
-		IWL_ERR(priv,
-			"invalid queues_num, should be between %d and %d\n",
-			IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
-		return -EINVAL;
-	}
-
-	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
+	priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
 	priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
 	priv->hw_params.scd_bc_tbls_size =
-			IWL49_NUM_QUEUES * sizeof(struct iwl4965_scd_bc_tbl);
+			priv->cfg->num_of_queues *
+			sizeof(struct iwl4965_scd_bc_tbl);
 	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
 	priv->hw_params.max_stations = IWL4965_STATION_COUNT;
 	priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
@@ -757,10 +653,10 @@
 
 	priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
 
-	priv->hw_params.tx_chains_num = 2;
-	priv->hw_params.rx_chains_num = 2;
-	priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
-	priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
+	priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+	priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+	priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+	priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
 	if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
 		priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
 
@@ -1537,14 +1433,13 @@
 	return ret;
 }
 
-#ifdef IEEE80211_CONF_CHANNEL_SWITCH
 static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
 {
 	int rc;
 	u8 band = 0;
 	bool is_ht40 = false;
 	u8 ctrl_chan_high = 0;
-	struct iwl4965_channel_switch_cmd cmd = { 0 };
+	struct iwl4965_channel_switch_cmd cmd;
 	const struct iwl_channel_info *ch_info;
 
 	band = priv->band == IEEE80211_BAND_2GHZ;
@@ -1565,8 +1460,11 @@
 	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
 	if (ch_info)
 		cmd.expect_beacon = is_channel_radar(ch_info);
-	else
-		cmd.expect_beacon = 1;
+	else {
+		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+			priv->active_rxon.channel, channel);
+		return -EFAULT;
+	}
 
 	rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40,
 				      ctrl_chan_high, &cmd.tx_power);
@@ -1578,7 +1476,6 @@
 	rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
 	return rc;
 }
-#endif
 
 /**
  * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
@@ -1775,11 +1672,13 @@
 				   u16 ssn_idx, u8 tx_fifo)
 {
 	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
+	    (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+	     <= txq_id)) {
 		IWL_WARN(priv,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWL49_FIRST_AMPDU_QUEUE,
-			IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
+			IWL49_FIRST_AMPDU_QUEUE +
+			priv->cfg->num_of_ampdu_queues - 1);
 		return -EINVAL;
 	}
 
@@ -1840,11 +1739,13 @@
 	u16 ra_tid;
 
 	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
+	    (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+	     <= txq_id)) {
 		IWL_WARN(priv,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWL49_FIRST_AMPDU_QUEUE,
-			IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
+			IWL49_FIRST_AMPDU_QUEUE +
+			priv->cfg->num_of_ampdu_queues - 1);
 		return -EINVAL;
 	}
 
@@ -2048,7 +1949,7 @@
 static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -2249,7 +2150,7 @@
 	.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
 	.chain_noise_reset = iwl4965_chain_noise_reset,
 	.gain_computation = iwl4965_gain_computation,
-	.rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag,
+	.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
 	.calc_rssi = iwl4965_calc_rssi,
 };
 
@@ -2271,9 +2172,9 @@
 	.load_ucode = iwl4965_load_bsm,
 	.dump_nic_event_log = iwl_dump_nic_event_log,
 	.dump_nic_error_log = iwl_dump_nic_error_log,
+	.set_channel_switch = iwl4965_hw_channel_switch,
 	.apm_ops = {
-		.init = iwl4965_apm_init,
-		.reset = iwl4965_apm_reset,
+		.init = iwl_apm_init,
 		.stop = iwl_apm_stop,
 		.config = iwl4965_nic_config,
 		.set_pwr_src = iwl_set_pwr_src,
@@ -2323,7 +2224,14 @@
 	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
 	.ops = &iwl4965_ops,
+	.num_of_queues = IWL49_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl4965_mod_params,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_AB,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = true,
 	.use_isr_legacy = true,
 	.ht_greenfield_support = false,
 	.broken_powersave = true,
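Instead of failing iwl4965_hw_set_hw_params() when the num_of_queues module parameter is out of range, the code above (and the matching 5000/6000 changes further down) now applies the override only when it lies within the supported range and otherwise keeps the per-device default. A minimal sketch of that policy, with hypothetical limits and names:

#include <stdio.h>

#define MIN_QUEUES 10	/* stand-in for IWL_MIN_NUM_QUEUES */
#define MAX_QUEUES 16	/* stand-in for the per-device maximum */

/* Accept the user's override only when it is in range; otherwise keep
 * the built-in default instead of erroring out. */
static int pick_num_queues(int default_queues, int requested)
{
	if (requested >= MIN_QUEUES && requested <= MAX_QUEUES)
		return requested;
	return default_queues;
}

int main(void)
{
	printf("%d\n", pick_num_queues(16, 0));		/* unset -> default 16 */
	printf("%d\n", pick_num_queues(16, 12));	/* valid override -> 12 */
	printf("%d\n", pick_num_queues(16, 99));	/* out of range -> 16 */
	return 0;
}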
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 8cc3d50..d256fec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -72,115 +72,14 @@
 	IWL_TX_FIFO_HCCA_2
 };
 
-int iwl5000_apm_init(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
-	/* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
-	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
-	/* Set FH wait threshold to maximum (HW error during stress W/A) */
-	iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
-
-	/* enable HAP INTA to move device L1a -> L0s */
-	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
-
-	if (priv->cfg->need_pll_cfg)
-		iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
-
-	/* set "initialization complete" bit to move adapter
-	 * D0U* --> D0A* state */
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	/* wait for clock stabilization */
-	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		return ret;
-	}
-
-	/* enable DMA */
-	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-
-	udelay(20);
-
-	/* disable L1-Active */
-	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-	return ret;
-}
-
-int iwl5000_apm_reset(struct iwl_priv *priv)
-{
-	int ret = 0;
-
-	iwl_apm_stop_master(priv);
-
-	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
-	udelay(10);
-
-
-	/* FIXME: put here L1A -L0S w/a */
-
-	if (priv->cfg->need_pll_cfg)
-		iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
-
-	/* set "initialization complete" bit to move adapter
-	 * D0U* --> D0A* state */
-	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-	/* wait for clock stabilization */
-	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-	if (ret < 0) {
-		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-		goto out;
-	}
-
-	/* enable DMA */
-	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-
-	udelay(20);
-
-	/* disable L1-Active */
-	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-out:
-
-	return ret;
-}
-
-
 /* NIC configuration for 5000 series */
 void iwl5000_nic_config(struct iwl_priv *priv)
 {
 	unsigned long flags;
 	u16 radio_cfg;
-	u16 lctl;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	lctl = iwl_pcie_link_ctl(priv);
-
-	/* HW bug W/A */
-	/* L1-ASPM is enabled by BIOS */
-	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
-		/* L1-APSM enabled: disable L0S  */
-		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-	else
-		/* L1-ASPM disabled: enable L0S */
-		iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-
 	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
 
 	/* write radio config values to register */
@@ -279,7 +178,7 @@
 			data->delta_gain_code[i] = 0;
 			continue;
 		}
-		delta_g = (1000 * ((s32)average_noise[0] -
+		delta_g = (1000 * ((s32)average_noise[default_chain] -
 			(s32)average_noise[i])) / 1500;
 		/* bound gain by 2 bits value max, 3rd bit is sign */
 		data->delta_gain_code[i] =
@@ -372,6 +271,10 @@
 	.auto_corr_max_cck_mrc = 400,
 	.nrg_th_cck = 95,
 	.nrg_th_ofdm = 95,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
 };
 
 static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
@@ -394,6 +297,10 @@
 	.auto_corr_max_cck_mrc = 400,
 	.nrg_th_cck = 95,
 	.nrg_th_ofdm = 95,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
 };
 
 const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
@@ -458,7 +365,7 @@
 static void iwl5000_rx_calib_result(struct iwl_priv *priv,
 			     struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
 	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 	int index;
@@ -784,18 +691,16 @@
 
 int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
 {
-	if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) ||
-	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
-		IWL_ERR(priv,
-			"invalid queues_num, should be between %d and %d\n",
-			IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
-		return -EINVAL;
-	}
+	if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+	    priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+		priv->cfg->num_of_queues =
+			priv->cfg->mod_params->num_of_queues;
 
-	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
+	priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
 	priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
 	priv->hw_params.scd_bc_tbls_size =
-			IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl);
+			priv->cfg->num_of_queues *
+			sizeof(struct iwl5000_scd_bc_tbl);
 	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
 	priv->hw_params.max_stations = IWL5000_STATION_COUNT;
 	priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
@@ -947,11 +852,13 @@
 	u16 ra_tid;
 
 	if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
+	    (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+	     <= txq_id)) {
 		IWL_WARN(priv,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWL50_FIRST_AMPDU_QUEUE,
-			IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
+			IWL50_FIRST_AMPDU_QUEUE +
+			priv->cfg->num_of_ampdu_queues - 1);
 		return -EINVAL;
 	}
 
@@ -1005,11 +912,13 @@
 				   u16 ssn_idx, u8 tx_fifo)
 {
 	if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
+	    (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+	     <= txq_id)) {
 		IWL_ERR(priv,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWL50_FIRST_AMPDU_QUEUE,
-			IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
+			IWL50_FIRST_AMPDU_QUEUE +
+			priv->cfg->num_of_ampdu_queues - 1);
 		return -EINVAL;
 	}
 
@@ -1176,7 +1085,7 @@
 static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -1473,6 +1382,36 @@
 IWL5000_UCODE_GET(init_data_size);
 IWL5000_UCODE_GET(boot_size);
 
+static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+{
+	struct iwl5000_channel_switch_cmd cmd;
+	const struct iwl_channel_info *ch_info;
+	struct iwl_host_cmd hcmd = {
+		.id = REPLY_CHANNEL_SWITCH,
+		.len = sizeof(cmd),
+		.flags = CMD_SIZE_HUGE,
+		.data = &cmd,
+	};
+
+	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
+		priv->active_rxon.channel, channel);
+	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+	cmd.channel = cpu_to_le16(channel);
+	cmd.rxon_flags = priv->active_rxon.flags;
+	cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
+	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	ch_info = iwl_get_channel_info(priv, priv->band, channel);
+	if (ch_info)
+		cmd.expect_beacon = is_channel_radar(ch_info);
+	else {
+		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+			priv->active_rxon.channel, channel);
+		return -EFAULT;
+	}
+
+	return iwl_send_cmd_sync(priv, &hcmd);
+}
+
 struct iwl_hcmd_ops iwl5000_hcmd = {
 	.rxon_assoc = iwl5000_send_rxon_assoc,
 	.commit_rxon = iwl_commit_rxon,
@@ -1520,9 +1459,9 @@
 	.alive_notify = iwl5000_alive_notify,
 	.send_tx_power = iwl5000_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
+	.set_channel_switch = iwl5000_hw_channel_switch,
 	.apm_ops = {
-		.init =	iwl5000_apm_init,
-		.reset = iwl5000_apm_reset,
+		.init = iwl_apm_init,
 		.stop = iwl_apm_stop,
 		.config = iwl5000_nic_config,
 		.set_pwr_src = iwl_set_pwr_src,
@@ -1572,9 +1511,9 @@
 	.alive_notify = iwl5000_alive_notify,
 	.send_tx_power = iwl5000_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
+	.set_channel_switch = iwl5000_hw_channel_switch,
 	.apm_ops = {
-		.init =	iwl5000_apm_init,
-		.reset = iwl5000_apm_reset,
+		.init = iwl_apm_init,
 		.stop = iwl_apm_stop,
 		.config = iwl5000_nic_config,
 		.set_pwr_src = iwl_set_pwr_src,
@@ -1621,8 +1560,6 @@
 };
 
 struct iwl_mod_params iwl50_mod_params = {
-	.num_of_queues = IWL50_NUM_QUEUES,
-	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.amsdu_size_8K = 1,
 	.restart_fw = 1,
 	/* the rest are 0 by default */
@@ -1639,10 +1576,14 @@
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_ABC,
 	.valid_rx_ant = ANT_ABC,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -1658,10 +1599,14 @@
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_B,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -1677,10 +1622,14 @@
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_B,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -1696,10 +1645,14 @@
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_B,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -1715,10 +1668,14 @@
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_ABC,
 	.valid_rx_ant = ANT_ABC,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -1734,10 +1691,14 @@
 	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
 	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_A,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = true,
+	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+	.set_l0s = true,
+	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index d1f0b0b..32466d3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -52,8 +52,8 @@
 #define IWL6050_UCODE_API_MAX 4
 
 /* Lowest firmware API version supported */
-#define IWL6000_UCODE_API_MIN 1
-#define IWL6050_UCODE_API_MIN 1
+#define IWL6000_UCODE_API_MIN 4
+#define IWL6050_UCODE_API_MIN 4
 
 #define IWL6000_FW_PRE "iwlwifi-6000-"
 #define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -121,22 +121,24 @@
 	.auto_corr_max_cck_mrc = 310,
 	.nrg_th_cck = 97,
 	.nrg_th_ofdm = 100,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
 };
 
 static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
 {
-	if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) ||
-	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
-		IWL_ERR(priv,
-			"invalid queues_num, should be between %d and %d\n",
-			IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
-		return -EINVAL;
-	}
+	if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+	    priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+		priv->cfg->num_of_queues =
+			priv->cfg->mod_params->num_of_queues;
 
-	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
+	priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
 	priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
 	priv->hw_params.scd_bc_tbls_size =
-			IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl);
+			priv->cfg->num_of_queues *
+			sizeof(struct iwl5000_scd_bc_tbl);
 	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
 	priv->hw_params.max_stations = IWL5000_STATION_COUNT;
 	priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
@@ -170,6 +172,37 @@
 	return 0;
 }
 
+static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+{
+	struct iwl6000_channel_switch_cmd cmd;
+	const struct iwl_channel_info *ch_info;
+	struct iwl_host_cmd hcmd = {
+		.id = REPLY_CHANNEL_SWITCH,
+		.len = sizeof(cmd),
+		.flags = CMD_SIZE_HUGE,
+		.data = &cmd,
+	};
+
+	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
+		priv->active_rxon.channel, channel);
+
+	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+	cmd.channel = cpu_to_le16(channel);
+	cmd.rxon_flags = priv->active_rxon.flags;
+	cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
+	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	ch_info = iwl_get_channel_info(priv, priv->band, channel);
+	if (ch_info)
+		cmd.expect_beacon = is_channel_radar(ch_info);
+	else {
+		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+			priv->active_rxon.channel, channel);
+		return -EFAULT;
+	}
+
+	return iwl_send_cmd_sync(priv, &hcmd);
+}
+
 static struct iwl_lib_ops iwl6000_lib = {
 	.set_hw_params = iwl6000_hw_set_hw_params,
 	.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
@@ -190,9 +223,9 @@
 	.alive_notify = iwl5000_alive_notify,
 	.send_tx_power = iwl5000_send_tx_power,
 	.update_chain_flags = iwl_update_chain_flags,
+	.set_channel_switch = iwl6000_hw_channel_switch,
 	.apm_ops = {
-		.init =	iwl5000_apm_init,
-		.reset = iwl5000_apm_reset,
+		.init = iwl_apm_init,
 		.stop = iwl_apm_stop,
 		.config = iwl6000_nic_config,
 		.set_pwr_src = iwl_set_pwr_src,
@@ -231,6 +264,21 @@
 	.led = &iwlagn_led_ops,
 };
 
+static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
+	.get_hcmd_size = iwl5000_get_hcmd_size,
+	.build_addsta_hcmd = iwl5000_build_addsta_hcmd,
+	.rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
+	.calc_rssi = iwl5000_calc_rssi,
+};
+
+static struct iwl_ops iwl6050_ops = {
+	.ucode = &iwl5000_ucode,
+	.lib = &iwl6000_lib,
+	.hcmd = &iwl5000_hcmd,
+	.utils = &iwl6050_hcmd_utils,
+	.led = &iwlagn_led_ops,
+};
+
 
 /*
  * "h": Hybrid configuration, use both internal and external Power Amplifier
@@ -245,10 +293,14 @@
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_AB,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = false,
+	.use_bsm = false,
 	.pa_type = IWL_PA_HYBRID,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
@@ -257,6 +309,8 @@
 	.use_rts_for_ht = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 struct iwl_cfg iwl6000h_2abg_cfg = {
@@ -269,10 +323,14 @@
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_AB,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = false,
+	.use_bsm = false,
 	.pa_type = IWL_PA_HYBRID,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
@@ -280,6 +338,8 @@
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 struct iwl_cfg iwl6000h_2bg_cfg = {
@@ -292,10 +352,14 @@
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_AB,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = false,
+	.use_bsm = false,
 	.pa_type = IWL_PA_HYBRID,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
@@ -303,6 +367,8 @@
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 /*
@@ -318,10 +384,14 @@
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_BC,
 	.valid_rx_ant = ANT_BC,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = false,
+	.use_bsm = false,
 	.pa_type = IWL_PA_INTERNAL,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
@@ -330,6 +400,8 @@
 	.use_rts_for_ht = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -342,10 +414,14 @@
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_BC,
 	.valid_rx_ant = ANT_BC,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = false,
+	.use_bsm = false,
 	.pa_type = IWL_PA_INTERNAL,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
@@ -353,6 +429,8 @@
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 struct iwl_cfg iwl6000i_2bg_cfg = {
@@ -365,10 +443,14 @@
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_BC,
 	.valid_rx_ant = ANT_BC,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = false,
+	.use_bsm = false,
 	.pa_type = IWL_PA_INTERNAL,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
@@ -376,6 +458,8 @@
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 struct iwl_cfg iwl6050_2agn_cfg = {
@@ -384,22 +468,28 @@
 	.ucode_api_max = IWL6050_UCODE_API_MAX,
 	.ucode_api_min = IWL6050_UCODE_API_MIN,
 	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
-	.ops = &iwl6000_ops,
+	.ops = &iwl6050_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
-	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
+	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_AB,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = false,
+	.use_bsm = false,
 	.pa_type = IWL_PA_SYSTEM,
-	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.use_rts_for_ht = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 struct iwl_cfg iwl6050_2abg_cfg = {
@@ -408,21 +498,27 @@
 	.ucode_api_max = IWL6050_UCODE_API_MAX,
 	.ucode_api_min = IWL6050_UCODE_API_MIN,
 	.sku = IWL_SKU_A|IWL_SKU_G,
-	.ops = &iwl6000_ops,
+	.ops = &iwl6050_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
-	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
+	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_AB,
 	.valid_rx_ant = ANT_AB,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = false,
+	.use_bsm = false,
 	.pa_type = IWL_PA_SYSTEM,
-	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 struct iwl_cfg iwl6000_3agn_cfg = {
@@ -435,10 +531,14 @@
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_ABC,
 	.valid_rx_ant = ANT_ABC,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = false,
+	.use_bsm = false,
 	.pa_type = IWL_PA_SYSTEM,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
@@ -447,6 +547,8 @@
 	.use_rts_for_ht = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 struct iwl_cfg iwl6050_3agn_cfg = {
@@ -455,22 +557,28 @@
 	.ucode_api_max = IWL6050_UCODE_API_MAX,
 	.ucode_api_min = IWL6050_UCODE_API_MIN,
 	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
-	.ops = &iwl6000_ops,
+	.ops = &iwl6050_ops,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
-	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
+	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
+	.num_of_queues = IWL50_NUM_QUEUES,
+	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
 	.mod_params = &iwl50_mod_params,
 	.valid_tx_ant = ANT_ABC,
 	.valid_rx_ant = ANT_ABC,
-	.need_pll_cfg = false,
+	.pll_cfg_val = 0,
+	.set_l0s = false,
+	.use_bsm = false,
 	.pa_type = IWL_PA_SYSTEM,
-	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
 	.use_rts_for_ht = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
 };
 
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
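Note the behavioural change in iwl6000_hw_set_hw_params() above: an out-of-range queues_num module parameter no longer aborts probe with -EINVAL; the per-device default in cfg->num_of_queues is kept, and the parameter is honoured only when it falls inside the valid range. A condensed sketch of the new selection logic (the helper name is illustrative, not part of the patch):

	/* Illustrative helper: pick the TX queue count the way the new
	 * iwl6000_hw_set_hw_params() does -- fall back to the cfg default
	 * instead of failing when the module parameter is out of range. */
	static void iwl_pick_num_queues(struct iwl_priv *priv)
	{
		int n = priv->cfg->mod_params->num_of_queues;

		if (n >= IWL_MIN_NUM_QUEUES && n <= IWL50_NUM_QUEUES)
			priv->cfg->num_of_queues = n;	/* honour request */

		priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
	}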
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index a07be29..27d4ece 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -75,106 +75,6 @@
 	/*ANT_ABC  -> */ ANT_ABC,
 };
 
-/**
- * struct iwl_rate_scale_data -- tx success history for one rate
- */
-struct iwl_rate_scale_data {
-	u64 data;		/* bitmap of successful frames */
-	s32 success_counter;	/* number of frames successful */
-	s32 success_ratio;	/* per-cent * 128  */
-	s32 counter;		/* number of frames attempted */
-	s32 average_tpt;	/* success ratio * expected throughput */
-	unsigned long stamp;
-};
-
-/**
- * struct iwl_scale_tbl_info -- tx params and success history for all rates
- *
- * There are two of these in struct iwl_lq_sta,
- * one for "active", and one for "search".
- */
-struct iwl_scale_tbl_info {
-	enum iwl_table_type lq_type;
-	u8 ant_type;
-	u8 is_SGI;	/* 1 = short guard interval */
-	u8 is_ht40;	/* 1 = 40 MHz channel width */
-	u8 is_dup;	/* 1 = duplicated data streams */
-	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
-	u8 max_search;	/* maximun number of tables we can search */
-	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
-	u32 current_rate;  /* rate_n_flags, uCode API format */
-	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
-};
-
-struct iwl_traffic_load {
-	unsigned long time_stamp;	/* age of the oldest statistics */
-	u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
-						 * slice */
-	u32 total;			/* total num of packets during the
-					 * last TID_MAX_TIME_DIFF */
-	u8 queue_count;			/* number of queues that has
-					 * been used since the last cleanup */
-	u8 head;			/* start of the circular buffer */
-};
-
-/**
- * struct iwl_lq_sta -- driver's rate scaling private structure
- *
- * Pointer to this gets passed back and forth between driver and mac80211.
- */
-struct iwl_lq_sta {
-	u8 active_tbl;		/* index of active table, range 0-1 */
-	u8 enable_counter;	/* indicates HT mode */
-	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
-	u8 search_better_tbl;	/* 1: currently trying alternate mode */
-	s32 last_tpt;
-
-	/* The following determine when to search for a new mode */
-	u32 table_count_limit;
-	u32 max_failure_limit;	/* # failed frames before new search */
-	u32 max_success_limit;	/* # successful frames before new search */
-	u32 table_count;
-	u32 total_failed;	/* total failed frames, any/all rates */
-	u32 total_success;	/* total successful frames, any/all rates */
-	u64 flush_timer;	/* time staying in mode before new search */
-
-	u8 action_counter;	/* # mode-switch actions tried */
-	u8 is_green;
-	u8 is_dup;
-	enum ieee80211_band band;
-	u8 ibss_sta_added;
-
-	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
-	u32 supp_rates;
-	u16 active_legacy_rate;
-	u16 active_siso_rate;
-	u16 active_mimo2_rate;
-	u16 active_mimo3_rate;
-	u16 active_rate_basic;
-	s8 max_rate_idx;     /* Max rate set by user */
-	u8 missed_rate_counter;
-
-	struct iwl_link_quality_cmd lq;
-	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
-	struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
-	u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
-	struct dentry *rs_sta_dbgfs_scale_table_file;
-	struct dentry *rs_sta_dbgfs_stats_table_file;
-	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
-	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
-	u32 dbg_fixed_rate;
-#endif
-	struct iwl_priv *drv;
-
-	/* used to be in sta_info */
-	int last_txrate_idx;
-	/* last tx rate_n_flags */
-	u32 last_rate_n_flags;
-	/* packets destined for this STA are aggregated */
-	u8 is_agg;
-};
-
 static void rs_rate_scale_perform(struct iwl_priv *priv,
 				   struct sk_buff *skb,
 				   struct ieee80211_sta *sta,
@@ -2575,19 +2475,17 @@
 			  gfp_t gfp)
 {
 	struct iwl_lq_sta *lq_sta;
+	struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
 	struct iwl_priv *priv;
 	int i, j;
 
 	priv = (struct iwl_priv *)priv_rate;
 	IWL_DEBUG_RATE(priv, "create station rate scale window\n");
 
-	lq_sta = kzalloc(sizeof(struct iwl_lq_sta), gfp);
+	lq_sta = &sta_priv->lq_sta;
 
-	if (lq_sta == NULL)
-		return NULL;
 	lq_sta->lq.sta_id = 0xff;
 
-
 	for (j = 0; j < LQ_SIZE; j++)
 		for (i = 0; i < IWL_RATE_COUNT; i++)
 			rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
@@ -2819,11 +2717,9 @@
 static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
 			void *priv_sta)
 {
-	struct iwl_lq_sta *lq_sta = priv_sta;
 	struct iwl_priv *priv __maybe_unused = priv_r;
 
 	IWL_DEBUG_RATE(priv, "enter\n");
-	kfree(lq_sta);
 	IWL_DEBUG_RATE(priv, "leave\n");
 }
 
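rs_alloc_sta() above no longer kzalloc()s the per-station rate-scaling state; it is taken from the station's drv_priv area (note the &sta_priv->lq_sta reference), which is why rs_free_sta() drops its kfree() and why iwl_setup_mac() in the iwl-agn.c hunk later in this patch sets hw->sta_data_size to sizeof(struct iwl_station_priv). A sketch of the assumed container layout -- the real definition lives in iwl-dev.h, outside this excerpt:

	/* Assumed shape of the per-station private area that mac80211
	 * reserves (sta_data_size bytes reachable via sta->drv_priv). */
	struct iwl_station_priv {
		struct iwl_lq_sta lq_sta;	/* embedded rate-scaling state */
		/* other per-station driver fields may follow */
	};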
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 9fac530..affc0c5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -54,6 +54,7 @@
 	u8 prev_table_rs;	/* prev in rate table cmd */
 };
 
+
 /*
  * These serve as indexes into
  * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
@@ -335,6 +336,106 @@
 	char	mcs[IWL_MAX_MCS_DISPLAY_SIZE];
 };
 
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+	u64 data;		/* bitmap of successful frames */
+	s32 success_counter;	/* number of frames successful */
+	s32 success_ratio;	/* per-cent * 128  */
+	s32 counter;		/* number of frames attempted */
+	s32 average_tpt;	/* success ratio * expected throughput */
+	unsigned long stamp;
+};
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+	enum iwl_table_type lq_type;
+	u8 ant_type;
+	u8 is_SGI;	/* 1 = short guard interval */
+	u8 is_ht40;	/* 1 = 40 MHz channel width */
+	u8 is_dup;	/* 1 = duplicated data streams */
+	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+	u8 max_search;	/* maximum number of tables we can search */
+	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
+	u32 current_rate;  /* rate_n_flags, uCode API format */
+	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+};
+
+struct iwl_traffic_load {
+	unsigned long time_stamp;	/* age of the oldest statistics */
+	u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
+						 * slice */
+	u32 total;			/* total num of packets during the
+					 * last TID_MAX_TIME_DIFF */
+	u8 queue_count;			/* number of queues that have
+					 * been used since the last cleanup */
+	u8 head;			/* start of the circular buffer */
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+	u8 active_tbl;		/* index of active table, range 0-1 */
+	u8 enable_counter;	/* indicates HT mode */
+	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
+	u8 search_better_tbl;	/* 1: currently trying alternate mode */
+	s32 last_tpt;
+
+	/* The following determine when to search for a new mode */
+	u32 table_count_limit;
+	u32 max_failure_limit;	/* # failed frames before new search */
+	u32 max_success_limit;	/* # successful frames before new search */
+	u32 table_count;
+	u32 total_failed;	/* total failed frames, any/all rates */
+	u32 total_success;	/* total successful frames, any/all rates */
+	u64 flush_timer;	/* time staying in mode before new search */
+
+	u8 action_counter;	/* # mode-switch actions tried */
+	u8 is_green;
+	u8 is_dup;
+	enum ieee80211_band band;
+	u8 ibss_sta_added;
+
+	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+	u32 supp_rates;
+	u16 active_legacy_rate;
+	u16 active_siso_rate;
+	u16 active_mimo2_rate;
+	u16 active_mimo3_rate;
+	u16 active_rate_basic;
+	s8 max_rate_idx;     /* Max rate set by user */
+	u8 missed_rate_counter;
+
+	struct iwl_link_quality_cmd lq;
+	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+	struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+	u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+	struct dentry *rs_sta_dbgfs_scale_table_file;
+	struct dentry *rs_sta_dbgfs_stats_table_file;
+	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+	u32 dbg_fixed_rate;
+#endif
+	struct iwl_priv *drv;
+
+	/* used to be in sta_info */
+	int last_txrate_idx;
+	/* last tx rate_n_flags */
+	u32 last_rate_n_flags;
+	/* packets destined for this STA are aggregated */
+	u8 is_agg;
+};
+
 static inline u8 num_of_ant(u8 mask)
 {
 	return  !!((mask) & ANT_A) +
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 8d7bc38..fa1672e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -524,7 +524,7 @@
 static void iwl_rx_reply_alive(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_alive_resp *palive;
 	struct delayed_work *pwork;
 
@@ -610,7 +610,7 @@
 				struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl4965_beacon_notif *beacon =
 		(struct iwl4965_beacon_notif *)pkt->u.raw;
 	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
@@ -634,7 +634,7 @@
 static void iwl_rx_card_state_notif(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
 	unsigned long status = priv->status;
 
@@ -769,7 +769,7 @@
 		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
 
 	/* calculate total frames need to be restock after handling RX */
-	total_empty = r - priv->rxq.write_actual;
+	total_empty = r - rxq->write_actual;
 	if (total_empty < 0)
 		total_empty += RX_QUEUE_SIZE;
 
@@ -786,10 +786,10 @@
 
 		rxq->queue[i] = NULL;
 
-		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
-				 priv->hw_params.rx_buf_size + 256,
-				 PCI_DMA_FROMDEVICE);
-		pkt = (struct iwl_rx_packet *)rxb->skb->data;
+		pci_unmap_page(priv->pci_dev, rxb->page_dma,
+			       PAGE_SIZE << priv->hw_params.rx_page_order,
+			       PCI_DMA_FROMDEVICE);
+		pkt = rxb_addr(rxb);
 
 		trace_iwlwifi_dev_rx(priv, pkt,
 			le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
@@ -814,8 +814,8 @@
 		if (priv->rx_handlers[pkt->hdr.cmd]) {
 			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
 				i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
 			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
 		} else {
 			/* No handling needed */
 			IWL_DEBUG_RX(priv,
@@ -824,35 +824,45 @@
 				pkt->hdr.cmd);
 		}
 
+		/*
+		 * XXX: After here, we should always check rxb->page
+		 * against NULL before touching it or its virtual
+		 * memory (pkt), because some rx_handler might have
+		 * already taken or freed the pages.
+		 */
+
 		if (reclaim) {
-			/* Invoke any callbacks, transfer the skb to caller, and
-			 * fire off the (possibly) blocking iwl_send_cmd()
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking iwl_send_cmd()
 			 * as we reclaim the driver command queue */
-			if (rxb && rxb->skb)
+			if (rxb->page)
 				iwl_tx_cmd_complete(priv, rxb);
 			else
 				IWL_WARN(priv, "Claim null rxb?\n");
 		}
 
-		/* For now we just don't re-use anything.  We can tweak this
-		 * later to try and re-use notification packets and SKBs that
-		 * fail to Rx correctly */
-		if (rxb->skb != NULL) {
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb_any(rxb->skb);
-			rxb->skb = NULL;
-		}
-
+		/* Reuse the page if possible. For notification packets and
+		 * frames that fail to Rx correctly, add them back into the
+		 * rx_free list for reuse later. */
 		spin_lock_irqsave(&rxq->lock, flags);
-		list_add_tail(&rxb->list, &priv->rxq.rx_used);
+		if (rxb->page != NULL) {
+			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+				0, PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		} else
+			list_add_tail(&rxb->list, &rxq->rx_used);
+
 		spin_unlock_irqrestore(&rxq->lock, flags);
+
 		i = (i + 1) & RX_QUEUE_MASK;
 		/* If there are a lot of unused frames,
 		 * restock the Rx queue so ucode won't assert. */
 		if (fill_rx) {
 			count++;
 			if (count >= 8) {
-				priv->rxq.read = i;
+				rxq->read = i;
 				iwl_rx_replenish_now(priv);
 				count = 0;
 			}
@@ -860,7 +870,7 @@
 	}
 
 	/* Backtrack one entry */
-	priv->rxq.read = i;
+	rxq->read = i;
 	if (fill_rx)
 		iwl_rx_replenish_now(priv);
 	else
@@ -907,6 +917,8 @@
 	}
 #endif
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
 	 * atomic, make sure that inta covers all the interrupts that
 	 * we've discovered, even if FH interrupt came in just after
@@ -928,8 +940,6 @@
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
-		spin_unlock_irqrestore(&priv->lock, flags);
-
 		return;
 	}
 
@@ -1019,6 +1029,7 @@
 	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
 		iwl_rx_handle(priv);
 		priv->isr_stats.rx++;
+		iwl_leds_background(priv);
 		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
 	}
 
@@ -1056,7 +1067,6 @@
 			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
 	}
 #endif
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 /* tasklet for iwlagn interrupt */
@@ -1086,6 +1096,9 @@
 				inta, inta_mask);
 	}
 #endif
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* saved interrupt in inta variable now we can reset priv->inta */
 	priv->inta = 0;
 
@@ -1101,8 +1114,6 @@
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
-		spin_unlock_irqrestore(&priv->lock, flags);
-
 		return;
 	}
 
@@ -1220,6 +1231,7 @@
 				    CSR_INT_PERIODIC_ENA);
 
 		priv->isr_stats.rx++;
+		iwl_leds_background(priv);
 	}
 
 	if (inta & CSR_INT_BIT_FH_TX) {
@@ -1242,14 +1254,10 @@
 			 inta & ~priv->inta_mask);
 	}
 
-
 	/* Re-enable all interrupts */
 	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 }
 
 
@@ -1899,11 +1907,9 @@
 
 	udelay(5);
 
-	/* FIXME: apm_ops.suspend(priv) */
-	if (exit_pending)
-		priv->cfg->ops->lib->apm_ops.stop(priv);
-	else
-		priv->cfg->ops->lib->apm_ops.reset(priv);
+	/* Stop the device, and put it in low power state */
+	priv->cfg->ops->lib->apm_ops.stop(priv);
+
  exit:
 	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
 
@@ -2290,6 +2296,69 @@
 
 #define UCODE_READY_TIMEOUT	(4 * HZ)
 
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int iwl_setup_mac(struct iwl_priv *priv)
+{
+	int ret;
+	struct ieee80211_hw *hw = priv->hw;
+	hw->rate_control_algorithm = "iwl-agn-rs";
+
+	/* Tell mac80211 our characteristics */
+	hw->flags = IEEE80211_HW_SIGNAL_DBM |
+		    IEEE80211_HW_NOISE_DBM |
+		    IEEE80211_HW_AMPDU_AGGREGATION |
+		    IEEE80211_HW_SPECTRUM_MGMT;
+
+	if (!priv->cfg->broken_powersave)
+		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+			     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+
+	hw->sta_data_size = sizeof(struct iwl_station_priv);
+	hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_ADHOC);
+
+	hw->wiphy->custom_regulatory = true;
+
+	/* Firmware does not support this */
+	hw->wiphy->disable_beacon_hints = true;
+
+	/*
+	 * For now, disable PS by default because it affects
+	 * RX performance significantly.
+	 */
+	hw->wiphy->ps_default = false;
+
+	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+	/* we create the 802.11 header and a zero-length SSID element */
+	hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
+
+	/* Default value; 4 EDCA QOS priorities */
+	hw->queues = 4;
+
+	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+			&priv->bands[IEEE80211_BAND_2GHZ];
+	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+			&priv->bands[IEEE80211_BAND_5GHZ];
+
+	ret = ieee80211_register_hw(priv->hw);
+	if (ret) {
+		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+		return ret;
+	}
+	priv->mac80211_registered = 1;
+
+	return 0;
+}
+
+
 static int iwl_mac_start(struct ieee80211_hw *hw)
 {
 	struct iwl_priv *priv = hw->priv;
@@ -3187,6 +3256,15 @@
 		iwl_down(priv);
 	}
 
+	/*
+	 * Make sure device is reset to low power before unloading driver.
+	 * This may be redundant with iwl_down(), but there are paths to
+	 * run iwl_down() without calling apm_ops.stop(), and there are
+	 * paths to avoid running iwl_down() at all before leaving driver.
+	 * This (inexpensive) call *makes sure* device is reset.
+	 */
+	priv->cfg->ops->lib->apm_ops.stop(priv);
+
 	iwl_tt_exit(priv);
 
 	/* make sure we flush any pending irq or
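The RX path above switches from skb-backed to page-backed receive buffers: buffers are unmapped with pci_unmap_page(), the packet is reached through rxb_addr(), and any page a handler did not take ownership of is re-mapped and returned to rx_free instead of being freed. The accessor itself is defined in iwl-dev.h, outside this excerpt; it is assumed to reduce to the page's kernel virtual address:

	/* Assumed definition of the accessor used throughout this hunk;
	 * page_address() returns void *, so the result assigns directly
	 * to a struct iwl_rx_packet pointer. */
	#define rxb_addr(r) page_address((r)->page)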
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 69a80d7..1f801eb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -447,11 +447,11 @@
 				cpu_to_le16((u16)data->nrg_th_ofdm);
 
 	cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
-				cpu_to_le16(190);
+				cpu_to_le16(data->barker_corr_th_min);
 	cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
-				cpu_to_le16(390);
+				cpu_to_le16(data->barker_corr_th_min_mrc);
 	cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
-				cpu_to_le16(62);
+				cpu_to_le16(data->nrg_th_cca);
 
 	IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
 			data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
@@ -524,6 +524,9 @@
 	data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
 	data->nrg_th_cck = ranges->nrg_th_cck;
 	data->nrg_th_ofdm = ranges->nrg_th_ofdm;
+	data->barker_corr_th_min = ranges->barker_corr_th_min;
+	data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
+	data->nrg_th_cca = ranges->nrg_th_cca;
 
 	data->last_bad_plcp_cnt_ofdm = 0;
 	data->last_fa_cnt_ofdm = 0;
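The three Barker/CCA thresholds that iwl-calib.c used to hard-code (190, 390, 62) now come from the per-device sensitivity ranges, so individual devices can override them; the iwl-6000.c hunk earlier in this patch seeds exactly the old defaults. A fragment of such a ranges initializer, mirroring those values (field names are taken from the assignments above):

	/* Per-device sensitivity ranges fragment; values match the
	 * constants previously hard-coded in iwl-calib.c. */
	.barker_corr_th_min     = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca             = 62,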
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index ba3e4c8..954bad6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,11 +109,12 @@
 	REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
 
 	/* WiMAX coexistence */
-	COEX_PRIORITY_TABLE_CMD = 0x5a,	/*5000 only */
+	COEX_PRIORITY_TABLE_CMD = 0x5a,	/* for 5000 series and up */
 	COEX_MEDIUM_NOTIFICATION = 0x5b,
 	COEX_EVENT_CMD = 0x5c,
 
 	/* Calibration */
+	TEMPERATURE_NOTIFICATION = 0x62,
 	CALIBRATION_CFG_CMD = 0x65,
 	CALIBRATION_RES_NOTIFICATION = 0x66,
 	CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
@@ -353,6 +354,9 @@
 #define POWER_TABLE_NUM_HT_OFDM_ENTRIES		32
 #define POWER_TABLE_CCK_ENTRY			32
 
+#define IWL_PWR_NUM_HT_OFDM_ENTRIES		24
+#define IWL_PWR_CCK_ENTRIES			2
+
 /**
  * union iwl4965_tx_power_dual_stream
  *
@@ -803,7 +807,7 @@
 	struct iwl3945_power_per_rate power[IWL_MAX_RATES];
 } __attribute__ ((packed));
 
-struct iwl_channel_switch_cmd {
+struct iwl4965_channel_switch_cmd {
 	u8 band;
 	u8 expect_beacon;
 	__le16 channel;
@@ -813,6 +817,48 @@
 	struct iwl4965_tx_power_db tx_power;
 } __attribute__ ((packed));
 
+/**
+ * struct iwl5000_channel_switch_cmd
+ * @band: 0- 5.2GHz, 1- 2.4GHz
+ * @expect_beacon: 0- resume transmits after channel switch
+ *		   1- wait for beacon to resume transmits
+ * @channel: new channel number
+ * @rxon_flags: Rx on flags
+ * @rxon_filter_flags: filtering parameters
+ * @switch_time: switch time in extended beacon format
+ * @reserved: reserved bytes
+ */
+struct iwl5000_channel_switch_cmd {
+	u8 band;
+	u8 expect_beacon;
+	__le16 channel;
+	__le32 rxon_flags;
+	__le32 rxon_filter_flags;
+	__le32 switch_time;
+	__le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
+} __attribute__ ((packed));
+
+/**
+ * struct iwl6000_channel_switch_cmd
+ * @band: 0- 5.2GHz, 1- 2.4GHz
+ * @expect_beacon: 0- resume transmits after channel switch
+ *		   1- wait for beacon to resume transmits
+ * @channel: new channel number
+ * @rxon_flags: Rx on flags
+ * @rxon_filter_flags: filtering parameters
+ * @switch_time: switch time in extended beacon format
+ * @reserved: reserved bytes
+ */
+struct iwl6000_channel_switch_cmd {
+	u8 band;
+	u8 expect_beacon;
+	__le16 channel;
+	__le32 rxon_flags;
+	__le32 rxon_filter_flags;
+	__le32 switch_time;
+	__le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
+} __attribute__ ((packed));
+
 /*
  * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
  */
@@ -2172,6 +2218,19 @@
 	__le32 reserved2;
 } __attribute__ ((packed));
 
+#define BT_COEX_DISABLE (0x0)
+#define BT_COEX_MODE_2W (0x1)
+#define BT_COEX_MODE_3W (0x2)
+#define BT_COEX_MODE_4W (0x3)
+
+#define BT_LEAD_TIME_MIN (0x0)
+#define BT_LEAD_TIME_DEF (0x1E)
+#define BT_LEAD_TIME_MAX (0xFF)
+
+#define BT_MAX_KILL_MIN (0x1)
+#define BT_MAX_KILL_DEF (0x5)
+#define BT_MAX_KILL_MAX (0xFF)
+
 /*
  * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
  *
@@ -3247,12 +3306,6 @@
  *   Lower values mean higher energy; this means making sure that the value
  *   in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
  *
- * Driver should set the following entries to fixed values:
- *
- *   HD_MIN_ENERGY_OFDM_DET_INDEX               100
- *   HD_BARKER_CORR_TH_ADD_MIN_INDEX            190
- *   HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX        390
- *   HD_OFDM_ENERGY_TH_IN_INDEX                  62
  */
 
 /*
@@ -3505,6 +3558,16 @@
  *****************************************************************************/
 
 struct iwl_rx_packet {
+	/*
+	 * The first 4 bytes of the RX frame header contain both the RX frame
+	 * size and some flags.
+	 * Bit fields:
+	 * 31:    flag flush RB request
+	 * 30:    flag ignore TC (terminal counter) request
+	 * 29:    flag fast IRQ request
+	 * 28-14: Reserved
+	 * 13-00: RX frame size
+	 */
 	__le32 len_n_flags;
 	struct iwl_cmd_header hdr;
 	union {
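The new comment above spells out how len_n_flags packs the RX frame size into bits 13-0; elsewhere in this patch the driver extracts it with FH_RSCSR_FRAME_SIZE_MSK (see the iwl-core.c hunk). A minimal decode sketch, with an illustrative helper name:

	/* Illustrative decode of the packed RX header word; assumes
	 * FH_RSCSR_FRAME_SIZE_MSK covers bits 13-0 as documented above. */
	static inline u32 iwl_rx_frame_size(const struct iwl_rx_packet *pkt)
	{
		return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	}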
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index dc7fd87..d2b56ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -604,6 +604,23 @@
 }
 EXPORT_SYMBOL(iwlcore_free_geos);
 
+/*
+ *  iwlcore_rts_tx_cmd_flag: Set rts/cts. Only 3945 and 4965 share this
+ *  function.
+ */
+void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
+				__le32 *tx_flags)
+{
+	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+		*tx_flags |= TX_CMD_FLG_RTS_MSK;
+		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+	} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+		*tx_flags |= TX_CMD_FLG_CTS_MSK;
+	}
+}
+EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
+
 static bool is_single_rx_stream(struct iwl_priv *priv)
 {
 	return !priv->current_ht_config.is_ht ||
@@ -1264,13 +1281,18 @@
 
 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
 	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
-	IWL_DEBUG_11H(priv, "CSA notif: channel %d, status %d\n",
-		      le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
-	rxon->channel = csa->channel;
-	priv->staging_rxon.channel = csa->channel;
+
+	if (!le32_to_cpu(csa->status)) {
+		rxon->channel = csa->channel;
+		priv->staging_rxon.channel = csa->channel;
+		IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+		      le16_to_cpu(csa->channel));
+	} else
+		IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+		      le16_to_cpu(csa->channel));
 }
 EXPORT_SYMBOL(iwl_rx_csa);
 
@@ -1352,6 +1374,8 @@
 {
 	unsigned long flags;
 
+	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
+
 	iwl_apm_stop_master(priv);
 
 	spin_lock_irqsave(&priv->lock, flags);
@@ -1365,6 +1389,118 @@
 }
 EXPORT_SYMBOL(iwl_apm_stop);
 
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+int iwl_apm_init(struct iwl_priv *priv)
+{
+	int ret = 0;
+	u16 lctl;
+
+	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
+
+	/*
+	 * Use "set_bit" below rather than "write", to preserve any hardware
+	 * bits already set by default after reset.
+	 */
+
+	/* Disable L0S exit timer (platform NMI Work/Around) */
+	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+	/*
+	 * Disable L0s without affecting L1;
+	 *  don't wait for ICH L0s (ICH bug W/A)
+	 */
+	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+	/* Set FH wait threshold to maximum (HW error during stress W/A) */
+	iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+	/*
+	 * Enable HAP INTA (interrupt from management bus) to
+	 * wake device's PCI Express link L1a -> L0s
+	 * NOTE:  This is a no-op for 3945 (non-existent bit)
+	 */
+	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+				    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+	/*
+	 * HW bug W/A - costs negligible power consumption ...
+	 * Check if BIOS (or OS) enabled L1-ASPM on this device
+	 */
+	if (priv->cfg->set_l0s) {
+		lctl = iwl_pcie_link_ctl(priv);
+		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+			/* L1-ASPM enabled; disable(!) L0S  */
+			iwl_set_bit(priv, CSR_GIO_REG,
+					CSR_GIO_REG_VAL_L0S_ENABLED);
+			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
+		} else {
+			/* L1-ASPM disabled; enable(!) L0S */
+			iwl_clear_bit(priv, CSR_GIO_REG,
+					CSR_GIO_REG_VAL_L0S_ENABLED);
+			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
+		}
+	}
+
+	/* Configure analog phase-lock-loop before activating to D0A */
+	if (priv->cfg->pll_cfg_val)
+		iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is supported, e.g. iwl_write_prph()
+	 * and accesses to uCode SRAM.
+	 */
+	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+	if (ret < 0) {
+		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
+		goto out;
+	}
+
+	/*
+	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+	 * BSM (Bootstrap State Machine) is only in 3945 and 4965;
+	 * later devices (i.e. 5000 and later) have non-volatile SRAM,
+	 * and don't need BSM to restore data after power-saving sleep.
+	 *
+	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+	 * do not disable clocks.  This preserves any hardware bits already
+	 * set by default in "CLK_CTRL_REG" after reset.
+	 */
+	if (priv->cfg->use_bsm)
+		iwl_write_prph(priv, APMG_CLK_EN_REG,
+			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+	else
+		iwl_write_prph(priv, APMG_CLK_EN_REG,
+			APMG_CLK_VAL_DMA_CLK_RQT);
+	udelay(20);
+
+	/* Disable L1-Active */
+	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
+			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(iwl_apm_init);
+
+
+
 void iwl_configure_filter(struct ieee80211_hw *hw,
 			  unsigned int changed_flags,
 			  unsigned int *total_flags,
@@ -1412,73 +1548,14 @@
 }
 EXPORT_SYMBOL(iwl_configure_filter);
 
-int iwl_setup_mac(struct iwl_priv *priv)
-{
-	int ret;
-	struct ieee80211_hw *hw = priv->hw;
-	hw->rate_control_algorithm = "iwl-agn-rs";
-
-	/* Tell mac80211 our characteristics */
-	hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		    IEEE80211_HW_NOISE_DBM |
-		    IEEE80211_HW_AMPDU_AGGREGATION |
-		    IEEE80211_HW_SPECTRUM_MGMT;
-
-	if (!priv->cfg->broken_powersave)
-		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
-			     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
-	hw->wiphy->interface_modes =
-		BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_ADHOC);
-
-	hw->wiphy->custom_regulatory = true;
-
-	/* Firmware does not support this */
-	hw->wiphy->disable_beacon_hints = true;
-
-	/*
-	 * For now, disable PS by default because it affects
-	 * RX performance significantly.
-	 */
-	hw->wiphy->ps_default = false;
-
-	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
-	/* we create the 802.11 header and a zero-length SSID element */
-	hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
-
-	/* Default value; 4 EDCA QOS priorities */
-	hw->queues = 4;
-
-	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
-	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			&priv->bands[IEEE80211_BAND_2GHZ];
-	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			&priv->bands[IEEE80211_BAND_5GHZ];
-
-	ret = ieee80211_register_hw(priv->hw);
-	if (ret) {
-		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
-		return ret;
-	}
-	priv->mac80211_registered = 1;
-
-	return 0;
-}
-EXPORT_SYMBOL(iwl_setup_mac);
-
 int iwl_set_hw_params(struct iwl_priv *priv)
 {
 	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
 	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
 	if (priv->cfg->mod_params->amsdu_size_8K)
-		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
+		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
 	else
-		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
-	priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
+		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
 
 	priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
 
@@ -1507,7 +1584,6 @@
 	/* Clear the driver's (not device's) station table */
 	iwl_clear_stations_table(priv);
 
-	priv->data_retry_limit = -1;
 	priv->ieee_channels = NULL;
 	priv->ieee_rates = NULL;
 	priv->band = IEEE80211_BAND_2GHZ;
@@ -1932,9 +2008,9 @@
 int iwl_send_bt_config(struct iwl_priv *priv)
 {
 	struct iwl_bt_cmd bt_cmd = {
-		.flags = 3,
-		.lead_time = 0xAA,
-		.max_kill = 1,
+		.flags = BT_COEX_MODE_4W,
+		.lead_time = BT_LEAD_TIME_DEF,
+		.max_kill = BT_MAX_KILL_DEF,
 		.kill_ack_mask = 0,
 		.kill_cts_mask = 0,
 	};
@@ -2094,10 +2170,7 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 	priv->thermal_throttle.ct_kill_toggle = false;
 
-	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
-	case CSR_HW_REV_TYPE_1000:
-	case CSR_HW_REV_TYPE_6x00:
-	case CSR_HW_REV_TYPE_6x50:
+	if (priv->cfg->support_ct_kill_exit) {
 		adv_cmd.critical_temperature_enter =
 			cpu_to_le32(priv->hw_params.ct_kill_threshold);
 		adv_cmd.critical_temperature_exit =
@@ -2114,8 +2187,7 @@
 					"exit is %d\n",
 				       priv->hw_params.ct_kill_threshold,
 				       priv->hw_params.ct_kill_exit_threshold);
-		break;
-	default:
+	} else {
 		cmd.critical_temperature_R =
 			cpu_to_le32(priv->hw_params.ct_kill_threshold);
 
@@ -2128,7 +2200,6 @@
 					"succeeded, "
 					"critical temperature is %d\n",
 					priv->hw_params.ct_kill_threshold);
-		break;
 	}
 }
 EXPORT_SYMBOL(iwl_rf_kill_ct_config);
@@ -2160,7 +2231,7 @@
 			   struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
 	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
 		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
@@ -2171,7 +2242,7 @@
 void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
 				      struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
 			"notification for %s:\n", len,
@@ -2183,7 +2254,7 @@
 void iwl_rx_reply_error(struct iwl_priv *priv,
 			struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
 	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
 		"seq 0x%04X ser 0x%08X\n",
@@ -2648,6 +2719,14 @@
 			goto set_ch_out;
 		}
 
+		if (iwl_is_associated(priv) &&
+		    (le16_to_cpu(priv->active_rxon.channel) != ch) &&
+		    priv->cfg->ops->lib->set_channel_switch) {
+			ret = priv->cfg->ops->lib->set_channel_switch(priv,
+				ch);
+			goto out;
+		}
+
 		spin_lock_irqsave(&priv->lock, flags);
 
 		/* Configure HT40 channels */
@@ -2826,6 +2905,27 @@
 }
 EXPORT_SYMBOL(iwl_mac_reset_tsf);
 
+int iwl_alloc_txq_mem(struct iwl_priv *priv)
+{
+	if (!priv->txq)
+		priv->txq = kzalloc(
+			sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
+			GFP_KERNEL);
+	if (!priv->txq) {
+		IWL_ERR(priv, "Not enough memory for txq\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(iwl_alloc_txq_mem);
+
+void iwl_free_txq_mem(struct iwl_priv *priv)
+{
+	kfree(priv->txq);
+	priv->txq = NULL;
+}
+EXPORT_SYMBOL(iwl_free_txq_mem);
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 
 #define IWL_TRAFFIC_DUMP_SIZE	(IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
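iwl_apm_init() above replaces the per-device apm init/reset hooks with one routine driven by three new cfg fields (pll_cfg_val, set_l0s, use_bsm). A hedged cfg fragment showing how a 5000-class device opts in, mirroring the iwl-5000.c/iwl-6000.c entries earlier in this patch:

	/* Cfg fragment (illustrative): 5000-class parts program the analog
	 * PLL and want the L0s workaround; a zero pll_cfg_val skips the
	 * PLL write, and only 3945/4965 would set use_bsm. */
	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
	.set_l0s     = true,
	.use_bsm     = false,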
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 6688b69..b875dcf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -109,7 +109,6 @@
 
 struct iwl_apm_ops {
 	int (*init)(struct iwl_priv *priv);
-	int (*reset)(struct iwl_priv *priv);
 	void (*stop)(struct iwl_priv *priv);
 	void (*config)(struct iwl_priv *priv);
 	int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
@@ -170,6 +169,7 @@
 	int (*load_ucode)(struct iwl_priv *priv);
 	void (*dump_nic_event_log)(struct iwl_priv *priv);
 	void (*dump_nic_error_log)(struct iwl_priv *priv);
+	int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
 	/* power management */
 	struct iwl_apm_ops apm_ops;
 
@@ -205,7 +205,6 @@
 	int sw_crypto;		/* def: 0 = using hardware encryption */
 	int disable_hw_scan;	/* def: 0 = use h/w scan */
 	int num_of_queues;	/* def: HW dependent */
-	int num_of_ampdu_queues;/* def: HW dependent */
 	int disable_11n;	/* def: 0 = 11n capabilities enabled */
 	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
 	int antenna;  		/* def: 0 = both antennas (use diversity) */
@@ -227,6 +226,8 @@
  *	The detail algorithm is described in iwl-led.c
  * @use_rts_for_ht: use rts/cts protection for HT traffic
  * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @adv_thermal_throttle: support advanced thermal throttle
+ * @support_ct_kill_exit: support ct kill exit condition
  *
  * We enable the driver to be backward compatible wrt API version. The
  * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -258,11 +259,18 @@
 	int eeprom_size;
 	u16  eeprom_ver;
 	u16  eeprom_calib_ver;
+	int num_of_queues;	/* def: HW dependent */
+	int num_of_ampdu_queues;/* def: HW dependent */
 	const struct iwl_ops *ops;
 	const struct iwl_mod_params *mod_params;
 	u8   valid_tx_ant;
 	u8   valid_rx_ant;
-	bool need_pll_cfg;
+
+	/* for iwl_apm_init() */
+	u32 pll_cfg_val;
+	bool set_l0s;
+	bool use_bsm;
+
 	bool use_isr_legacy;
 	enum iwl_pa_type pa_type;
 	const u16 max_ll_items;
@@ -273,6 +281,8 @@
 	bool use_rts_for_ht;
 	int chain_noise_num_beacons;
 	const bool supports_idle;
+	bool adv_thermal_throttle;
+	bool support_ct_kill_exit;
 };
 
 /***************************
@@ -305,7 +315,6 @@
 			  unsigned int changed_flags,
 			  unsigned int *total_flags, u64 multicast);
 int iwl_hw_nic_init(struct iwl_priv *priv);
-int iwl_setup_mac(struct iwl_priv *priv);
 int iwl_set_hw_params(struct iwl_priv *priv);
 int iwl_init_drv(struct iwl_priv *priv);
 void iwl_uninit_drv(struct iwl_priv *priv);
@@ -327,6 +336,10 @@
 int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
 			 struct ieee80211_tx_queue_stats *stats);
 void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
+int iwl_alloc_txq_mem(struct iwl_priv *priv);
+void iwl_free_txq_mem(struct iwl_priv *priv);
+void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
+				__le32 *tx_flags);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_alloc_traffic_mem(struct iwl_priv *priv);
 void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -527,7 +540,7 @@
 			   const void *data,
 			   void (*callback)(struct iwl_priv *priv,
 					    struct iwl_device_cmd *cmd,
-					    struct sk_buff *skb));
+					    struct iwl_rx_packet *pkt));
 
 int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
 
@@ -660,6 +673,7 @@
 					   struct iwl_rx_mem_buffer *rxb);
 void iwl_apm_stop(struct iwl_priv *priv);
 int iwl_apm_stop_master(struct iwl_priv *priv);
+int iwl_apm_init(struct iwl_priv *priv);
 
 void iwl_setup_rxon_timing(struct iwl_priv *priv);
 static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
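The asynchronous host-command callback in iwl-core.h now receives the decoded iwl_rx_packet instead of the raw sk_buff, matching the page-based RX rework. A sketch of a callback written against the new prototype (the function name is illustrative):

	/* Illustrative async-command callback under the new prototype. */
	static void iwl_example_cmd_callback(struct iwl_priv *priv,
					     struct iwl_device_cmd *cmd,
					     struct iwl_rx_packet *pkt)
	{
		IWL_DEBUG_INFO(priv, "async cmd 0x%02x completed\n",
			       pkt->hdr.cmd);
	}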
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 8f183e0..b6ed5a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -109,8 +109,9 @@
  * Bit fields:
  *  3-2:  0 = A, 1 = B, 2 = C, 3 = D step
  */
-#define CSR_HW_REV_WA_REG	(CSR_BASE+0x22C)
-#define CSR_DBG_HPET_MEM_REG	(CSR_BASE+0x240)
+#define CSR_HW_REV_WA_REG		(CSR_BASE+0x22C)
+#define CSR_DBG_HPET_MEM_REG		(CSR_BASE+0x240)
+#define CSR_DBG_LINK_PWR_MGMT_REG	(CSR_BASE+0x250)
 
 /* Bits for CSR_HW_IF_CONFIG_REG */
 #define CSR49_HW_IF_CONFIG_REG_BIT_4965_R	(0x00000010)
@@ -195,6 +196,7 @@
 #define CSR_RESET_REG_FLAG_SW_RESET                  (0x00000080)
 #define CSR_RESET_REG_FLAG_MASTER_DISABLED           (0x00000100)
 #define CSR_RESET_REG_FLAG_STOP_MASTER               (0x00000200)
+#define CSR_RESET_LINK_PWR_MGMT_DISABLED             (0x80000000)
 
 /* GP (general purpose) CONTROL */
 #define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY        (0x00000001)
@@ -235,6 +237,11 @@
 #define CSR_OTP_GP_REG_OTP_ACCESS_MODE	(0x00020000) /* 0 - absolute, 1 - relative */
 #define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK          (0x00100000) /* bit 20 */
 #define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK        (0x00200000) /* bit 21 */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK            (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_NO_POWER_SAVE            (0x00000000)
+#define CSR_GP_REG_MAC_POWER_SAVE           (0x01000000)
+#define CSR_GP_REG_PHY_POWER_SAVE           (0x02000000)
+#define CSR_GP_REG_POWER_SAVE_ERROR         (0x03000000)
 
 /* EEPROM signature */
 #define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP	(0x00000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index b9ca475..96c92ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -106,6 +106,7 @@
 		struct dentry *file_sensitivity;
 		struct dentry *file_chain_noise;
 		struct dentry *file_tx_power;
+		struct dentry *file_power_save_status;
 	} dbgfs_debug_files;
 	u32 sram_offset;
 	u32 sram_len;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 1794b9c..8784911 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -801,15 +801,20 @@
 	 * valid here. However, let's not confuse them and present
 	 * IWL_POWER_INDEX_1 as "1", not "0".
 	 */
-	if (value > 0)
+	if (value == 0)
+		return -EINVAL;
+	else if (value > 0)
 		value -= 1;
 
 	if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
 		return -EINVAL;
 
+	if (!iwl_is_ready_rf(priv))
+		return -EAGAIN;
+
 	priv->power_data.debug_sleep_level_override = value;
 
-	iwl_power_update_mode(priv, false);
+	iwl_power_update_mode(priv, true);
 
 	return count;
 }
@@ -882,10 +887,14 @@
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	char *buf;
 	int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
-		(IWL_MAX_NUM_QUEUES * 32 * 8) + 400;
+		(priv->cfg->num_of_queues * 32 * 8) + 400;
 	const u8 *ptr;
 	ssize_t ret;
 
+	if (!priv->txq) {
+		IWL_ERR(priv, "txq not ready\n");
+		return -EAGAIN;
+	}
 	buf = kzalloc(bufsz, GFP_KERNEL);
 	if (!buf) {
 		IWL_ERR(priv, "Can not allocate buffer\n");
@@ -977,8 +986,12 @@
 	int pos = 0;
 	int cnt;
 	int ret;
-	const size_t bufsz = sizeof(char) * 60 * IWL_MAX_NUM_QUEUES;
+	const size_t bufsz = sizeof(char) * 60 * priv->cfg->num_of_queues;
 
+	if (!priv->txq) {
+		IWL_ERR(priv, "txq not ready\n");
+		return -EAGAIN;
+	}
 	buf = kzalloc(bufsz, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
@@ -1069,10 +1082,10 @@
 		sizeof(struct statistics_rx_non_phy) * 20 +
 		sizeof(struct statistics_rx_ht_phy) * 20 + 400;
 	ssize_t ret;
-	struct statistics_rx_phy *ofdm;
-	struct statistics_rx_phy *cck;
-	struct statistics_rx_non_phy *general;
-	struct statistics_rx_ht_phy *ht;
+	struct statistics_rx_phy *ofdm, *accum_ofdm;
+	struct statistics_rx_phy *cck, *accum_cck;
+	struct statistics_rx_non_phy *general, *accum_general;
+	struct statistics_rx_ht_phy *ht, *accum_ht;
 
 	if (!iwl_is_alive(priv))
 		return -EAGAIN;
@@ -1101,155 +1114,268 @@
 	cck = &priv->statistics.rx.cck;
 	general = &priv->statistics.rx.general;
 	ht = &priv->statistics.rx.ofdm_ht;
+	accum_ofdm = &priv->accum_statistics.rx.ofdm;
+	accum_cck = &priv->accum_statistics.rx.cck;
+	accum_general = &priv->accum_statistics.rx.general;
+	accum_ht = &priv->accum_statistics.rx.ofdm_ht;
 	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt: %u\n",
-			 le32_to_cpu(ofdm->ina_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt: %u\n",
-			 le32_to_cpu(ofdm->fina_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n",
-			 le32_to_cpu(ofdm->plcp_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n",
-			 le32_to_cpu(ofdm->crc32_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n",
-			 le32_to_cpu(ofdm->overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n",
-			 le32_to_cpu(ofdm->early_overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n",
-			 le32_to_cpu(ofdm->crc32_good));
-	pos += scnprintf(buf + pos, bufsz - pos, "false_alarm_cnt: %u\n",
-			 le32_to_cpu(ofdm->false_alarm_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_sync_err_cnt: %u\n",
-			 le32_to_cpu(ofdm->fina_sync_err_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sfd_timeout: %u\n",
-			 le32_to_cpu(ofdm->sfd_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_timeout: %u\n",
-			 le32_to_cpu(ofdm->fina_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "unresponded_rts: %u\n",
-			 le32_to_cpu(ofdm->unresponded_rts));
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"rxe_frame_limit_overrun: %u\n",
-			le32_to_cpu(ofdm->rxe_frame_limit_overrun));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_ack_cnt: %u\n",
-			 le32_to_cpu(ofdm->sent_ack_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_cts_cnt: %u\n",
-			 le32_to_cpu(ofdm->sent_cts_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_ba_rsp_cnt: %u\n",
-			 le32_to_cpu(ofdm->sent_ba_rsp_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "dsp_self_kill: %u\n",
-			 le32_to_cpu(ofdm->dsp_self_kill));
-	pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n",
-			 le32_to_cpu(ofdm->mh_format_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "re_acq_main_rssi_sum: %u\n",
-			 le32_to_cpu(ofdm->re_acq_main_rssi_sum));
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "overrun_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->overrun_err),
+			 accum_ofdm->overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "early_overrun_err:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->early_overrun_err),
+			 accum_ofdm->early_overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->crc32_good),
+			 accum_ofdm->crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "false_alarm_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->false_alarm_cnt),
+			 accum_ofdm->false_alarm_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->fina_sync_err_cnt),
+			 accum_ofdm->fina_sync_err_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sfd_timeout:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->sfd_timeout),
+			 accum_ofdm->sfd_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "fina_timeout:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->fina_timeout),
+			 accum_ofdm->fina_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "unresponded_rts:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->unresponded_rts),
+			 accum_ofdm->unresponded_rts);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+			 accum_ofdm->rxe_frame_limit_overrun);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->sent_ack_cnt),
+			 accum_ofdm->sent_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->sent_cts_cnt),
+			 accum_ofdm->sent_cts_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+			 accum_ofdm->sent_ba_rsp_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "dsp_self_kill:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->dsp_self_kill),
+			 accum_ofdm->dsp_self_kill);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "mh_format_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->mh_format_err),
+			 accum_ofdm->mh_format_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+			 accum_ofdm->re_acq_main_rssi_sum);
 
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt: %u\n",
-			 le32_to_cpu(cck->ina_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt: %u\n",
-			 le32_to_cpu(cck->fina_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n",
-			 le32_to_cpu(cck->plcp_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n",
-			 le32_to_cpu(cck->crc32_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n",
-			 le32_to_cpu(cck->overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n",
-			 le32_to_cpu(cck->early_overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n",
-			 le32_to_cpu(cck->crc32_good));
-	pos += scnprintf(buf + pos, bufsz - pos, "false_alarm_cnt: %u\n",
-			 le32_to_cpu(cck->false_alarm_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_sync_err_cnt: %u\n",
-			 le32_to_cpu(cck->fina_sync_err_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sfd_timeout: %u\n",
-			 le32_to_cpu(cck->sfd_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "fina_timeout: %u\n",
-			 le32_to_cpu(cck->fina_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "unresponded_rts: %u\n",
-			 le32_to_cpu(cck->unresponded_rts));
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"rxe_frame_limit_overrun: %u\n",
-			le32_to_cpu(cck->rxe_frame_limit_overrun));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_ack_cnt: %u\n",
-			 le32_to_cpu(cck->sent_ack_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_cts_cnt: %u\n",
-			 le32_to_cpu(cck->sent_cts_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "sent_ba_rsp_cnt: %u\n",
-			 le32_to_cpu(cck->sent_ba_rsp_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "dsp_self_kill: %u\n",
-			 le32_to_cpu(cck->dsp_self_kill));
-	pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n",
-			 le32_to_cpu(cck->mh_format_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "re_acq_main_rssi_sum: %u\n",
-			 le32_to_cpu(cck->re_acq_main_rssi_sum));
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "overrun_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->overrun_err),
+			 accum_cck->overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "early_overrun_err:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->early_overrun_err),
+			 accum_cck->early_overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "false_alarm_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->false_alarm_cnt),
+			 accum_cck->false_alarm_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->fina_sync_err_cnt),
+			 accum_cck->fina_sync_err_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sfd_timeout:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->sfd_timeout),
+			 accum_cck->sfd_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "fina_timeout:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->fina_timeout),
+			 accum_cck->fina_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "unresponded_rts:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->unresponded_rts),
+			 accum_cck->unresponded_rts);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->rxe_frame_limit_overrun),
+			 accum_cck->rxe_frame_limit_overrun);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->sent_ack_cnt),
+			 accum_cck->sent_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->sent_cts_cnt),
+			 accum_cck->sent_cts_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->sent_ba_rsp_cnt),
+			 accum_cck->sent_ba_rsp_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "dsp_self_kill:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->dsp_self_kill),
+			 accum_cck->dsp_self_kill);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "mh_format_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->mh_format_err),
+			 accum_cck->mh_format_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+			 le32_to_cpu(cck->re_acq_main_rssi_sum),
+			 accum_cck->re_acq_main_rssi_sum);
 
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts: %u\n",
-			 le32_to_cpu(general->bogus_cts));
-	pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack: %u\n",
-			 le32_to_cpu(general->bogus_ack));
-	pos += scnprintf(buf + pos, bufsz - pos, "non_bssid_frames: %u\n",
-			 le32_to_cpu(general->non_bssid_frames));
-	pos += scnprintf(buf + pos, bufsz - pos, "filtered_frames: %u\n",
-			 le32_to_cpu(general->filtered_frames));
-	pos += scnprintf(buf + pos, bufsz - pos, "non_channel_beacons: %u\n",
-			 le32_to_cpu(general->non_channel_beacons));
-	pos += scnprintf(buf + pos, bufsz - pos, "channel_beacons: %u\n",
-			 le32_to_cpu(general->channel_beacons));
-	pos += scnprintf(buf + pos, bufsz - pos, "num_missed_bcon: %u\n",
-			 le32_to_cpu(general->num_missed_bcon));
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"adc_rx_saturation_time: %u\n",
-			le32_to_cpu(general->adc_rx_saturation_time));
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->bogus_cts),
+			 accum_general->bogus_cts);
+	pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->bogus_ack),
+			 accum_general->bogus_ack);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"ina_detection_search_time: %u\n",
-			le32_to_cpu(general->ina_detection_search_time));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_a: %u\n",
-			 le32_to_cpu(general->beacon_silence_rssi_a));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_b: %u\n",
-			 le32_to_cpu(general->beacon_silence_rssi_b));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_c: %u\n",
-			 le32_to_cpu(general->beacon_silence_rssi_c));
+			 "non_bssid_frames:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->non_bssid_frames),
+			 accum_general->non_bssid_frames);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"interference_data_flag: %u\n",
-			le32_to_cpu(general->interference_data_flag));
-	pos += scnprintf(buf + pos, bufsz - pos, "channel_load: %u\n",
-			 le32_to_cpu(general->channel_load));
-	pos += scnprintf(buf + pos, bufsz - pos, "dsp_false_alarms: %u\n",
-			 le32_to_cpu(general->dsp_false_alarms));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_a: %u\n",
-			 le32_to_cpu(general->beacon_rssi_a));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_b: %u\n",
-			 le32_to_cpu(general->beacon_rssi_b));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_c: %u\n",
-			 le32_to_cpu(general->beacon_rssi_c));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_a: %u\n",
-			 le32_to_cpu(general->beacon_energy_a));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_b: %u\n",
-			 le32_to_cpu(general->beacon_energy_b));
-	pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_c: %u\n",
-			 le32_to_cpu(general->beacon_energy_c));
+			 "filtered_frames:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->filtered_frames),
+			 accum_general->filtered_frames);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "non_channel_beacons:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->non_channel_beacons),
+			 accum_general->non_channel_beacons);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "channel_beacons:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->channel_beacons),
+			 accum_general->channel_beacons);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "num_missed_bcon:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->num_missed_bcon),
+			 accum_general->num_missed_bcon);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"adc_rx_saturation_time:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->adc_rx_saturation_time),
+			 accum_general->adc_rx_saturation_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"ina_detect_search_tm:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->ina_detection_search_time),
+			 accum_general->ina_detection_search_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_silence_rssi_a:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_silence_rssi_a),
+			 accum_general->beacon_silence_rssi_a);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_silence_rssi_b:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_silence_rssi_b),
+			 accum_general->beacon_silence_rssi_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_silence_rssi_c:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_silence_rssi_c),
+			 accum_general->beacon_silence_rssi_c);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"interference_data_flag:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->interference_data_flag),
+			 accum_general->interference_data_flag);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "channel_load:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->channel_load),
+			 accum_general->channel_load);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "dsp_false_alarms:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->dsp_false_alarms),
+			 accum_general->dsp_false_alarms);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_rssi_a:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_rssi_a),
+			 accum_general->beacon_rssi_a);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_rssi_b:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_rssi_b),
+			 accum_general->beacon_rssi_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_rssi_c:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_rssi_c),
+			 accum_general->beacon_rssi_c);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_energy_a:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_energy_a),
+			 accum_general->beacon_energy_a);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_energy_b:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_energy_b),
+			 accum_general->beacon_energy_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "beacon_energy_c:\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->beacon_energy_c),
+			 accum_general->beacon_energy_c);
 
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n",
-			 le32_to_cpu(ht->plcp_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n",
-			 le32_to_cpu(ht->overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n",
-			 le32_to_cpu(ht->early_overrun_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n",
-			 le32_to_cpu(ht->crc32_good));
-	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n",
-			 le32_to_cpu(ht->crc32_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n",
-			 le32_to_cpu(ht->mh_format_err));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg_crc32_good: %u\n",
-			 le32_to_cpu(ht->agg_crc32_good));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg_mpdu_cnt: %u\n",
-			 le32_to_cpu(ht->agg_mpdu_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt: %u\n",
-			 le32_to_cpu(ht->agg_cnt));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "overrun_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "early_overrun_err:\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->early_overrun_err),
+			 accum_ht->early_overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "mh_format_err:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->mh_format_err),
+			 accum_ht->mh_format_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg_crc32_good:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->agg_crc32_good),
+			 accum_ht->agg_crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg_mpdu_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->agg_mpdu_cnt),
+			 accum_ht->agg_mpdu_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 	kfree(buf);
@@ -1265,7 +1391,7 @@
 	char *buf;
 	int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
 	ssize_t ret;
-	struct statistics_tx *tx;
+	struct statistics_tx *tx, *accum_tx;
 
 	if (!iwl_is_alive(priv))
 		return -EAGAIN;
@@ -1291,62 +1417,107 @@
 	 * might not reflect the current uCode activity
 	 */
 	tx = &priv->statistics.tx;
+	accum_tx = &priv->accum_statistics.tx;
 	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "preamble: %u\n",
-			 le32_to_cpu(tx->preamble_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "rx_detected_cnt: %u\n",
-			 le32_to_cpu(tx->rx_detected_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "bt_prio_defer_cnt: %u\n",
-			 le32_to_cpu(tx->bt_prio_defer_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "bt_prio_kill_cnt: %u\n",
-			 le32_to_cpu(tx->bt_prio_kill_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "few_bytes_cnt: %u\n",
-			 le32_to_cpu(tx->few_bytes_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "cts_timeout: %u\n",
-			 le32_to_cpu(tx->cts_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "ack_timeout: %u\n",
-			 le32_to_cpu(tx->ack_timeout));
-	pos += scnprintf(buf + pos, bufsz - pos, "expected_ack_cnt: %u\n",
-			 le32_to_cpu(tx->expected_ack_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "actual_ack_cnt: %u\n",
-			 le32_to_cpu(tx->actual_ack_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "dump_msdu_cnt: %u\n",
-			 le32_to_cpu(tx->dump_msdu_cnt));
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"burst_abort_next_frame_mismatch_cnt: %u\n",
-			le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt));
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "preamble:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->preamble_cnt),
+			 accum_tx->preamble_cnt);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"burst_abort_missing_next_frame_cnt: %u\n",
-			le32_to_cpu(tx->burst_abort_missing_next_frame_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "cts_timeout_collision: %u\n",
-			 le32_to_cpu(tx->cts_timeout_collision));
+			 "rx_detected_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->rx_detected_cnt),
+			 accum_tx->rx_detected_cnt);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"ack_or_ba_timeout_collision: %u\n",
-			le32_to_cpu(tx->ack_or_ba_timeout_collision));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg ba_timeout: %u\n",
-			 le32_to_cpu(tx->agg.ba_timeout));
+			 "bt_prio_defer_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->bt_prio_defer_cnt),
+			 accum_tx->bt_prio_defer_cnt);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"agg ba_reschedule_frames: %u\n",
-			le32_to_cpu(tx->agg.ba_reschedule_frames));
+			 "bt_prio_kill_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->bt_prio_kill_cnt),
+			 accum_tx->bt_prio_kill_cnt);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"agg scd_query_agg_frame_cnt: %u\n",
-			le32_to_cpu(tx->agg.scd_query_agg_frame_cnt));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg scd_query_no_agg: %u\n",
-			 le32_to_cpu(tx->agg.scd_query_no_agg));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg scd_query_agg: %u\n",
-			 le32_to_cpu(tx->agg.scd_query_agg));
+			 "few_bytes_cnt:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->few_bytes_cnt),
+			 accum_tx->few_bytes_cnt);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			"agg scd_query_mismatch: %u\n",
-			le32_to_cpu(tx->agg.scd_query_mismatch));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg frame_not_ready: %u\n",
-			 le32_to_cpu(tx->agg.frame_not_ready));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg underrun: %u\n",
-			 le32_to_cpu(tx->agg.underrun));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg bt_prio_kill: %u\n",
-			 le32_to_cpu(tx->agg.bt_prio_kill));
-	pos += scnprintf(buf + pos, bufsz - pos, "agg rx_ba_rsp_cnt: %u\n",
-			 le32_to_cpu(tx->agg.rx_ba_rsp_cnt));
+			 "cts_timeout:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ack_timeout:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->ack_timeout),
+			 accum_tx->ack_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "expected_ack_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->expected_ack_cnt),
+			 accum_tx->expected_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "actual_ack_cnt:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->actual_ack_cnt),
+			 accum_tx->actual_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "dump_msdu_cnt:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->dump_msdu_cnt),
+			 accum_tx->dump_msdu_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "abort_nxt_frame_mismatch:"
+			 "\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+			 accum_tx->burst_abort_next_frame_mismatch_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "abort_missing_nxt_frame:"
+			 "\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+			 accum_tx->burst_abort_missing_next_frame_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "cts_timeout_collision:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->cts_timeout_collision),
+			 accum_tx->cts_timeout_collision);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"ack_ba_timeout_collision:\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->ack_or_ba_timeout_collision),
+			 accum_tx->ack_or_ba_timeout_collision);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg ba_timeout:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.ba_timeout),
+			 accum_tx->agg.ba_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"agg ba_resched_frames:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.ba_reschedule_frames),
+			 accum_tx->agg.ba_reschedule_frames);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"agg scd_query_agg_frame:\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+			 accum_tx->agg.scd_query_agg_frame_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg scd_query_no_agg:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.scd_query_no_agg),
+			 accum_tx->agg.scd_query_no_agg);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg scd_query_agg:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.scd_query_agg),
+			 accum_tx->agg.scd_query_agg);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"agg scd_query_mismatch:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.scd_query_mismatch),
+			 accum_tx->agg.scd_query_mismatch);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg frame_not_ready:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.frame_not_ready),
+			 accum_tx->agg.frame_not_ready);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg underrun:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.underrun),
+			 accum_tx->agg.underrun);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg bt_prio_kill:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.bt_prio_kill),
+			 accum_tx->agg.bt_prio_kill);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "agg rx_ba_rsp_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+			 accum_tx->agg.rx_ba_rsp_cnt);
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 	kfree(buf);
@@ -1362,9 +1533,9 @@
 	char *buf;
 	int bufsz = sizeof(struct statistics_general) * 4 + 250;
 	ssize_t ret;
-	struct statistics_general *general;
-	struct statistics_dbg *dbg;
-	struct statistics_div *div;
+	struct statistics_general *general, *accum_general;
+	struct statistics_dbg *dbg, *accum_dbg;
+	struct statistics_div *div, *accum_div;
 
 	if (!iwl_is_alive(priv))
 		return -EAGAIN;
@@ -1392,34 +1563,53 @@
 	general = &priv->statistics.general;
 	dbg = &priv->statistics.general.dbg;
 	div = &priv->statistics.general.div;
+	accum_general = &priv->accum_statistics.general;
+	accum_dbg = &priv->accum_statistics.general.dbg;
+	accum_div = &priv->accum_statistics.general.div;
 	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "temperature: %u\n",
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "temperature:\t\t\t%u\n",
 			 le32_to_cpu(general->temperature));
-	pos += scnprintf(buf + pos, bufsz - pos, "temperature_m: %u\n",
+	pos += scnprintf(buf + pos, bufsz - pos, "temperature_m:\t\t\t%u\n",
 			 le32_to_cpu(general->temperature_m));
-	pos += scnprintf(buf + pos, bufsz - pos, "burst_check: %u\n",
-			 le32_to_cpu(dbg->burst_check));
-	pos += scnprintf(buf + pos, bufsz - pos, "burst_count: %u\n",
-			 le32_to_cpu(dbg->burst_count));
-	pos += scnprintf(buf + pos, bufsz - pos, "sleep_time: %u\n",
-			 le32_to_cpu(general->sleep_time));
-	pos += scnprintf(buf + pos, bufsz - pos, "slots_out: %u\n",
-			 le32_to_cpu(general->slots_out));
-	pos += scnprintf(buf + pos, bufsz - pos, "slots_idle: %u\n",
-			 le32_to_cpu(general->slots_idle));
-	pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp: %u\n",
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "burst_check:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(dbg->burst_check),
+			 accum_dbg->burst_check);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "burst_count:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(dbg->burst_count),
+			 accum_dbg->burst_count);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "sleep_time:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->sleep_time),
+			 accum_general->sleep_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "slots_out:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->slots_out),
+			 accum_general->slots_out);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "slots_idle:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->slots_idle),
+			 accum_general->slots_idle);
+	pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
 			 le32_to_cpu(general->ttl_timestamp));
-	pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a: %u\n",
-			 le32_to_cpu(div->tx_on_a));
-	pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b: %u\n",
-			 le32_to_cpu(div->tx_on_b));
-	pos += scnprintf(buf + pos, bufsz - pos, "exec_time: %u\n",
-			 le32_to_cpu(div->exec_time));
-	pos += scnprintf(buf + pos, bufsz - pos, "probe_time: %u\n",
-			 le32_to_cpu(div->probe_time));
-	pos += scnprintf(buf + pos, bufsz - pos, "rx_enable_counter: %u\n",
-			 le32_to_cpu(general->rx_enable_counter));
+	pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a);
+	pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "exec_time:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(div->exec_time), accum_div->exec_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "probe_time:\t\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(div->probe_time), accum_div->probe_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "rx_enable_counter:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(general->rx_enable_counter),
+			 accum_general->rx_enable_counter);
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 	kfree(buf);
 	return ret;
@@ -1615,6 +1805,29 @@
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
+						    char __user *user_buf,
+						    size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+	char buf[60];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	u32 pwrsave_status;
+
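+	/* read the current power-save state (none/MAC/PHY) from CSR_GP_CNTRL */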
+	pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
+			CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+	pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
+		(pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+		(pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+		(pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+		"error");
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
 DEBUGFS_READ_WRITE_FILE_OPS(rx_statistics);
 DEBUGFS_READ_WRITE_FILE_OPS(tx_statistics);
 DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1626,6 +1839,7 @@
 DEBUGFS_READ_FILE_OPS(sensitivity);
 DEBUGFS_READ_FILE_OPS(chain_noise);
 DEBUGFS_READ_FILE_OPS(tx_power);
+DEBUGFS_READ_FILE_OPS(power_save_status);
 
 /*
  * Create the debugfs files and directories
@@ -1673,6 +1887,7 @@
 	DEBUGFS_ADD_FILE(rx_queue, debug);
 	DEBUGFS_ADD_FILE(tx_queue, debug);
 	DEBUGFS_ADD_FILE(tx_power, debug);
+	DEBUGFS_ADD_FILE(power_save_status, debug);
 	if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
 		DEBUGFS_ADD_FILE(ucode_rx_stats, debug);
 		DEBUGFS_ADD_FILE(ucode_tx_stats, debug);
@@ -1725,6 +1940,7 @@
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
 	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
+	DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_power_save_status);
 	if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
 		DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
 			file_ucode_rx_stats);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 72946c1..e7ce673 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -85,8 +85,6 @@
 				    __le32 *tx_flags);
 extern int iwl5000_calc_rssi(struct iwl_priv *priv,
 			     struct iwl_rx_phy_res *rx_resp);
-extern int iwl5000_apm_init(struct iwl_priv *priv);
-extern int iwl5000_apm_reset(struct iwl_priv *priv);
 extern void iwl5000_nic_config(struct iwl_priv *priv);
 extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
 extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
@@ -147,12 +145,13 @@
 #define	DEFAULT_LONG_RETRY_LIMIT  4U
 
 struct iwl_rx_mem_buffer {
-	dma_addr_t real_dma_addr;
-	dma_addr_t aligned_dma_addr;
-	struct sk_buff *skb;
+	dma_addr_t page_dma;
+	struct page *page;
 	struct list_head list;
 };
 
+#define rxb_addr(r) page_address(r->page)
+
 /* defined below */
 struct iwl_device_cmd;
 
@@ -168,7 +167,7 @@
 	 */
 	void (*callback)(struct iwl_priv *priv,
 			 struct iwl_device_cmd *cmd,
-			 struct sk_buff *skb);
+			 struct iwl_rx_packet *pkt);
 
 	/* The CMD_SIZE_HUGE flag bit indicates that the command
 	 * structure is stored at the end of the shared queue memory. */
@@ -324,6 +323,12 @@
  * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
 #define IWL_MIN_NUM_QUEUES	10
 
+/*
+ * uCode queue management definitions ...
+ * Queue #4 is the command queue for 3945/4965/5x00/1000/6x00.
+ */
+#define IWL_CMD_QUEUE_NUM	4
+
 /* Power management (not Tx power) structures */
 
 enum iwl_pwr_src {
@@ -359,7 +364,14 @@
 	CMD_WANT_SKB = (1 << 2),
 };
 
-#define IWL_CMD_MAX_PAYLOAD 320
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/*
+ * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
+ * SNAP header and alignment. It should also be big enough for 802.11
+ * control frames.
+ */
+#define IWL_LINK_HDR_MAX 64
 
 /**
  * struct iwl_device_cmd
@@ -376,7 +388,8 @@
 		u16 val16;
 		u32 val32;
 		struct iwl_tx_cmd tx;
-		u8 payload[IWL_CMD_MAX_PAYLOAD];
+		struct iwl6000_channel_switch_cmd chswitch;
+		u8 payload[DEF_CMD_PAYLOAD_SIZE];
 	} __attribute__ ((packed)) cmd;
 } __attribute__ ((packed));
 
@@ -385,21 +398,15 @@
 
 struct iwl_host_cmd {
 	const void *data;
-	struct sk_buff *reply_skb;
+	unsigned long reply_page;
 	void (*callback)(struct iwl_priv *priv,
 			 struct iwl_device_cmd *cmd,
-			 struct sk_buff *skb);
+			 struct iwl_rx_packet *pkt);
 	u32 flags;
 	u16 len;
 	u8 id;
 };
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 #define SUP_RATE_11A_MAX_NUM_CHANNELS  8
 #define SUP_RATE_11B_MAX_NUM_CHANNELS  4
 #define SUP_RATE_11G_MAX_NUM_CHANNELS  12
@@ -563,6 +570,19 @@
 	struct iwl_hw_key keyinfo;
 };
 
+/*
+ * iwl_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * At the moment it is used for the station's rate scaling information.
+ */
+struct iwl_station_priv {
+	struct iwl_lq_sta lq_sta;
+};
+
 /* one for each uCode image (inst/data, boot/init/runtime) */
 struct fw_desc {
 	void *v_addr;		/* access by driver */
@@ -624,6 +644,10 @@
 	u16 auto_corr_max_cck_mrc;
 	u16 auto_corr_min_cck;
 	u16 auto_corr_min_cck_mrc;
+
+	u16 barker_corr_th_min;
+	u16 barker_corr_th_min_mrc;
+	u16 nrg_th_cca;
 };
 
 
@@ -641,7 +665,7 @@
  * @valid_tx/rx_ant: usable antennas
  * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
  * @max_rxq_log: Log-base-2 of max_rxq_size
- * @rx_buf_size: Rx buffer size
+ * @rx_page_order: Rx buffer page order
  * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
  * @max_stations:
  * @bcast_sta_id:
@@ -664,9 +688,8 @@
 	u8  valid_rx_ant;
 	u16 max_rxq_size;
 	u16 max_rxq_log;
-	u32 rx_buf_size;
+	u32 rx_page_order;
 	u32 rx_wrt_ptr_reg;
-	u32 max_pkt_size;
 	u8  max_stations;
 	u8  bcast_sta_id;
 	u8  ht40_channel;
@@ -713,7 +736,11 @@
 
 static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
 {
-	/* This is for scan command, the big buffer at end of command array */
+	/*
+	 * This is for the init calibration result and the scan command,
+	 * which require a buffer > TFD_MAX_PAYLOAD_SIZE:
+	 * the big buffer at the end of the command array
+	 */
 	if (is_huge)
 		return q->n_window;	/* must be power of 2 */
 
@@ -845,6 +872,10 @@
 	s32 nrg_auto_corr_silence_diff;
 	u32 num_in_cck_no_fa;
 	u32 nrg_th_ofdm;
+
+	u16 barker_corr_th_min;
+	u16 barker_corr_th_min_mrc;
+	u16 nrg_th_cca;
 };
 
 /* Chain noise (differential Rx gain) calib data */
@@ -961,8 +992,6 @@
 };
 #endif
 
-#define IWL_MAX_NUM_QUEUES	20 /* FIXME: do dynamic allocation */
-
 struct iwl_priv {
 
 	/* ieee device used by generic ieee processing code */
@@ -976,7 +1005,7 @@
 	int frames_count;
 
 	enum ieee80211_band band;
-	int alloc_rxb_skb;
+	int alloc_rxb_page;
 
 	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
 				       struct iwl_rx_mem_buffer *rxb);
@@ -1081,7 +1110,6 @@
 	u8 last_phy_res[100];
 
 	/* Rate scaling data */
-	s8 data_retry_limit;
 	u8 retry_rate;
 
 	wait_queue_head_t wait_command_queue;
@@ -1090,7 +1118,7 @@
 
 	/* Rx and Tx DMA processing queues */
 	struct iwl_rx_queue rxq;
-	struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES];
+	struct iwl_tx_queue *txq;
 	unsigned long txq_ctx_active_msk;
 	struct iwl_dma_ptr  kw;	/* keep warm address */
 	struct iwl_dma_ptr  scd_bc_tbls;
@@ -1113,7 +1141,9 @@
 	struct iwl_tt_mgmt thermal_throttle;
 
 	struct iwl_notif_statistics statistics;
-	unsigned long last_statistics_time;
+#ifdef CONFIG_IWLWIFI_DEBUG
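+	/* running totals of uCode statistics, maintained by the driver */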
+	struct iwl_notif_statistics accum_statistics;
+#endif
 
 	/* context information */
 	u16 rates_mask;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 2e8c405..9429cb1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -358,6 +358,14 @@
 		udelay(5);
 		iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
 				    APMG_PS_CTRL_VAL_RESET_REQ);
+
+		/*
+		 * CSR auto clock gate disable bit -
+		 * this is only applicable for HW with OTP shadow RAM
+		 */
+		if (priv->cfg->shadow_ram_support)
+			iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG,
+				CSR_RESET_LINK_PWR_MGMT_DISABLED);
 	}
 	return ret;
 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index b363c96..5ba5a4e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -261,9 +261,12 @@
 /* 1000 Specific */
 #define EEPROM_1000_EEPROM_VERSION	(0x15C)
 
-/* 60x0 Specific */
+/* 6x00 Specific */
 #define EEPROM_6000_EEPROM_VERSION	(0x434)
 
+/* 6x50 Specific */
+#define EEPROM_6050_EEPROM_VERSION	(0x532)
+
 /* OTP */
 /* lower blocks contain EEPROM image and calibration data */
 #define OTP_LOW_IMAGE_SIZE		(2 * 512 * sizeof(u16)) /* 2 KB */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 532c8d6..f2a60dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -92,6 +92,8 @@
 		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
 		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
 		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
+		IWL_CMD(TEMPERATURE_NOTIFICATION);
+		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
 	default:
 		return "UNKNOWN";
 
@@ -103,17 +105,8 @@
 
 static void iwl_generic_cmd_callback(struct iwl_priv *priv,
 				     struct iwl_device_cmd *cmd,
-				     struct sk_buff *skb)
+				     struct iwl_rx_packet *pkt)
 {
-	struct iwl_rx_packet *pkt = NULL;
-
-	if (!skb) {
-		IWL_ERR(priv, "Error: Response NULL in %s.\n",
-				get_cmd_string(cmd->hdr.cmd));
-		return;
-	}
-
-	pkt = (struct iwl_rx_packet *)skb->data;
 	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
 			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
@@ -215,7 +208,7 @@
 		ret = -EIO;
 		goto fail;
 	}
-	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_skb) {
+	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
 		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
 			  get_cmd_string(cmd->id));
 		ret = -EIO;
@@ -237,9 +230,9 @@
 							~CMD_WANT_SKB;
 	}
 fail:
-	if (cmd->reply_skb) {
-		dev_kfree_skb_any(cmd->reply_skb);
-		cmd->reply_skb = NULL;
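+	/* release the Rx page that carried the synchronous command response */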
+	if (cmd->reply_page) {
+		free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
+		cmd->reply_page = 0;
 	}
 out:
 	clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
@@ -272,7 +265,7 @@
 			   u8 id, u16 len, const void *data,
 			   void (*callback)(struct iwl_priv *priv,
 					    struct iwl_device_cmd *cmd,
-					    struct sk_buff *skb))
+					    struct iwl_rx_packet *pkt))
 {
 	struct iwl_host_cmd cmd = {
 		.id = id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 9c6b149..9bce2c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -66,7 +66,7 @@
 
 struct iwl_power_vec_entry {
 	struct iwl_powertable_cmd cmd;
-	u8 no_dtim;
+	u8 no_dtim;	/* number of DTIM periods to skip */
 };
 
 #define IWL_DTIM_RANGE_0_MAX	2
@@ -83,8 +83,9 @@
 				     cpu_to_le32(X4)}
 /* default power management (not Tx power) table values */
 /* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
+/* DTIM 0 - 2 */
 static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
-	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
+	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
 	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
 	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
 	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
@@ -93,15 +94,17 @@
 
 
 /* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
+/* DTIM 3 - 10 */
 static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
 	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
 	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
 	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
 	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
-	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2}
+	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
 };
 
 /* for DTIM period > IWL_DTIM_RANGE_1_MAX */
+/* DTIM 11 - */
 static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
 	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
 	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
@@ -115,13 +118,15 @@
 				 enum iwl_power_level lvl, int period)
 {
 	const struct iwl_power_vec_entry *table;
-	int max_sleep, i;
-	bool skip;
+	int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
+	int i;
+	u8 skip;
+	u32 slp_itrvl;
 
 	table = range_2;
-	if (period < IWL_DTIM_RANGE_1_MAX)
+	if (period <= IWL_DTIM_RANGE_1_MAX)
 		table = range_1;
-	if (period < IWL_DTIM_RANGE_0_MAX)
+	if (period <= IWL_DTIM_RANGE_0_MAX)
 		table = range_0;
 
 	BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);
@@ -129,34 +134,60 @@
 	*cmd = table[lvl].cmd;
 
 	if (period == 0) {
-		skip = false;
+		skip = 0;
 		period = 1;
+		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
+			max_sleep[i] =  1;
+
 	} else {
-		skip = !!table[lvl].no_dtim;
+		skip = table[lvl].no_dtim;
+		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
+			max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
+		max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
 	}
 
-	if (skip) {
-		__le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
-		max_sleep = le32_to_cpu(slp_itrvl);
-		if (max_sleep == 0xFF)
-			max_sleep = period * (skip + 1);
-		else if (max_sleep > period)
-			max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
+	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
+	/* figure out the listen interval based on dtim period and skip */
+	if (slp_itrvl == 0xFF)
+		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
+			cpu_to_le32(period * (skip + 1));
+
+	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
+	if (slp_itrvl > period)
+		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
+			cpu_to_le32((slp_itrvl / period) * period);
+
+	if (skip)
 		cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
-	} else {
-		max_sleep = period;
+	else
 		cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
-	}
 
-	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
-		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
-			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
+	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
+	if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
+		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
+			cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);
+
+	/* enforce max sleep interval */
+	for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
+		if (le32_to_cpu(cmd->sleep_interval[i]) >
+		    (max_sleep[i] * period))
+			cmd->sleep_interval[i] =
+				cpu_to_le32(max_sleep[i] * period);
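+		/* keep the sleep interval vector monotonically non-decreasing */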
+		if (i != (IWL_POWER_VEC_SIZE - 1)) {
+			if (le32_to_cpu(cmd->sleep_interval[i]) >
+			    le32_to_cpu(cmd->sleep_interval[i+1]))
+				cmd->sleep_interval[i] =
+					cmd->sleep_interval[i+1];
+		}
+	}
 
 	if (priv->power_data.pci_pm)
 		cmd->flags |= IWL_POWER_PCI_PM_MSK;
 	else
 		cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
 
+	IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
+			skip, period);
 	IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
 }
 
@@ -862,9 +893,7 @@
 	INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
 	INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
 
-	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
-	case CSR_HW_REV_TYPE_6x00:
-	case CSR_HW_REV_TYPE_6x50:
+	if (priv->cfg->adv_thermal_throttle) {
 		IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
 		tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
 					 IWL_TI_STATE_MAX, GFP_KERNEL);
@@ -897,11 +926,9 @@
 				&restriction_range[0], size);
 			priv->thermal_throttle.advanced_tt = true;
 		}
-		break;
-	default:
+	} else {
 		IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
 		priv->thermal_throttle.advanced_tt = false;
-		break;
 	}
 }
 EXPORT_SYMBOL(iwl_tt_initialize);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 493626b..e5339c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -200,7 +200,7 @@
 		list_del(element);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
+		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
@@ -239,8 +239,9 @@
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
-	struct sk_buff *skb;
+	struct page *page;
 	unsigned long flags;
+	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock_irqsave(&rxq->lock, flags);
@@ -251,30 +252,35 @@
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
-			priority |= __GFP_NOWARN;
-		/* Alloc a new receive buffer */
-		skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
-						priority);
+			gfp_mask |= __GFP_NOWARN;
 
-		if (!skb) {
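+		/* multi-page buffers must be allocated as compound pages */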
+		if (priv->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
+		/* Alloc a new receive buffer */
+		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+		if (!page) {
 			if (net_ratelimit())
-				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+					       "order: %d\n",
+					       priv->hw_params.rx_page_order);
+
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
 			    net_ratelimit())
-				IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
 					 priority == GFP_ATOMIC ?  "GFP_ATOMIC" : "GFP_KERNEL",
 					 rxq->free_count);
 			/* We don't reschedule replenish work here -- we will
 			 * call the restock method and if it still needs
 			 * more buffers it will schedule replenish */
-			break;
+			return;
 		}
 
 		spin_lock_irqsave(&rxq->lock, flags);
 
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			dev_kfree_skb_any(skb);
+			__free_pages(page, priv->hw_params.rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
@@ -283,24 +289,21 @@
 
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
-		rxb->skb = skb;
-		/* Get physical address of RB/SKB */
-		rxb->real_dma_addr = pci_map_single(
-					priv->pci_dev,
-					rxb->skb->data,
-					priv->hw_params.rx_buf_size + 256,
-					PCI_DMA_FROMDEVICE);
+		rxb->page = page;
+		/* Get physical address of the RB */
+		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
 		/* dma address must be no more than 36 bits */
-		BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
+		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
 		/* and also 256 byte aligned! */
-		rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
-		skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
+		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
 		spin_lock_irqsave(&rxq->lock, flags);
 
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
-		priv->alloc_rxb_skb++;
+		priv->alloc_rxb_page++;
 
 		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
@@ -336,12 +339,14 @@
 {
 	int i;
 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size + 256,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(rxq->pool[i].skb);
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
+			priv->alloc_rxb_page--;
 		}
 	}
 
@@ -405,14 +410,14 @@
 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		/* In the reset function, these buffers may have been allocated
 		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size + 256,
-					 PCI_DMA_FROMDEVICE);
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb(rxq->pool[i].skb);
-			rxq->pool[i].skb = NULL;
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			priv->alloc_rxb_page--;
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
 		}
 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
 	}
@@ -491,7 +496,7 @@
 				struct iwl_rx_mem_buffer *rxb)
 
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_missed_beacon_notif *missed_beacon;
 
 	missed_beacon = &pkt->u.missed_beacon;
@@ -548,13 +553,51 @@
 			priv->last_rx_noise);
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ *  based on the assumption that all statistics counters are 32-bit (DWORD)
+ *  FIXME: This function is for debugging only and does not handle
+ *  counter roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+					__le32 *stats)
+{
+	int i;
+	__le32 *prev_stats;
+	u32 *accum_stats;
+
+	prev_stats = (__le32 *)&priv->statistics;
+	accum_stats = (u32 *)&priv->accum_statistics;
+
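+	/* walk both notifications as arrays of 32-bit counters, skipping the
+	 * leading flag word, and accumulate only the positive deltas */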
+	for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
+	     i += sizeof(__le32), stats++, prev_stats++, accum_stats++)
+		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats))
+			*accum_stats += (le32_to_cpu(*stats) -
+				le32_to_cpu(*prev_stats));
+
+	/* reset accumulative statistics for "no-counter" type statistics */
+	priv->accum_statistics.general.temperature =
+		priv->statistics.general.temperature;
+	priv->accum_statistics.general.temperature_m =
+		priv->statistics.general.temperature_m;
+	priv->accum_statistics.general.ttl_timestamp =
+		priv->statistics.general.ttl_timestamp;
+	priv->accum_statistics.tx.tx_power.ant_a =
+		priv->statistics.tx.tx_power.ant_a;
+	priv->accum_statistics.tx.tx_power.ant_b =
+		priv->statistics.tx.tx_power.ant_b;
+	priv->accum_statistics.tx.tx_power.ant_c =
+		priv->statistics.tx.tx_power.ant_c;
+}
+#endif
+
 #define REG_RECALIB_PERIOD (60)
 
 void iwl_rx_statistics(struct iwl_priv *priv,
 			      struct iwl_rx_mem_buffer *rxb)
 {
 	int change;
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
 	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
 		     (int)sizeof(priv->statistics),
@@ -566,6 +609,9 @@
 		    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
 		   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
 
+#ifdef CONFIG_IWLWIFI_DEBUG
+	iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
 	memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
 
 	set_bit(STATUS_STATISTICS, &priv->status);
@@ -582,9 +628,6 @@
 		iwl_rx_calc_noise(priv);
 		queue_work(priv->workqueue, &priv->run_time_calib_work);
 	}
-
-	iwl_leds_background(priv);
-
 	if (priv->cfg->ops->lib->temp_ops.temperature && change)
 		priv->cfg->ops->lib->temp_ops.temperature(priv);
 }
@@ -878,6 +921,10 @@
 					struct iwl_rx_mem_buffer *rxb,
 					struct ieee80211_rx_status *stats)
 {
+	struct sk_buff *skb;
+	int ret = 0;
+	__le16 fc = hdr->frame_control;
+
 	/* We only process data packets if the interface is open */
 	if (unlikely(!priv->is_open)) {
 		IWL_DEBUG_DROP_LIMIT(priv,
@@ -890,15 +937,43 @@
 	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
 		return;
 
-	/* Resize SKB from mac header to end of packet */
-	skb_reserve(rxb->skb, (void *)hdr - (void *)rxb->skb->data);
-	skb_put(rxb->skb, len);
+	skb = alloc_skb(IWL_LINK_HDR_MAX, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(priv, "alloc_skb failed\n");
+		return;
+	}
 
-	iwl_update_stats(priv, false, hdr->frame_control, len);
-	memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats));
-	ieee80211_rx_irqsafe(priv->hw, rxb->skb);
-	priv->alloc_rxb_skb--;
-	rxb->skb = NULL;
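+	/* hand the frame to mac80211 as a paged fragment of the Rx page,
+	 * starting at the 802.11 header, so no copy is needed here */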
+	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+	/* mac80211 currently doesn't support paged SKBs. Convert to a
+	 * linear SKB for management frames and for data frames that require
+	 * software decryption or software defragmentation. */
+	if (ieee80211_is_mgmt(fc) ||
+	    ieee80211_has_protected(fc) ||
+	    ieee80211_has_morefrags(fc) ||
+	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+		ret = skb_linearize(skb);
+	else
+		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
+			 0 : -ENOMEM;
+
+	if (ret) {
+		kfree_skb(skb);
+		goto out;
+	}
+
+	/*
+	 * XXX: We cannot touch the page and its virtual memory (hdr) after
+	 * here. It might have already been freed by the above skb change.
+	 */
+
+	iwl_update_stats(priv, false, fc, len);
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+	ieee80211_rx(priv->hw, skb);
+ out:
+	priv->alloc_rxb_page--;
+	rxb->page = NULL;
 }
 
 /* This is necessary only for a number of statistics, see the caller. */
@@ -926,7 +1001,7 @@
 {
 	struct ieee80211_hdr *header;
 	struct ieee80211_rx_status rx_status;
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_phy_res *phy_res;
 	__le32 rx_pkt_status;
 	struct iwl4965_rx_mpdu_res_start *amsdu;
@@ -1087,7 +1162,7 @@
 void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	priv->last_phy_res[0] = 1;
 	memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
 	       sizeof(struct iwl_rx_phy_res));
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 41f9a06..4fca65a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -111,7 +111,7 @@
 static int iwl_send_scan_abort(struct iwl_priv *priv)
 {
 	int ret = 0;
-	struct iwl_rx_packet *res;
+	struct iwl_rx_packet *pkt;
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_SCAN_ABORT_CMD,
 		.flags = CMD_WANT_SKB,
@@ -131,21 +131,21 @@
 		return ret;
 	}
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->u.status != CAN_ABORT_STATUS) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->u.status != CAN_ABORT_STATUS) {
 		/* The scan abort will return 1 for success or
 		 * 2 for "failure".  A failure condition can be
 		 * due to simply not being in an active scan which
 		 * can occur if we send the scan abort before the
 		 * microcode has notified us that a scan is
 		 * completed. */
-		IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", res->u.status);
+		IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", pkt->u.status);
 		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
 		clear_bit(STATUS_SCAN_HW, &priv->status);
 	}
 
-	priv->alloc_rxb_skb--;
-	dev_kfree_skb_any(cmd.reply_skb);
+	priv->alloc_rxb_page--;
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return ret;
 }
@@ -155,7 +155,7 @@
 			      struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scanreq_notification *notif =
 	    (struct iwl_scanreq_notification *)pkt->u.raw;
 
@@ -167,7 +167,7 @@
 static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scanstart_notification *notif =
 	    (struct iwl_scanstart_notification *)pkt->u.raw;
 	priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
@@ -186,7 +186,7 @@
 				      struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scanresults_notification *notif =
 	    (struct iwl_scanresults_notification *)pkt->u.raw;
 
@@ -213,7 +213,7 @@
 				       struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
 
 	IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
index 022bcf1..1ea5cd3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
@@ -177,7 +177,7 @@
 static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
 					  struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
 
 	if (!report->state) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index c6633fe..dc74c16 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -99,32 +99,25 @@
 
 static void iwl_add_sta_callback(struct iwl_priv *priv,
 				 struct iwl_device_cmd *cmd,
-				 struct sk_buff *skb)
+				 struct iwl_rx_packet *pkt)
 {
-	struct iwl_rx_packet *res = NULL;
 	struct iwl_addsta_cmd *addsta =
 		(struct iwl_addsta_cmd *)cmd->cmd.payload;
 	u8 sta_id = addsta->sta.sta_id;
 
-	if (!skb) {
-		IWL_ERR(priv, "Error: Response NULL in REPLY_ADD_STA.\n");
-		return;
-	}
-
-	res = (struct iwl_rx_packet *)skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
-			  res->hdr.flags);
+			  pkt->hdr.flags);
 		return;
 	}
 
-	switch (res->u.add_sta.status) {
+	switch (pkt->u.add_sta.status) {
 	case ADD_STA_SUCCESS_MSK:
 		iwl_sta_ucode_activate(priv, sta_id);
 		 /* fall through */
 	default:
 		IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
-			     res->u.add_sta.status);
+			     pkt->u.add_sta.status);
 		break;
 	}
 }
@@ -132,7 +125,7 @@
 int iwl_send_add_sta(struct iwl_priv *priv,
 		     struct iwl_addsta_cmd *sta, u8 flags)
 {
-	struct iwl_rx_packet *res = NULL;
+	struct iwl_rx_packet *pkt = NULL;
 	int ret = 0;
 	u8 data[sizeof(*sta)];
 	struct iwl_host_cmd cmd = {
@@ -152,15 +145,15 @@
 	if (ret || (flags & CMD_ASYNC))
 		return ret;
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
-			  res->hdr.flags);
+			  pkt->hdr.flags);
 		ret = -EIO;
 	}
 
 	if (ret == 0) {
-		switch (res->u.add_sta.status) {
+		switch (pkt->u.add_sta.status) {
 		case ADD_STA_SUCCESS_MSK:
 			iwl_sta_ucode_activate(priv, sta->sta.sta_id);
 			IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
@@ -172,8 +165,8 @@
 		}
 	}
 
-	priv->alloc_rxb_skb--;
-	dev_kfree_skb_any(cmd.reply_skb);
+	priv->alloc_rxb_page--;
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return ret;
 }
@@ -324,26 +317,19 @@
 
 static void iwl_remove_sta_callback(struct iwl_priv *priv,
 				    struct iwl_device_cmd *cmd,
-				    struct sk_buff *skb)
+				    struct iwl_rx_packet *pkt)
 {
-	struct iwl_rx_packet *res = NULL;
 	struct iwl_rem_sta_cmd *rm_sta =
-		 (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
+			(struct iwl_rem_sta_cmd *)cmd->cmd.payload;
 	const char *addr = rm_sta->addr;
 
-	if (!skb) {
-		IWL_ERR(priv, "Error: Response NULL in REPLY_REMOVE_STA.\n");
-		return;
-	}
-
-	res = (struct iwl_rx_packet *)skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
-		res->hdr.flags);
+		pkt->hdr.flags);
 		return;
 	}
 
-	switch (res->u.rem_sta.status) {
+	switch (pkt->u.rem_sta.status) {
 	case REM_STA_SUCCESS_MSK:
 		iwl_sta_ucode_deactivate(priv, addr);
 		break;
@@ -356,7 +342,7 @@
 static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
 				   u8 flags)
 {
-	struct iwl_rx_packet *res = NULL;
+	struct iwl_rx_packet *pkt;
 	int ret;
 
 	struct iwl_rem_sta_cmd rm_sta_cmd;
@@ -381,15 +367,15 @@
 	if (ret || (flags & CMD_ASYNC))
 		return ret;
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
-			  res->hdr.flags);
+			  pkt->hdr.flags);
 		ret = -EIO;
 	}
 
 	if (!ret) {
-		switch (res->u.rem_sta.status) {
+		switch (pkt->u.rem_sta.status) {
 		case REM_STA_SUCCESS_MSK:
 			iwl_sta_ucode_deactivate(priv, addr);
 			IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
@@ -401,8 +387,8 @@
 		}
 	}
 
-	priv->alloc_rxb_skb--;
-	dev_kfree_skb_any(cmd.reply_skb);
+	priv->alloc_rxb_page--;
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return ret;
 }
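
The iwl-sta.c hunks above convert synchronous host-command replies from skb-backed buffers (cmd.reply_skb) to page-backed buffers (cmd.reply_page), so callers now cast the page address to struct iwl_rx_packet and release it with free_pages() sized by rx_page_order. A minimal sketch of that caller pattern, assuming only the fields shown in the hunks; the helper name check_sta_reply() is hypothetical:

static int check_sta_reply(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)cmd->reply_page;
	int ret = 0;

	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK)
		ret = -EIO;	/* firmware flagged the command as failed */

	/* replies now live in pages, so account for and free them as pages */
	priv->alloc_rxb_page--;
	free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
	return ret;
}
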
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index c832ba0..8ae4c9b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -131,7 +131,7 @@
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	int i;
 
 	if (q->n_bd == 0)
 		return;
@@ -141,8 +141,6 @@
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 
-	len = sizeof(struct iwl_device_cmd) * q->n_window;
-
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -180,14 +178,11 @@
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	int i;
 
 	if (q->n_bd == 0)
 		return;
 
-	len = sizeof(struct iwl_device_cmd) * q->n_window;
-	len += IWL_MAX_SCAN_SIZE;
-
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i <= TFD_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -405,15 +400,19 @@
 	int txq_id;
 
 	/* Tx queues */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (txq_id == IWL_CMD_QUEUE_NUM)
-			iwl_cmd_queue_free(priv);
-		else
-			iwl_tx_queue_free(priv, txq_id);
-
+	if (priv->txq)
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
+		     txq_id++)
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				iwl_cmd_queue_free(priv);
+			else
+				iwl_tx_queue_free(priv, txq_id);
 	iwl_free_dma_ptr(priv, &priv->kw);
 
 	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+	/* free tx queue structure */
+	iwl_free_txq_mem(priv);
 }
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
@@ -445,6 +444,12 @@
 		IWL_ERR(priv, "Keep Warm allocation failed\n");
 		goto error_kw;
 	}
+
+	/* allocate tx queue structure */
+	ret = iwl_alloc_txq_mem(priv);
+	if (ret)
+		goto error;
+
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Turn off all Tx DMA fifos */
@@ -581,9 +586,7 @@
 	u8 rate_plcp;
 
 	/* Set retry limit on DATA packets and Probe Responses*/
-	if (priv->data_retry_limit != -1)
-		data_retry_limit = priv->data_retry_limit;
-	else if (ieee80211_is_probe_resp(fc))
+	if (ieee80211_is_probe_resp(fc))
 		data_retry_limit = 3;
 	else
 		data_retry_limit = IWL_DEFAULT_TX_RETRY;
@@ -1145,7 +1148,7 @@
  */
 void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -1172,10 +1175,10 @@
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
-		meta->source->reply_skb = rxb->skb;
-		rxb->skb = NULL;
+		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+		rxb->page = NULL;
 	} else if (meta->callback)
-		meta->callback(priv, cmd, rxb->skb);
+		meta->callback(priv, cmd, pkt);
 
 	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
 
@@ -1434,7 +1437,7 @@
 void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
 					   struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
 	struct iwl_tx_queue *txq = NULL;
 	struct iwl_ht_agg *agg;
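
The callback and CMD_WANT_SKB changes in iwl-tx.c also shift buffer ownership: a synchronous caller that set CMD_WANT_SKB takes over the page (rxb->page is cleared so the RX loop will not recycle it), while an asynchronous callback only borrows the parsed packet. A condensed sketch of that hand-off, restating the hunk under those assumptions rather than adding behaviour:

	if (meta->flags & CMD_WANT_SKB) {
		/* hand the page to the waiting caller; it frees it later */
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback) {
		/* callback only inspects pkt; the RX loop keeps the page */
		meta->callback(priv, cmd, pkt);
	}
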
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index c347d66..bfd7f49 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -88,7 +88,6 @@
 
  /* module parameters */
 struct iwl_mod_params iwl3945_mod_params = {
-	.num_of_queues = IWL39_NUM_QUEUES, /* Not used */
 	.sw_crypto = 1,
 	.restart_fw = 1,
 	/* the rest are 0 by default */
@@ -366,13 +365,13 @@
 				      struct sk_buff *skb_frag,
 				      int sta_id)
 {
-	struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
 	struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
 
 	switch (keyinfo->alg) {
 	case ALG_CCMP:
-		tx->sec_ctl = TX_CMD_SEC_CCM;
-		memcpy(tx->key, keyinfo->key, keyinfo->keylen);
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
 		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
 		break;
 
@@ -380,13 +379,13 @@
 		break;
 
 	case ALG_WEP:
-		tx->sec_ctl = TX_CMD_SEC_WEP |
+		tx_cmd->sec_ctl = TX_CMD_SEC_WEP |
 		    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
 
 		if (keyinfo->keylen == 13)
-			tx->sec_ctl |= TX_CMD_SEC_KEY128;
+			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
 
-		memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen);
+		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
 
 		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
 			     "with key %d\n", info->control.hw_key->hw_key_idx);
@@ -406,12 +405,11 @@
 				  struct ieee80211_tx_info *info,
 				  struct ieee80211_hdr *hdr, u8 std_id)
 {
-	struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-	__le32 tx_flags = tx->tx_flags;
+	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
+	__le32 tx_flags = tx_cmd->tx_flags;
 	__le16 fc = hdr->frame_control;
-	u8 rc_flags = info->control.rates[0].flags;
 
-	tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
 		tx_flags |= TX_CMD_FLG_ACK_MSK;
 		if (ieee80211_is_mgmt(fc))
@@ -424,25 +422,19 @@
 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 	}
 
-	tx->sta_id = std_id;
+	tx_cmd->sta_id = std_id;
 	if (ieee80211_has_morefrags(fc))
 		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
 
 	if (ieee80211_is_data_qos(fc)) {
 		u8 *qc = ieee80211_get_qos_ctl(hdr);
-		tx->tid_tspec = qc[0] & 0xf;
+		tx_cmd->tid_tspec = qc[0] & 0xf;
 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 	} else {
 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 	}
 
-	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
-		tx_flags |= TX_CMD_FLG_RTS_MSK;
-		tx_flags &= ~TX_CMD_FLG_CTS_MSK;
-	} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
-		tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-		tx_flags |= TX_CMD_FLG_CTS_MSK;
-	}
+	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
 
 	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
 		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
@@ -450,16 +442,16 @@
 	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
 	if (ieee80211_is_mgmt(fc)) {
 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-			tx->timeout.pm_frame_timeout = cpu_to_le16(3);
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
 		else
-			tx->timeout.pm_frame_timeout = cpu_to_le16(2);
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
 	} else {
-		tx->timeout.pm_frame_timeout = 0;
+		tx_cmd->timeout.pm_frame_timeout = 0;
 	}
 
-	tx->driver_txop = 0;
-	tx->tx_flags = tx_flags;
-	tx->next_frame_len = 0;
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
 }
 
 /*
@@ -469,7 +461,7 @@
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl3945_tx_cmd *tx;
+	struct iwl3945_tx_cmd *tx_cmd;
 	struct iwl_tx_queue *txq = NULL;
 	struct iwl_queue *q = NULL;
 	struct iwl_device_cmd *out_cmd;
@@ -568,9 +560,9 @@
 	/* Init first empty entry in queue's array of Tx/cmd buffers */
 	out_cmd = txq->cmd[idx];
 	out_meta = &txq->meta[idx];
-	tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
+	tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
 	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
-	memset(tx, 0, sizeof(*tx));
+	memset(tx_cmd, 0, sizeof(*tx_cmd));
 
 	/*
 	 * Set up the Tx-command (not MAC!) header.
@@ -583,7 +575,7 @@
 				INDEX_TO_SEQ(q->write_ptr)));
 
 	/* Copy MAC header from skb into command buffer */
-	memcpy(tx->hdr, hdr, hdr_len);
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
 
 
 	if (info->control.hw_key)
@@ -597,12 +589,12 @@
 
 	/* Total # bytes to be transmitted */
 	len = (u16)skb->len;
-	tx->len = cpu_to_le16(len);
+	tx_cmd->len = cpu_to_le16(len);
 
 	iwl_dbg_log_tx_data_frame(priv, len, hdr);
 	iwl_update_stats(priv, true, fc, len);
-	tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
-	tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
@@ -615,9 +607,9 @@
 
 	IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
 		     le16_to_cpu(out_cmd->hdr.sequence));
-	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags));
-	iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
-	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
+	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
+	iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
 			   ieee80211_hdrlen(fc));
 
 	/*
@@ -753,7 +745,7 @@
 			       u8 type)
 {
 	struct iwl_spectrum_cmd spectrum;
-	struct iwl_rx_packet *res;
+	struct iwl_rx_packet *pkt;
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_SPECTRUM_MEASUREMENT_CMD,
 		.data = (void *)&spectrum,
@@ -798,18 +790,18 @@
 	if (rc)
 		return rc;
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
 		rc = -EIO;
 	}
 
-	spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
+	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
 	switch (spectrum_resp_status) {
 	case 0:		/* Command will be handled */
-		if (res->u.spectrum.id != 0xff) {
+		if (pkt->u.spectrum.id != 0xff) {
 			IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
-						res->u.spectrum.id);
+						pkt->u.spectrum.id);
 			priv->measurement_status &= ~MEASUREMENT_READY;
 		}
 		priv->measurement_status |= MEASUREMENT_ACTIVE;
@@ -821,7 +813,7 @@
 		break;
 	}
 
-	dev_kfree_skb_any(cmd.reply_skb);
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return rc;
 }
@@ -830,7 +822,7 @@
 static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
 			       struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_alive_resp *palive;
 	struct delayed_work *pwork;
 
@@ -867,7 +859,7 @@
 				 struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 #endif
 
 	IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
@@ -903,7 +895,7 @@
 				struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
 	u8 rate = beacon->beacon_notify_hdr.rate;
 
@@ -926,7 +918,7 @@
 static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
 	unsigned long status = priv->status;
 
@@ -1090,7 +1082,7 @@
 		list_del(element);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr);
+		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
@@ -1130,8 +1122,9 @@
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
-	struct sk_buff *skb;
+	struct page *page;
 	unsigned long flags;
+	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock_irqsave(&rxq->lock, flags);
@@ -1143,10 +1136,14 @@
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
-			priority |= __GFP_NOWARN;
+			gfp_mask |= __GFP_NOWARN;
+
+		if (priv->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
 		/* Alloc a new receive buffer */
-		skb = alloc_skb(priv->hw_params.rx_buf_size, priority);
-		if (!skb) {
+		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+		if (!page) {
 			if (net_ratelimit())
 				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
@@ -1163,7 +1160,7 @@
 		spin_lock_irqsave(&rxq->lock, flags);
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			dev_kfree_skb_any(skb);
+			__free_pages(page, priv->hw_params.rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
@@ -1171,26 +1168,18 @@
 		list_del(element);
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
-		rxb->skb = skb;
-
-		/* If radiotap head is required, reserve some headroom here.
-		 * The physical head count is a variable rx_stats->phy_count.
-		 * We reserve 4 bytes here. Plus these extra bytes, the
-		 * headroom of the physical head should be enough for the
-		 * radiotap head that iwl3945 supported. See iwl3945_rt.
-		 */
-		skb_reserve(rxb->skb, 4);
-
+		rxb->page = page;
 		/* Get physical address of RB/SKB */
-		rxb->real_dma_addr = pci_map_single(priv->pci_dev,
-						rxb->skb->data,
-						priv->hw_params.rx_buf_size,
-						PCI_DMA_FROMDEVICE);
+		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
 
 		spin_lock_irqsave(&rxq->lock, flags);
+
 		list_add_tail(&rxb->list, &rxq->rx_free);
-		priv->alloc_rxb_skb++;
 		rxq->free_count++;
+		priv->alloc_rxb_page++;
+
 		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
 }
@@ -1206,14 +1195,14 @@
 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		/* In the reset function, these buffers may have been allocated
 		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size,
-					 PCI_DMA_FROMDEVICE);
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb(rxq->pool[i].skb);
-			rxq->pool[i].skb = NULL;
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			priv->alloc_rxb_page--;
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
 		}
 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
 	}
@@ -1221,8 +1210,8 @@
 	/* Set us so that we have processed and used all buffers, but have
 	 * not restocked the Rx queue with fresh buffers */
 	rxq->read = rxq->write = 0;
-	rxq->free_count = 0;
 	rxq->write_actual = 0;
+	rxq->free_count = 0;
 	spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
@@ -1255,12 +1244,14 @@
 {
 	int i;
 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(rxq->pool[i].skb);
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
+			priv->alloc_rxb_page--;
 		}
 	}
 
@@ -1376,7 +1367,7 @@
 	i = rxq->read;
 
 	/* calculate total frames need to be restock after handling RX */
-	total_empty = r - priv->rxq.write_actual;
+	total_empty = r - rxq->write_actual;
 	if (total_empty < 0)
 		total_empty += RX_QUEUE_SIZE;
 
@@ -1396,10 +1387,10 @@
 
 		rxq->queue[i] = NULL;
 
-		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
-				priv->hw_params.rx_buf_size,
-				PCI_DMA_FROMDEVICE);
-		pkt = (struct iwl_rx_packet *)rxb->skb->data;
+		pci_unmap_page(priv->pci_dev, rxb->page_dma,
+			       PAGE_SIZE << priv->hw_params.rx_page_order,
+			       PCI_DMA_FROMDEVICE);
+		pkt = rxb_addr(rxb);
 
 		trace_iwlwifi_dev_rx(priv, pkt,
 			le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
@@ -1420,44 +1411,55 @@
 		if (priv->rx_handlers[pkt->hdr.cmd]) {
 			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
 				get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
 			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
 		} else {
 			/* No handling needed */
-			IWL_DEBUG_RX(priv, "r %d i %d No handler needed for %s, 0x%02x\n",
+			IWL_DEBUG_RX(priv,
+				"r %d i %d No handler needed for %s, 0x%02x\n",
 				r, i, get_cmd_string(pkt->hdr.cmd),
 				pkt->hdr.cmd);
 		}
 
+		/*
+		 * XXX: After here, we should always check rxb->page
+		 * XXX: After this point, always check rxb->page
+		 * against NULL before touching it or its virtual
+		 * memory (pkt), because some rx_handler might have
+		 * already taken or freed the pages.
+		 */
+
 		if (reclaim) {
-			/* Invoke any callbacks, transfer the skb to caller, and
-			 * fire off the (possibly) blocking iwl_send_cmd()
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking iwl_send_cmd()
 			 * as we reclaim the driver command queue */
-			if (rxb && rxb->skb)
+			if (rxb->page)
 				iwl_tx_cmd_complete(priv, rxb);
 			else
 				IWL_WARN(priv, "Claim null rxb?\n");
 		}
 
-		/* For now we just don't re-use anything.  We can tweak this
-		 * later to try and re-use notification packets and SKBs that
-		 * fail to Rx correctly */
-		if (rxb->skb != NULL) {
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb_any(rxb->skb);
-			rxb->skb = NULL;
-		}
-
+		/* Reuse the page if possible. For notification packets and
+		 * SKBs that fail to Rx correctly, add them back into the
+		 * rx_free list for reuse later. */
 		spin_lock_irqsave(&rxq->lock, flags);
-		list_add_tail(&rxb->list, &priv->rxq.rx_used);
+		if (rxb->page != NULL) {
+			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+				0, PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		} else
+			list_add_tail(&rxb->list, &rxq->rx_used);
+
 		spin_unlock_irqrestore(&rxq->lock, flags);
+
 		i = (i + 1) & RX_QUEUE_MASK;
 		/* If there are a lot of unused frames,
 		 * restock the Rx queue so ucode won't assert. */
 		if (fill_rx) {
 			count++;
 			if (count >= 8) {
-				priv->rxq.read = i;
+				rxq->read = i;
 				iwl3945_rx_replenish_now(priv);
 				count = 0;
 			}
@@ -1465,7 +1467,7 @@
 	}
 
 	/* Backtrack one entry */
-	priv->rxq.read = i;
+	rxq->read = i;
 	if (fill_rx)
 		iwl3945_rx_replenish_now(priv);
 	else
@@ -1686,6 +1688,8 @@
 	}
 #endif
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
 	 * atomic, make sure that inta covers all the interrupts that
 	 * we've discovered, even if FH interrupt came in just after
@@ -1707,8 +1711,6 @@
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
-		spin_unlock_irqrestore(&priv->lock, flags);
-
 		return;
 	}
 
@@ -1800,7 +1802,6 @@
 			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
 	}
 #endif
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
@@ -2563,11 +2564,6 @@
 			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
 				STATUS_EXIT_PENDING;
 
-	priv->cfg->ops->lib->apm_ops.reset(priv);
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 	iwl3945_hw_txq_ctx_stop(priv);
 	iwl3945_hw_rxq_stop(priv);
 
@@ -2576,10 +2572,8 @@
 
 	udelay(5);
 
-	if (exit_pending)
-		priv->cfg->ops->lib->apm_ops.stop(priv);
-	else
-		priv->cfg->ops->lib->apm_ops.reset(priv);
+	/* Stop the device, and put it in low power state */
+	priv->cfg->ops->lib->apm_ops.stop(priv);
 
  exit:
 	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2724,19 +2718,34 @@
 	mutex_unlock(&priv->mutex);
 }
 
+/*
+ * 3945 cannot interrupt driver when hardware rf kill switch toggles;
+ * driver must poll CSR_GP_CNTRL_REG register for change.  This register
+ * *is* readable even when device has been SW_RESET into low power mode
+ * (e.g. during RF KILL).
+ */
 static void iwl3945_rfkill_poll(struct work_struct *data)
 {
 	struct iwl_priv *priv =
 	    container_of(data, struct iwl_priv, rfkill_poll.work);
+	bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
+	bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
+			& CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
 
-	if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-	else
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
+	if (new_rfkill != old_rfkill) {
+		if (new_rfkill)
+			set_bit(STATUS_RF_KILL_HW, &priv->status);
+		else
+			clear_bit(STATUS_RF_KILL_HW, &priv->status);
 
-	wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-			test_bit(STATUS_RF_KILL_HW, &priv->status));
+		wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
 
+		IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
+				new_rfkill ? "disable radio" : "enable radio");
+	}
+
+	/* Keep this running, even if radio now enabled.  This will be
+	 * cancelled in mac_start() if system decides to start again */
 	queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
 			   round_jiffies_relative(2 * HZ));
 
@@ -3797,7 +3806,6 @@
 	/* Clear the driver's (not device's) station table */
 	iwl_clear_stations_table(priv);
 
-	priv->data_retry_limit = -1;
 	priv->ieee_channels = NULL;
 	priv->ieee_rates = NULL;
 	priv->band = IEEE80211_BAND_2GHZ;
@@ -4056,6 +4064,7 @@
 			     &priv->bands[IEEE80211_BAND_2GHZ].channels[5]);
 	iwl3945_setup_deferred_work(priv);
 	iwl3945_setup_rx_handlers(priv);
+	iwl_power_initialize(priv);
 
 	/*********************************
 	 * 8. Setup and Register mac80211
@@ -4126,6 +4135,15 @@
 		iwl3945_down(priv);
 	}
 
+	/*
+	 * Make sure device is reset to low power before unloading driver.
+	 * This may be redundant with iwl_down(), but there are paths to
+	 * run iwl_down() without calling apm_ops.stop(), and there are
+	 * paths to avoid running iwl_down() at all before leaving driver.
+	 * This (inexpensive) call *makes sure* device is reset.
+	 */
+	priv->cfg->ops->lib->apm_ops.stop(priv);
+
 	/* make sure we flush any pending irq or
 	 * tasklet for the driver
 	 */
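
The 3945 RX path above mirrors the page-based receive buffers: buffers are allocated with alloc_pages() (adding __GFP_COMP once the order is non-zero), DMA-mapped with pci_map_page(), and after the handlers run they are re-mapped and returned to rx_free unless a handler took the page. A small sketch of the allocation step, with rx_alloc_page() as a hypothetical helper name:

static struct page *rx_alloc_page(struct iwl_priv *priv, gfp_t priority)
{
	gfp_t gfp_mask = priority;

	if (priv->hw_params.rx_page_order > 0)
		gfp_mask |= __GFP_COMP;	/* higher-order buffers must be compound pages */

	return alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
}
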
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index c25a043..9606b31 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -3,6 +3,7 @@
 	depends on MMC && WLAN_80211 && EXPERIMENTAL
 	depends on CFG80211
 	select FW_LOADER
+	select IWMC3200TOP
 	help
 	  The Intel Wireless Multicomm 3200 hardware is a combo
 	  card with GPS, Bluetooth, WiMax and 802.11 radios. It
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a56a2b0..af72cc7 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -404,39 +404,21 @@
 {
 	struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
 	struct ieee80211_channel *chan = params->channel;
-	struct cfg80211_bss *bss;
 
 	if (!test_bit(IWM_STATUS_READY, &iwm->status))
 		return -EIO;
 
-	/* UMAC doesn't support creating IBSS network with specified bssid.
-	 * This should be removed after we have join only mode supported. */
+	/* UMAC doesn't support creating or joining an IBSS network
+	 * with specified bssid. */
 	if (params->bssid)
 		return -EOPNOTSUPP;
 
-	bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
-				params->ssid, params->ssid_len);
-	if (!bss) {
-		iwm_scan_one_ssid(iwm, params->ssid, params->ssid_len);
-		schedule_timeout_interruptible(2 * HZ);
-		bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
-					params->ssid, params->ssid_len);
-	}
-	/* IBSS join only mode is not supported by UMAC ATM */
-	if (bss) {
-		cfg80211_put_bss(bss);
-		return -EOPNOTSUPP;
-	}
-
 	iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
 	iwm->umac_profile->ibss.band = chan->band;
 	iwm->umac_profile->ibss.channel = iwm->channel;
 	iwm->umac_profile->ssid.ssid_len = params->ssid_len;
 	memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
 
-	if (params->bssid)
-		memcpy(&iwm->umac_profile->bssid[0], params->bssid, ETH_ALEN);
-
 	return iwm_send_mlme_profile(iwm);
 }
 
@@ -489,12 +471,12 @@
 		return 0;
 	}
 
+	if (wpa_version & NL80211_WPA_VERSION_1)
+		iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
+
 	if (wpa_version & NL80211_WPA_VERSION_2)
 		iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
 
-	if (wpa_version & NL80211_WPA_VERSION_1)
-		iwm->umac_profile->sec.flags |= UMAC_SEC_FLG_WPA_ON_MSK;
-
 	return 0;
 }
 
@@ -645,6 +627,13 @@
 		iwm->default_key = sme->key_idx;
 	}
 
+	/* WPA and open AUTH type from wpa_s means WPS (a.k.a. WSC) */
+	if ((iwm->umac_profile->sec.flags &
+	     (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) &&
+	    iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) {
+			iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK;
+	}
+
 	ret = iwm_send_mlme_profile(iwm);
 
 	if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK ||
@@ -681,9 +670,19 @@
 static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
 				    enum tx_power_setting type, int dbm)
 {
+	struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
+	int ret;
+
 	switch (type) {
 	case TX_POWER_AUTOMATIC:
 		return 0;
+	case TX_POWER_FIXED:
+		ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
+					      CFG_TX_PWR_LIMIT_USR, dbm * 2);
+		if (ret < 0)
+			return ret;
+
+		return iwm_tx_power_trigger(iwm);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -695,7 +694,7 @@
 {
 	struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
 
-	*dbm = iwm->txpower;
+	*dbm = iwm->txpower >> 1;
 
 	return 0;
 }
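
The two txpower hunks imply that the UMAC tracks transmit power in half-dBm steps: the fixed-power path writes dbm * 2 into CFG_TX_PWR_LIMIT_USR and the getter shifts the cached value right by one. A tiny sketch of that conversion (an inference from the patch, not a documented UMAC contract; the helper names are hypothetical):

static inline int iwm_dbm_to_halfdbm(int dbm) { return dbm * 2; }	/* e.g. 15 dBm -> 30 */
static inline int iwm_halfdbm_to_dbm(int val) { return val >> 1; }	/* e.g. 30 -> 15 dBm */
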
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 23b52fa..cad511a 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -76,6 +76,11 @@
 	int ret;
 	u8 oid = hdr->oid;
 
+	if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
+		IWM_ERR(iwm, "Interface is not ready yet");
+		return -EAGAIN;
+	}
+
 	umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER;
 	umac_cmd.resp = resp;
 
@@ -274,6 +279,17 @@
 	return ret;
 }
 
+int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit)
+{
+	struct iwm_ct_kill_cfg_cmd cmd;
+
+	cmd.entry_threshold = entry;
+	cmd.exit_threshold = exit;
+
+	return iwm_send_lmac_ptrough_cmd(iwm, REPLY_CT_KILL_CONFIG_CMD, &cmd,
+					 sizeof(struct iwm_ct_kill_cfg_cmd), 0);
+}
+
 int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp)
 {
 	struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
@@ -777,11 +793,24 @@
 		return ret;
 
 	ret = wait_event_interruptible_timeout(iwm->mlme_queue,
-				(iwm->umac_profile_active == 0), 2 * HZ);
+				(iwm->umac_profile_active == 0), 5 * HZ);
 
 	return ret ? 0 : -EBUSY;
 }
 
+int iwm_tx_power_trigger(struct iwm_priv *iwm)
+{
+	struct iwm_umac_pwr_trigger pwr_trigger;
+
+	pwr_trigger.hdr.oid = UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER;
+	pwr_trigger.hdr.buf_size =
+		cpu_to_le16(sizeof(struct iwm_umac_pwr_trigger) -
+			    sizeof(struct iwm_umac_wifi_if));
+
+
+	return iwm_send_wifi_if_cmd(iwm, &pwr_trigger, sizeof(pwr_trigger), 1);
+}
+
 int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags)
 {
 	struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index e24d5b6..b36be2b 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -102,7 +102,6 @@
 	CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN,
 	CFG_TLC_SUPPORTED_TX_HT_RATES,
 	CFG_TLC_SUPPORTED_TX_RATES,
-	CFG_TLC_VALID_ANTENNA,
 	CFG_TLC_SPATIAL_STREAM_SUPPORTED,
 	CFG_TLC_RETRY_PER_RATE,
 	CFG_TLC_RETRY_PER_HT_RATE,
@@ -136,6 +135,10 @@
 	CFG_TLC_RENEW_ADDBA_DELAY,
 	CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD,
 	CFG_TLC_IS_STABLE_IN_HT,
+	CFG_TLC_SR_SIC_1ST_FAIL,
+	CFG_TLC_SR_SIC_1ST_PASS,
+	CFG_TLC_SR_SIC_TOTAL_FAIL,
+	CFG_TLC_SR_SIC_TOTAL_PASS,
 	CFG_RLC_CHAIN_CTRL,
 	CFG_TRK_TABLE_OP_MODE,
 	CFG_TRK_TABLE_RSSI_THRESHOLD,
@@ -147,6 +150,58 @@
 	CFG_MLME_DBG_NOTIF_BLOCK,
 	CFG_BT_OFF_BECONS_INTERVALS,
 	CFG_BT_FRAG_DURATION,
+	CFG_ACTIVE_CHAINS,
+	CFG_CALIB_CTRL,
+	CFG_CAPABILITY_SUPPORTED_HT_RATES,
+	CFG_HT_MAC_PARAM_INFO,
+	CFG_MIMO_PS_MODE,
+	CFG_HT_DEFAULT_CAPABILIES_INFO,
+	CFG_LED_SC_RESOLUTION_FACTOR,
+	CFG_PTAM_ENERGY_CCK_DET_DEFAULT,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_DEFAULT,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_DEFAULT,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_DEFAULT,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_DEFAULT,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_DEFAULT,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_DEFAULT,
+	CFG_PTAM_ENERGY_CCK_DET_MIN_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MIN_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MIN_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MIN_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MIN_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MIN_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MIN_VAL,
+	CFG_PTAM_ENERGY_CCK_DET_MAX_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MAX_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MAX_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MAX_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MAX_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MAX_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MAX_VAL,
+	CFG_PTAM_ENERGY_CCK_DET_STEP_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_STEP_VAL,
+	CFG_PTAM_CORR40_4_TH_ADD_MIN_STEP_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_STEP_VAL,
+	CFG_PTAM_CORR32_4_TH_ADD_MIN_STEP_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_STEP_VAL,
+	CFG_PTAM_CORR32_1_TH_ADD_MIN_STEP_VAL,
+	CFG_PTAM_LINK_SENS_FA_OFDM_MAX,
+	CFG_PTAM_LINK_SENS_FA_OFDM_MIN,
+	CFG_PTAM_LINK_SENS_FA_CCK_MAX,
+	CFG_PTAM_LINK_SENS_FA_CCK_MIN,
+	CFG_PTAM_LINK_SENS_NRG_DIFF,
+	CFG_PTAM_LINK_SENS_NRG_MARGIN,
+	CFG_PTAM_LINK_SENS_MAX_NUMBER_OF_TIMES_IN_CCK_NO_FA,
+	CFG_PTAM_LINK_SENS_AUTO_CORR_MAX_TH_CCK,
+	CFG_AGG_MGG_TID_LOAD_ADDBA_THRESHOLD,
+	CFG_AGG_MGG_TID_LOAD_DELBA_THRESHOLD,
+	CFG_AGG_MGG_ADDBA_BUF_SIZE,
+	CFG_AGG_MGG_ADDBA_INACTIVE_TIMEOUT,
+	CFG_AGG_MGG_ADDBA_DEBUG_FLAGS,
+	CFG_SCAN_PERIODIC_RSSI_HIGH_THRESHOLD,
+	CFG_SCAN_PERIODIC_COEF_RSSI_HIGH,
+	CFG_11D_ENABLED,
+	CFG_11H_FEATURE_FLAGS,
 
 	/* <-- LAST --> */
 	CFG_TBL_FIX_LAST
@@ -155,7 +210,8 @@
 /* variable size table */
 enum {
 	CFG_NET_ADDR = 0,
-	CFG_PROFILE,
+	CFG_LED_PATTERN_TABLE,
+
 	/* <-- LAST --> */
 	CFG_TBL_VAR_LAST
 };
@@ -288,6 +344,9 @@
 /* iwm_umac_security.flag is WSC mode on -- bits [2:2] */
 #define UMAC_SEC_FLG_WSC_ON_POS		2
 #define UMAC_SEC_FLG_WSC_ON_SEED	1
+#define UMAC_SEC_FLG_WSC_ON_MSK         (UMAC_SEC_FLG_WSC_ON_SEED << \
+					 UMAC_SEC_FLG_WSC_ON_POS)
+
 
 /* Legacy profile can use only WEP40 and WEP104 for encryption and
  * OPEN or PSK for authentication */
@@ -382,6 +441,11 @@
 	u8 reserved[3];
 } __attribute__ ((packed));
 
+struct iwm_umac_pwr_trigger {
+	struct iwm_umac_wifi_if hdr;
+	__le32 reseved;
+} __attribute__ ((packed));
+
 struct iwm_umac_cmd_stats_req {
 	__le32 flags;
 } __attribute__ ((packed));
@@ -393,6 +457,7 @@
 int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
 int iwm_send_calib_results(struct iwm_priv *iwm);
 int iwm_store_rxiq_calib_result(struct iwm_priv *iwm);
+int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit);
 
 /* UMAC commands */
 int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
@@ -407,6 +472,7 @@
 int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
 int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
 int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key);
+int iwm_tx_power_trigger(struct iwm_priv *iwm);
 int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags);
 int iwm_send_umac_channel_list(struct iwm_priv *iwm);
 int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
index 6b0bcad..4906709 100644
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ b/drivers/net/wireless/iwmc3200wifi/fw.c
@@ -217,6 +217,13 @@
 		 IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date),
 		 IWM_BUILD_DAY(build_date));
 
+	if (!strcmp(img_name, iwm->bus_ops->umac_name))
+		sprintf(iwm->umac_version, "%02X.%02X",
+			ver->major, ver->minor);
+
+	if (!strcmp(img_name, iwm->bus_ops->lmac_name))
+		sprintf(iwm->lmac_version, "%02X.%02X",
+			ver->major, ver->minor);
 
  err_release_fw:
 	release_firmware(fw);
@@ -398,6 +405,8 @@
 	iwm_send_prio_table(iwm);
 	iwm_send_calib_results(iwm);
 	iwm_send_periodic_calib_cfg(iwm, periodic_calib_map);
+	iwm_send_ct_kill_cfg(iwm, iwm->conf.ct_kill_entry,
+			     iwm->conf.ct_kill_exit);
 
 	return 0;
 
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 1b02a4e..a9bf6bc 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -65,6 +65,8 @@
 	u32 sdio_ior_timeout;
 	unsigned long calib_map;
 	unsigned long expected_calib_map;
+	u8 ct_kill_entry;
+	u8 ct_kill_exit;
 	bool reset_on_fatal_err;
 	bool auto_connect;
 	bool wimax_not_present;
@@ -276,12 +278,14 @@
 	struct iw_statistics wstats;
 	struct delayed_work stats_request;
 	struct delayed_work disconnect;
+	struct delayed_work ct_kill_delay;
 
 	struct iwm_debugfs dbg;
 
 	u8 *eeprom;
 	struct timer_list watchdog;
 	struct work_struct reset_worker;
+	struct work_struct auth_retry_worker;
 	struct mutex mutex;
 
 	u8 *req_ie;
@@ -290,6 +294,8 @@
 	int resp_ie_len;
 
 	struct iwm_fw_error_hdr *last_fw_err;
+	char umac_version[8];
+	char lmac_version[8];
 
 	char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
 };
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index 6c1a14c..a3a79b5 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -187,6 +187,14 @@
 				     COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
 				     COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
 
+/* CT kill config command */
+struct iwm_ct_kill_cfg_cmd {
+	u32 exit_threshold;
+	u32 reserved;
+	u32 entry_threshold;
+} __attribute__ ((packed));
+
+
 /* LMAC OP CODES */
 #define REPLY_PAD			0x0
 #define REPLY_ALIVE			0x1
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 170f337..f93e913 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -63,6 +63,8 @@
 				  BIT(PHY_CALIBRATE_TX_IQ_CMD)	|
 				  BIT(PHY_CALIBRATE_RX_IQ_CMD)	|
 				  BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
+	.ct_kill_entry		= 110,
+	.ct_kill_exit		= 110,
 	.reset_on_fatal_err	= 1,
 	.auto_connect		= 1,
 	.wimax_not_present	= 0,
@@ -133,6 +135,17 @@
 	cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL);
 }
 
+static void iwm_ct_kill_work(struct work_struct *work)
+{
+	struct iwm_priv *iwm =
+		container_of(work, struct iwm_priv, ct_kill_delay.work);
+	struct wiphy *wiphy = iwm_to_wiphy(iwm);
+
+	IWM_INFO(iwm, "CT kill delay timeout\n");
+
+	wiphy_rfkill_set_hw_state(wiphy, false);
+}
+
 static int __iwm_up(struct iwm_priv *iwm);
 static int __iwm_down(struct iwm_priv *iwm);
 
@@ -194,6 +207,33 @@
 	mutex_unlock(&iwm->mutex);
 }
 
+static void iwm_auth_retry_worker(struct work_struct *work)
+{
+	struct iwm_priv *iwm;
+	int i, ret;
+
+	iwm = container_of(work, struct iwm_priv, auth_retry_worker);
+	if (iwm->umac_profile_active) {
+		ret = iwm_invalidate_mlme_profile(iwm);
+		if (ret < 0)
+			return;
+	}
+
+	iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
+
+	ret = iwm_send_mlme_profile(iwm);
+	if (ret < 0)
+		return;
+
+	for (i = 0; i < IWM_NUM_KEYS; i++)
+		if (iwm->keys[i].key_len)
+			iwm_set_key(iwm, 0, &iwm->keys[i]);
+
+	iwm_set_tx_key(iwm, iwm->default_key);
+}
+
+
+
 static void iwm_watchdog(unsigned long data)
 {
 	struct iwm_priv *iwm = (struct iwm_priv *)data;
@@ -225,7 +265,9 @@
 	iwm->scan_id = 1;
 	INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request);
 	INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work);
+	INIT_DELAYED_WORK(&iwm->ct_kill_delay, iwm_ct_kill_work);
 	INIT_WORK(&iwm->reset_worker, iwm_reset_worker);
+	INIT_WORK(&iwm->auth_retry_worker, iwm_auth_retry_worker);
 	INIT_LIST_HEAD(&iwm->bss_list);
 
 	skb_queue_head_init(&iwm->rx_list);
@@ -586,6 +628,7 @@
 {
 	int ret;
 	struct iwm_notif *notif_reboot, *notif_ack = NULL;
+	struct wiphy *wiphy = iwm_to_wiphy(iwm);
 
 	ret = iwm_bus_enable(iwm);
 	if (ret) {
@@ -647,6 +690,9 @@
 		goto err_disable;
 	}
 
+	snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "L%s_U%s",
+		 iwm->lmac_version, iwm->umac_version);
+
 	/* We configure the UMAC and enable the wifi module */
 	ret = iwm_send_umac_config(iwm,
 			cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) |
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 35ec006..4f8dbdd 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -152,6 +152,7 @@
 	if (!iwm_to_ndev(iwm))
 		return;
 
+	cancel_delayed_work_sync(&iwm->ct_kill_delay);
 	free_netdev(iwm_to_ndev(iwm));
 	iwm_priv_deinit(iwm);
 	kfree(iwm->umac_profile);
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 40dbcbc..3ad95dc 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -422,7 +422,9 @@
 			if (IS_ERR(ticket_node))
 				return PTR_ERR(ticket_node);
 
-			IWM_DBG_RX(iwm, DBG, "TICKET RELEASE(%d)\n",
+			IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
+				   ticket->action ==  IWM_RX_TICKET_RELEASE ?
+				   "RELEASE" : "DROP",
 				   ticket->id);
 			list_add_tail(&ticket_node->node, &iwm->rx_tickets);
 
@@ -499,6 +501,18 @@
 	return 0;
 }
 
+static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm)
+{
+	if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
+	     iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
+	    (iwm->umac_profile->sec.ucast_cipher ==
+	     iwm->umac_profile->sec.mcast_cipher) &&
+	    (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN))
+	       return 1;
+
+       return 0;
+}
+
 static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
 				   unsigned long buf_size,
 				   struct iwm_wifi_cmd *cmd)
@@ -564,11 +578,17 @@
 			goto ibss;
 
 		if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
-			cfg80211_connect_result(iwm_to_ndev(iwm),
-						complete->bssid,
-						NULL, 0, NULL, 0,
-						WLAN_STATUS_UNSPECIFIED_FAILURE,
-						GFP_KERNEL);
+			if (!iwm_is_open_wep_profile(iwm)) {
+				cfg80211_connect_result(iwm_to_ndev(iwm),
+					       complete->bssid,
+					       NULL, 0, NULL, 0,
+					       WLAN_STATUS_UNSPECIFIED_FAILURE,
+					       GFP_KERNEL);
+			} else {
+				/* Let's try shared WEP auth */
+				IWM_ERR(iwm, "Trying WEP shared auth\n");
+				schedule_work(&iwm->auth_retry_worker);
+			}
 		else
 			cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0,
 					      GFP_KERNEL);
@@ -712,6 +732,19 @@
 	return 0;
 }
 
+static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf,
+				unsigned long buf_size,
+				struct iwm_wifi_cmd *cmd)
+{
+	struct wiphy *wiphy = iwm_to_wiphy(iwm);
+
+	IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n");
+
+	wiphy_rfkill_set_hw_state(wiphy, true);
+
+	return 0;
+}
+
 static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
 				     unsigned long buf_size,
 				     struct iwm_wifi_cmd *cmd)
@@ -898,6 +931,8 @@
 	case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
 		IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
 		break;
+	case WIFI_IF_NTFY_RADIO_PREEMPTION:
+		return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd);
 	case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
 		return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
 	case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
@@ -1055,8 +1090,14 @@
 				   unsigned long buf_size,
 				   struct iwm_wifi_cmd *cmd)
 {
-	struct iwm_umac_wifi_if *hdr =
-			(struct iwm_umac_wifi_if *)cmd->buf.payload;
+	struct iwm_umac_wifi_if *hdr;
+
+	if (cmd == NULL) {
+		IWM_ERR(iwm, "Couldn't find expected wifi command\n");
+		return -EINVAL;
+	}
+
+	hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload;
 
 	IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
 		    "oid is 0x%x\n", hdr->oid);
@@ -1078,6 +1119,7 @@
 	return 0;
 }
 
+#define CT_KILL_DELAY (30 * HZ)
 static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
 			      unsigned long buf_size, struct iwm_wifi_cmd *cmd)
 {
@@ -1090,7 +1132,20 @@
 		 flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
 		 flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");
 
-	wiphy_rfkill_set_hw_state(wiphy, flags & IWM_CARD_STATE_HW_DISABLED);
+	if (flags & IWM_CARD_STATE_CTKILL_DISABLED) {
+		/*
+		 * We got a CTKILL event: We bring the interface down in
+		 * order to cool the device down, and try to bring it up
+		 * 30 seconds later. If it's still too hot, we'll go through
+		 * this code path again.
+		 */
+		cancel_delayed_work_sync(&iwm->ct_kill_delay);
+		schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);
+	}
+
+	wiphy_rfkill_set_hw_state(wiphy, flags &
+				  (IWM_CARD_STATE_HW_DISABLED |
+				   IWM_CARD_STATE_CTKILL_DISABLED));
 
 	return 0;
 }
@@ -1281,6 +1336,14 @@
 
 	switch (le32_to_cpu(hdr->cmd)) {
 	case UMAC_REBOOT_BARKER:
+		if (test_bit(IWM_STATUS_READY, &iwm->status)) {
+			IWM_ERR(iwm, "Unexpected BARKER\n");
+
+			schedule_work(&iwm->reset_worker);
+
+			return 0;
+		}
+
 		return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
 				      IWM_SRC_UDMA, buf, buf_size);
 	case UMAC_ACK_BARKER:
@@ -1443,7 +1506,8 @@
 		}
 		break;
 	case IWM_RX_TICKET_DROP:
-		IWM_DBG_RX(iwm, DBG, "DROP packet\n");
+		IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n",
+			   le16_to_cpu(ticket_node->ticket->flags));
 		kfree_skb(packet->skb);
 		break;
 	default:
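
The CT-kill handling added across main.c, netdev.c and rx.c forms a simple recovery loop: a card-state notification with the CTKILL flag asserts rfkill and re-arms a 30 second delayed work, and that work clears the soft rfkill state again; if the device is still too hot, the next notification restarts the cycle. Condensed into one hypothetical helper (field and constant names follow the patch):

static void iwm_ct_kill_rearm(struct iwm_priv *iwm)
{
	/* radio stays reported as killed until the cool-down period expires */
	wiphy_rfkill_set_hw_state(iwm_to_wiphy(iwm), true);
	cancel_delayed_work_sync(&iwm->ct_kill_delay);
	schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);	/* 30 * HZ */
}
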
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 8b1de84..cf86294 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -224,8 +224,6 @@
 	struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
 	int ret;
 
-	iwm_reset(iwm);
-
 	sdio_claim_host(hw->func);
 	sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
 	if (ret < 0)
@@ -237,6 +235,8 @@
 
 	iwm_sdio_rx_free(hw);
 
+	iwm_reset(iwm);
+
 	IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");
 
 	return 0;
@@ -493,8 +493,10 @@
 }
 
 static const struct sdio_device_id iwm_sdio_ids[] = {
-	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
-		      SDIO_DEVICE_ID_INTEL_IWMC3200WIFI) },
+	/* Global/AGN SKU */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1403) },
+	/* BGN SKU */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1408) },
 	{ /* end: all zeroes */	},
 };
 MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids);
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index c5a14ae..be90354 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -687,6 +687,9 @@
 /* Tx/Rx rates window (number of max of last update window per second) */
 #define UMAC_NTF_RATE_SAMPLE_NR	4
 
+/* Max numbers of bits required to go through all antennae in bitmasks */
+#define UMAC_PHY_NUM_CHAINS     3
+
 #define IWM_UMAC_MGMT_TID	8
 #define IWM_UMAC_TID_NR		8
 
@@ -697,9 +700,11 @@
 	__le16 tid_load[IWM_UMAC_TID_NR + 2]; /* 1 non-QoS + 1 dword align */
 	__le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR];
 	__le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR];
+	__le32 chain_energy[UMAC_PHY_NUM_CHAINS];
 	s32 rssi_dbm;
 	s32 noise_dbm;
 	__le32 supp_rates;
+	__le32 supp_ht_rates;
 	__le32 missed_beacons;
 	__le32 rx_beacons;
 	__le32 rx_dir_pkts;
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
deleted file mode 100644
index 5c69681..0000000
--- a/drivers/net/wireless/libertas/11d.c
+++ /dev/null
@@ -1,696 +0,0 @@
-/**
-  * This file contains functions for 802.11D.
-  */
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/wireless.h>
-
-#include "host.h"
-#include "decl.h"
-#include "11d.h"
-#include "dev.h"
-#include "wext.h"
-
-#define TX_PWR_DEFAULT	10
-
-static struct region_code_mapping region_code_mapping[] = {
-	{"US ", 0x10},		/* US FCC      */
-	{"CA ", 0x10},		/* IC Canada   */
-	{"SG ", 0x10},		/* Singapore   */
-	{"EU ", 0x30},		/* ETSI        */
-	{"AU ", 0x30},		/* Australia   */
-	{"KR ", 0x30},		/* Republic Of Korea */
-	{"ES ", 0x31},		/* Spain       */
-	{"FR ", 0x32},		/* France      */
-	{"JP ", 0x40},		/* Japan       */
-};
-
-/* Following 2 structure defines the supported channels */
-static struct chan_freq_power channel_freq_power_UN_BG[] = {
-	{1, 2412, TX_PWR_DEFAULT},
-	{2, 2417, TX_PWR_DEFAULT},
-	{3, 2422, TX_PWR_DEFAULT},
-	{4, 2427, TX_PWR_DEFAULT},
-	{5, 2432, TX_PWR_DEFAULT},
-	{6, 2437, TX_PWR_DEFAULT},
-	{7, 2442, TX_PWR_DEFAULT},
-	{8, 2447, TX_PWR_DEFAULT},
-	{9, 2452, TX_PWR_DEFAULT},
-	{10, 2457, TX_PWR_DEFAULT},
-	{11, 2462, TX_PWR_DEFAULT},
-	{12, 2467, TX_PWR_DEFAULT},
-	{13, 2472, TX_PWR_DEFAULT},
-	{14, 2484, TX_PWR_DEFAULT}
-};
-
-static u8 lbs_region_2_code(u8 *region)
-{
-	u8 i;
-
-	for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++)
-		region[i] = toupper(region[i]);
-
-	for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
-		if (!memcmp(region, region_code_mapping[i].region,
-			    COUNTRY_CODE_LEN))
-			return (region_code_mapping[i].code);
-	}
-
-	/* default is US */
-	return (region_code_mapping[0].code);
-}
-
-static u8 *lbs_code_2_region(u8 code)
-{
-	u8 i;
-
-	for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
-		if (region_code_mapping[i].code == code)
-			return (region_code_mapping[i].region);
-	}
-	/* default is US */
-	return (region_code_mapping[0].region);
-}
-
-/**
- *  @brief This function finds the nrchan-th chan after the firstchan
- *  @param band       band
- *  @param firstchan  first channel number
- *  @param nrchan   number of channels
- *  @return 	      the nrchan-th chan number
-*/
-static u8 lbs_get_chan_11d(u8 firstchan, u8 nrchan, u8 *chan)
-/*find the nrchan-th chan after the firstchan*/
-{
-	u8 i;
-	struct chan_freq_power *cfp;
-	u8 cfp_no;
-
-	cfp = channel_freq_power_UN_BG;
-	cfp_no = ARRAY_SIZE(channel_freq_power_UN_BG);
-
-	for (i = 0; i < cfp_no; i++) {
-		if ((cfp + i)->channel == firstchan) {
-			lbs_deb_11d("firstchan found\n");
-			break;
-		}
-	}
-
-	if (i < cfp_no) {
-		/*if beyond the boundary */
-		if (i + nrchan < cfp_no) {
-			*chan = (cfp + i + nrchan)->channel;
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-/**
- *  @brief This function Checks if chan txpwr is learned from AP/IBSS
- *  @param chan                 chan number
- *  @param parsed_region_chan   pointer to parsed_region_chan_11d
- *  @return 	                TRUE; FALSE
-*/
-static u8 lbs_channel_known_11d(u8 chan,
-			  struct parsed_region_chan_11d * parsed_region_chan)
-{
-	struct chan_power_11d *chanpwr = parsed_region_chan->chanpwr;
-	u8 nr_chan = parsed_region_chan->nr_chan;
-	u8 i = 0;
-
-	lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)chanpwr,
-		sizeof(struct chan_power_11d) * nr_chan);
-
-	for (i = 0; i < nr_chan; i++) {
-		if (chan == chanpwr[i].chan) {
-			lbs_deb_11d("found chan %d\n", chan);
-			return 1;
-		}
-	}
-
-	lbs_deb_11d("chan %d not found\n", chan);
-	return 0;
-}
-
-u32 lbs_chan_2_freq(u8 chan)
-{
-	struct chan_freq_power *cf;
-	u16 i;
-	u32 freq = 0;
-
-	cf = channel_freq_power_UN_BG;
-
-	for (i = 0; i < ARRAY_SIZE(channel_freq_power_UN_BG); i++) {
-		if (chan == cf[i].channel)
-			freq = cf[i].freq;
-	}
-
-	return freq;
-}
-
-static int generate_domain_info_11d(struct parsed_region_chan_11d
-				  *parsed_region_chan,
-				  struct lbs_802_11d_domain_reg *domaininfo)
-{
-	u8 nr_subband = 0;
-
-	u8 nr_chan = parsed_region_chan->nr_chan;
-	u8 nr_parsedchan = 0;
-
-	u8 firstchan = 0, nextchan = 0, maxpwr = 0;
-
-	u8 i, flag = 0;
-
-	memcpy(domaininfo->countrycode, parsed_region_chan->countrycode,
-	       COUNTRY_CODE_LEN);
-
-	lbs_deb_11d("nrchan %d\n", nr_chan);
-	lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)parsed_region_chan,
-		sizeof(struct parsed_region_chan_11d));
-
-	for (i = 0; i < nr_chan; i++) {
-		if (!flag) {
-			flag = 1;
-			nextchan = firstchan =
-			    parsed_region_chan->chanpwr[i].chan;
-			maxpwr = parsed_region_chan->chanpwr[i].pwr;
-			nr_parsedchan = 1;
-			continue;
-		}
-
-		if (parsed_region_chan->chanpwr[i].chan == nextchan + 1 &&
-		    parsed_region_chan->chanpwr[i].pwr == maxpwr) {
-			nextchan++;
-			nr_parsedchan++;
-		} else {
-			domaininfo->subband[nr_subband].firstchan = firstchan;
-			domaininfo->subband[nr_subband].nrchan =
-			    nr_parsedchan;
-			domaininfo->subband[nr_subband].maxtxpwr = maxpwr;
-			nr_subband++;
-			nextchan = firstchan =
-			    parsed_region_chan->chanpwr[i].chan;
-			maxpwr = parsed_region_chan->chanpwr[i].pwr;
-		}
-	}
-
-	if (flag) {
-		domaininfo->subband[nr_subband].firstchan = firstchan;
-		domaininfo->subband[nr_subband].nrchan = nr_parsedchan;
-		domaininfo->subband[nr_subband].maxtxpwr = maxpwr;
-		nr_subband++;
-	}
-	domaininfo->nr_subband = nr_subband;
-
-	lbs_deb_11d("nr_subband=%x\n", domaininfo->nr_subband);
-	lbs_deb_hex(LBS_DEB_11D, "domaininfo", (char *)domaininfo,
-		COUNTRY_CODE_LEN + 1 +
-		sizeof(struct ieee_subbandset) * nr_subband);
-	return 0;
-}
-
-/**
- *  @brief This function generates parsed_region_chan from Domain Info learned from AP/IBSS
- *  @param region_chan          pointer to struct region_channel
- *  @param *parsed_region_chan  pointer to parsed_region_chan_11d
- *  @return 	                N/A
-*/
-static void lbs_generate_parsed_region_chan_11d(struct region_channel *region_chan,
-					  struct parsed_region_chan_11d *
-					  parsed_region_chan)
-{
-	u8 i;
-	struct chan_freq_power *cfp;
-
-	if (region_chan == NULL) {
-		lbs_deb_11d("region_chan is NULL\n");
-		return;
-	}
-
-	cfp = region_chan->CFP;
-	if (cfp == NULL) {
-		lbs_deb_11d("cfp is NULL \n");
-		return;
-	}
-
-	parsed_region_chan->band = region_chan->band;
-	parsed_region_chan->region = region_chan->region;
-	memcpy(parsed_region_chan->countrycode,
-	       lbs_code_2_region(region_chan->region), COUNTRY_CODE_LEN);
-
-	lbs_deb_11d("region 0x%x, band %d\n", parsed_region_chan->region,
-	       parsed_region_chan->band);
-
-	for (i = 0; i < region_chan->nrcfp; i++, cfp++) {
-		parsed_region_chan->chanpwr[i].chan = cfp->channel;
-		parsed_region_chan->chanpwr[i].pwr = cfp->maxtxpower;
-		lbs_deb_11d("chan %d, pwr %d\n",
-		       parsed_region_chan->chanpwr[i].chan,
-		       parsed_region_chan->chanpwr[i].pwr);
-	}
-	parsed_region_chan->nr_chan = region_chan->nrcfp;
-
-	lbs_deb_11d("nrchan %d\n", parsed_region_chan->nr_chan);
-
-	return;
-}
-
-/**
- *  @brief generate parsed_region_chan from Domain Info learned from AP/IBSS
- *  @param region               region ID
- *  @param band                 band
- *  @param chan                 chan
- *  @return 	                TRUE;FALSE
-*/
-static u8 lbs_region_chan_supported_11d(u8 region, u8 chan)
-{
-	struct chan_freq_power *cfp;
-	int cfp_no;
-	u8 idx;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_11D);
-
-	cfp = lbs_get_region_cfp_table(region, &cfp_no);
-	if (cfp == NULL)
-		return 0;
-
-	for (idx = 0; idx < cfp_no; idx++) {
-		if (chan == (cfp + idx)->channel) {
-			/* If Mrvl Chip Supported? */
-			if ((cfp + idx)->unsupported) {
-				ret = 0;
-			} else {
-				ret = 1;
-			}
-			goto done;
-		}
-	}
-
-	/*chan is not in the region table */
-
-done:
-	lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief This function checks if chan txpwr is learned from AP/IBSS
- *  @param chan                 chan number
- *  @param parsed_region_chan   pointer to parsed_region_chan_11d
- *  @return 	                0
-*/
-static int parse_domain_info_11d(struct ieee_ie_country_info_full_set *countryinfo,
-				 u8 band,
-				 struct parsed_region_chan_11d *parsed_region_chan)
-{
-	u8 nr_subband, nrchan;
-	u8 lastchan, firstchan;
-	u8 region;
-	u8 curchan = 0;
-
-	u8 idx = 0;		/*chan index in parsed_region_chan */
-
-	u8 j, i;
-
-	lbs_deb_enter(LBS_DEB_11D);
-
-	/*validation Rules:
-	   1. valid region Code
-	   2. First Chan increment
-	   3. channel range no overlap
-	   4. channel is valid?
-	   5. channel is supported by region?
-	   6. Others
-	 */
-
-	lbs_deb_hex(LBS_DEB_11D, "countryinfo", (u8 *) countryinfo, 30);
-
-	if ((*(countryinfo->countrycode)) == 0
-	    || (countryinfo->header.len <= COUNTRY_CODE_LEN)) {
-		/* No region Info or Wrong region info: treat as No 11D info */
-		goto done;
-	}
-
-	/*Step1: check region_code */
-	parsed_region_chan->region = region =
-	    lbs_region_2_code(countryinfo->countrycode);
-
-	lbs_deb_11d("regioncode=%x\n", (u8) parsed_region_chan->region);
-	lbs_deb_hex(LBS_DEB_11D, "countrycode", (char *)countryinfo->countrycode,
-		COUNTRY_CODE_LEN);
-
-	parsed_region_chan->band = band;
-
-	memcpy(parsed_region_chan->countrycode, countryinfo->countrycode,
-	       COUNTRY_CODE_LEN);
-
-	nr_subband = (countryinfo->header.len - COUNTRY_CODE_LEN) /
-	    sizeof(struct ieee_subbandset);
-
-	for (j = 0, lastchan = 0; j < nr_subband; j++) {
-
-		if (countryinfo->subband[j].firstchan <= lastchan) {
-			/*Step2&3. Check First Chan Num increment and no overlap */
-			lbs_deb_11d("chan %d>%d, overlap\n",
-			       countryinfo->subband[j].firstchan, lastchan);
-			continue;
-		}
-
-		firstchan = countryinfo->subband[j].firstchan;
-		nrchan = countryinfo->subband[j].nrchan;
-
-		for (i = 0; idx < MAX_NO_OF_CHAN && i < nrchan; i++) {
-			/*step4: channel is supported? */
-
-			if (!lbs_get_chan_11d(firstchan, i, &curchan)) {
-				/* Chan is not found in UN table */
-				lbs_deb_11d("chan is not supported: %d \n", i);
-				break;
-			}
-
-			lastchan = curchan;
-
-			if (lbs_region_chan_supported_11d(region, curchan)) {
-				/*step5: Check if curchan is supported by mrvl in region */
-				parsed_region_chan->chanpwr[idx].chan = curchan;
-				parsed_region_chan->chanpwr[idx].pwr =
-				    countryinfo->subband[j].maxtxpwr;
-				idx++;
-			} else {
-				/*not supported and ignore the chan */
-				lbs_deb_11d(
-				       "i %d, chan %d unsupported in region %x, band %d\n",
-				       i, curchan, region, band);
-			}
-		}
-
-		/*Step6: Add other checking if any */
-
-	}
-
-	parsed_region_chan->nr_chan = idx;
-
-	lbs_deb_11d("nrchan=%x\n", parsed_region_chan->nr_chan);
-	lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (u8 *) parsed_region_chan,
-		2 + COUNTRY_CODE_LEN + sizeof(struct parsed_region_chan_11d) * idx);
-
-done:
-	lbs_deb_enter(LBS_DEB_11D);
-	return 0;
-}
-
-/**
- *  @brief This function calculates the scan type for channels
- *  @param chan                 chan number
- *  @param parsed_region_chan   pointer to parsed_region_chan_11d
- *  @return 	                PASSIVE if chan is unknown; ACTIVE if chan is known
-*/
-u8 lbs_get_scan_type_11d(u8 chan,
-			  struct parsed_region_chan_11d * parsed_region_chan)
-{
-	u8 scan_type = CMD_SCAN_TYPE_PASSIVE;
-
-	lbs_deb_enter(LBS_DEB_11D);
-
-	if (lbs_channel_known_11d(chan, parsed_region_chan)) {
-		lbs_deb_11d("found, do active scan\n");
-		scan_type = CMD_SCAN_TYPE_ACTIVE;
-	} else {
-		lbs_deb_11d("not found, do passive scan\n");
-	}
-
-	lbs_deb_leave_args(LBS_DEB_11D, "ret scan_type %d", scan_type);
-	return scan_type;
-
-}
-
-void lbs_init_11d(struct lbs_private *priv)
-{
-	priv->enable11d = 0;
-	memset(&(priv->parsed_region_chan), 0,
-	       sizeof(struct parsed_region_chan_11d));
-	return;
-}
-
-/**
- *  @brief This function sets DOMAIN INFO to FW
- *  @param priv       pointer to struct lbs_private
- *  @return 	      0; -1
-*/
-static int set_domain_info_11d(struct lbs_private *priv)
-{
-	int ret;
-
-	if (!priv->enable11d) {
-		lbs_deb_11d("dnld domain Info with 11d disabled\n");
-		return 0;
-	}
-
-	ret = lbs_prepare_and_send_command(priv, CMD_802_11D_DOMAIN_INFO,
-				    CMD_ACT_SET,
-				    CMD_OPTION_WAITFORRSP, 0, NULL);
-	if (ret)
-		lbs_deb_11d("fail to dnld domain info\n");
-
-	return ret;
-}
-
-/**
- *  @brief This function setups scan channels
- *  @param priv       pointer to struct lbs_private
- *  @param band       band
- *  @return 	      0
-*/
-int lbs_set_universaltable(struct lbs_private *priv, u8 band)
-{
-	u16 size = sizeof(struct chan_freq_power);
-	u16 i = 0;
-
-	memset(priv->universal_channel, 0,
-	       sizeof(priv->universal_channel));
-
-	priv->universal_channel[i].nrcfp =
-	    sizeof(channel_freq_power_UN_BG) / size;
-	lbs_deb_11d("BG-band nrcfp %d\n",
-	       priv->universal_channel[i].nrcfp);
-
-	priv->universal_channel[i].CFP = channel_freq_power_UN_BG;
-	priv->universal_channel[i].valid = 1;
-	priv->universal_channel[i].region = UNIVERSAL_REGION_CODE;
-	priv->universal_channel[i].band = band;
-	i++;
-
-	return 0;
-}
-
-/**
- *  @brief This function implements command CMD_802_11D_DOMAIN_INFO
- *  @param priv       pointer to struct lbs_private
- *  @param cmd        pointer to cmd buffer
- *  @param cmdno      cmd ID
- *  @param cmdOption  cmd action
- *  @return 	      0
-*/
-int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
-				 struct cmd_ds_command *cmd, u16 cmdno,
-				 u16 cmdoption)
-{
-	struct cmd_ds_802_11d_domain_info *pdomaininfo =
-	    &cmd->params.domaininfo;
-	struct mrvl_ie_domain_param_set *domain = &pdomaininfo->domain;
-	u8 nr_subband = priv->domainreg.nr_subband;
-
-	lbs_deb_enter(LBS_DEB_11D);
-
-	lbs_deb_11d("nr_subband=%x\n", nr_subband);
-
-	cmd->command = cpu_to_le16(cmdno);
-	pdomaininfo->action = cpu_to_le16(cmdoption);
-	if (cmdoption == CMD_ACT_GET) {
-		cmd->size =
-		    cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN);
-		lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd,
-			le16_to_cpu(cmd->size));
-		goto done;
-	}
-
-	domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN);
-	memcpy(domain->countrycode, priv->domainreg.countrycode,
-	       sizeof(domain->countrycode));
-
-	domain->header.len =
-	    cpu_to_le16(nr_subband * sizeof(struct ieee_subbandset) +
-			     sizeof(domain->countrycode));
-
-	if (nr_subband) {
-		memcpy(domain->subband, priv->domainreg.subband,
-		       nr_subband * sizeof(struct ieee_subbandset));
-
-		cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
-					     le16_to_cpu(domain->header.len) +
-					     sizeof(struct mrvl_ie_header) +
-					     S_DS_GEN);
-	} else {
-		cmd->size =
-		    cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN);
-	}
-
-	lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd, le16_to_cpu(cmd->size));
-
-done:
-	lbs_deb_enter(LBS_DEB_11D);
-	return 0;
-}
-
-/**
- *  @brief This function parses countryinfo from AP and download country info to FW
- *  @param priv    pointer to struct lbs_private
- *  @param resp    pointer to command response buffer
- *  @return 	   0; -1
- */
-int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
-{
-	struct cmd_ds_802_11d_domain_info *domaininfo = &resp->params.domaininforesp;
-	struct mrvl_ie_domain_param_set *domain = &domaininfo->domain;
-	u16 action = le16_to_cpu(domaininfo->action);
-	s16 ret = 0;
-	u8 nr_subband = 0;
-
-	lbs_deb_enter(LBS_DEB_11D);
-
-	lbs_deb_hex(LBS_DEB_11D, "domain info resp", (u8 *) resp,
-		(int)le16_to_cpu(resp->size));
-
-	nr_subband = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) /
-		      sizeof(struct ieee_subbandset);
-
-	lbs_deb_11d("domain info resp: nr_subband %d\n", nr_subband);
-
-	if (nr_subband > MRVDRV_MAX_SUBBAND_802_11D) {
-		lbs_deb_11d("Invalid Numrer of Subband returned!!\n");
-		return -1;
-	}
-
-	switch (action) {
-	case CMD_ACT_SET:	/*Proc Set action */
-		break;
-
-	case CMD_ACT_GET:
-		break;
-	default:
-		lbs_deb_11d("Invalid action:%d\n", domaininfo->action);
-		ret = -1;
-		break;
-	}
-
-	lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief This function parses countryinfo from AP and download country info to FW
- *  @param priv    pointer to struct lbs_private
- *  @return 	   0; -1
- */
-int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
-                                        struct bss_descriptor * bss)
-{
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_11D);
-	if (priv->enable11d) {
-		memset(&priv->parsed_region_chan, 0,
-		       sizeof(struct parsed_region_chan_11d));
-		ret = parse_domain_info_11d(&bss->countryinfo, 0,
-					       &priv->parsed_region_chan);
-
-		if (ret == -1) {
-			lbs_deb_11d("error parsing domain_info from AP\n");
-			goto done;
-		}
-
-		memset(&priv->domainreg, 0,
-		       sizeof(struct lbs_802_11d_domain_reg));
-		generate_domain_info_11d(&priv->parsed_region_chan,
-				      &priv->domainreg);
-
-		ret = set_domain_info_11d(priv);
-
-		if (ret) {
-			lbs_deb_11d("error setting domain info\n");
-			goto done;
-		}
-	}
-	ret = 0;
-
-done:
-	lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
-	return ret;
-}
-
-/**
- *  @brief This function generates 11D info from user specified regioncode and download to FW
- *  @param priv    pointer to struct lbs_private
- *  @return 	   0; -1
- */
-int lbs_create_dnld_countryinfo_11d(struct lbs_private *priv)
-{
-	int ret;
-	struct region_channel *region_chan;
-	u8 j;
-
-	lbs_deb_enter(LBS_DEB_11D);
-	lbs_deb_11d("curbssparams.band %d\n", priv->curbssparams.band);
-
-	if (priv->enable11d) {
-		/* update parsed_region_chan_11; dnld domaininf to FW */
-
-		for (j = 0; j < ARRAY_SIZE(priv->region_channel); j++) {
-			region_chan = &priv->region_channel[j];
-
-			lbs_deb_11d("%d region_chan->band %d\n", j,
-			       region_chan->band);
-
-			if (!region_chan || !region_chan->valid
-			    || !region_chan->CFP)
-				continue;
-			if (region_chan->band != priv->curbssparams.band)
-				continue;
-			break;
-		}
-
-		if (j >= ARRAY_SIZE(priv->region_channel)) {
-			lbs_deb_11d("region_chan not found, band %d\n",
-			       priv->curbssparams.band);
-			ret = -1;
-			goto done;
-		}
-
-		memset(&priv->parsed_region_chan, 0,
-		       sizeof(struct parsed_region_chan_11d));
-		lbs_generate_parsed_region_chan_11d(region_chan,
-						     &priv->
-						     parsed_region_chan);
-
-		memset(&priv->domainreg, 0,
-		       sizeof(struct lbs_802_11d_domain_reg));
-		generate_domain_info_11d(&priv->parsed_region_chan,
-					 &priv->domainreg);
-
-		ret = set_domain_info_11d(priv);
-
-		if (ret) {
-			lbs_deb_11d("error setting domain info\n");
-			goto done;
-		}
-
-	}
-	ret = 0;
-
-done:
-	lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
-	return ret;
-}
diff --git a/drivers/net/wireless/libertas/11d.h b/drivers/net/wireless/libertas/11d.h
deleted file mode 100644
index fb75d3e..0000000
--- a/drivers/net/wireless/libertas/11d.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
-  * This header file contains data structures and
-  * function declarations of 802.11d
-  */
-#ifndef _LBS_11D_
-#define _LBS_11D_
-
-#include "types.h"
-#include "defs.h"
-
-#define UNIVERSAL_REGION_CODE			0xff
-
-/** (Beaconsize(256)-5(IEId,len,contrystr(3))/3(FirstChan,NoOfChan,MaxPwr)
- */
-#define MRVDRV_MAX_SUBBAND_802_11D		83
-
-#define COUNTRY_CODE_LEN			3
-#define MAX_NO_OF_CHAN 				40
-
-struct cmd_ds_command;
-
-/** Data structure for Country IE*/
-struct ieee_subbandset {
-	u8 firstchan;
-	u8 nrchan;
-	u8 maxtxpwr;
-} __attribute__ ((packed));
-
-struct ieee_ie_country_info_set {
-	struct ieee_ie_header header;
-
-	u8 countrycode[COUNTRY_CODE_LEN];
-	struct ieee_subbandset subband[1];
-};
-
-struct ieee_ie_country_info_full_set {
-	struct ieee_ie_header header;
-
-	u8 countrycode[COUNTRY_CODE_LEN];
-	struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
-} __attribute__ ((packed));
-
-struct mrvl_ie_domain_param_set {
-	struct mrvl_ie_header header;
-
-	u8 countrycode[COUNTRY_CODE_LEN];
-	struct ieee_subbandset subband[1];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11d_domain_info {
-	__le16 action;
-	struct mrvl_ie_domain_param_set domain;
-} __attribute__ ((packed));
-
-/** domain regulatory information */
-struct lbs_802_11d_domain_reg {
-	/** country Code*/
-	u8 countrycode[COUNTRY_CODE_LEN];
-	/** No. of subband*/
-	u8 nr_subband;
-	struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
-};
-
-struct chan_power_11d {
-	u8 chan;
-	u8 pwr;
-} __attribute__ ((packed));
-
-struct parsed_region_chan_11d {
-	u8 band;
-	u8 region;
-	s8 countrycode[COUNTRY_CODE_LEN];
-	struct chan_power_11d chanpwr[MAX_NO_OF_CHAN];
-	u8 nr_chan;
-} __attribute__ ((packed));
-
-struct region_code_mapping {
-	u8 region[COUNTRY_CODE_LEN];
-	u8 code;
-};
-
-struct lbs_private;
-
-u8 lbs_get_scan_type_11d(u8 chan,
-			  struct parsed_region_chan_11d *parsed_region_chan);
-
-u32 lbs_chan_2_freq(u8 chan);
-
-void lbs_init_11d(struct lbs_private *priv);
-
-int lbs_set_universaltable(struct lbs_private *priv, u8 band);
-
-int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
-				 struct cmd_ds_command *cmd, u16 cmdno,
-				 u16 cmdOption);
-
-int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp);
-
-struct bss_descriptor;
-int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
-                                        struct bss_descriptor * bss);
-
-int lbs_create_dnld_countryinfo_11d(struct lbs_private *priv);
-
-#endif
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index e5584dd..fa37039 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,4 +1,3 @@
-libertas-y += 11d.o
 libertas-y += assoc.o
 libertas-y += cfg.o
 libertas-y += cmd.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index dd87326..7510673 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -23,6 +23,13 @@
  */
 #define CAPINFO_MASK	(~(0xda00))
 
+/**
+ * 802.11b/g supported bitrates (in 500Kb/s units)
+ */
+u8 lbs_bg_rates[MAX_RATES] =
+	{ 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60,
+	  0x6c, 0x00, 0x00 };
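+/*
+ * For reference, in Mb/s the table above reads 1, 2, 5.5, 11 (802.11b)
+ * followed by 6, 9, 12, 18, 24, 36, 48 and 54 (802.11g); the two trailing
+ * zero entries terminate the list.
+ */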
+
 
 /**
  *  @brief This function finds common rates between rates and card rates.
@@ -147,6 +154,397 @@
 }
 
 
+int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
+			   struct assoc_request *assoc)
+{
+	struct cmd_ds_802_11_set_wep cmd;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	cmd.action = cpu_to_le16(cmd_action);
+
+	if (cmd_action == CMD_ACT_ADD) {
+		int i;
+
+		/* default tx key index */
+		cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
+					   CMD_WEP_KEY_INDEX_MASK);
+
+		/* Copy key types and material to host command structure */
+		for (i = 0; i < 4; i++) {
+			struct enc_key *pkey = &assoc->wep_keys[i];
+
+			switch (pkey->len) {
+			case KEY_LEN_WEP_40:
+				cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
+				memmove(cmd.keymaterial[i], pkey->key, pkey->len);
+				lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
+				break;
+			case KEY_LEN_WEP_104:
+				cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
+				memmove(cmd.keymaterial[i], pkey->key, pkey->len);
+				lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
+				break;
+			case 0:
+				break;
+			default:
+				lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
+					    i, pkey->len);
+				ret = -1;
+				goto done;
+				break;
+			}
+		}
+	} else if (cmd_action == CMD_ACT_REMOVE) {
+		/* ACT_REMOVE clears _all_ WEP keys */
+
+		/* default tx key index */
+		cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
+					   CMD_WEP_KEY_INDEX_MASK);
+		lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
+	}
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
+done:
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
+
+int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
+			      uint16_t *enable)
+{
+	struct cmd_ds_802_11_enable_rsn cmd;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(cmd_action);
+
+	if (cmd_action == CMD_ACT_GET)
+		cmd.enable = 0;
+	else {
+		if (*enable)
+			cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
+		else
+			cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
+		lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
+	}
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
+	if (!ret && cmd_action == CMD_ACT_GET)
+		*enable = le16_to_cpu(cmd.enable);
+
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
+
+static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
+		struct enc_key *key)
+{
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	if (key->flags & KEY_INFO_WPA_ENABLED)
+		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
+	if (key->flags & KEY_INFO_WPA_UNICAST)
+		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
+	if (key->flags & KEY_INFO_WPA_MCAST)
+		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
+
+	keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
+	keyparam->keytypeid = cpu_to_le16(key->type);
+	keyparam->keylen = cpu_to_le16(key->len);
+	memcpy(keyparam->key, key->key, key->len);
+
+	/* Length field doesn't include the {type,length} header */
+	keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
+	lbs_deb_leave(LBS_DEB_CMD);
+}
+
+int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
+				struct assoc_request *assoc)
+{
+	struct cmd_ds_802_11_key_material cmd;
+	int ret = 0;
+	int index = 0;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	cmd.action = cpu_to_le16(cmd_action);
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	if (cmd_action == CMD_ACT_GET) {
+		cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_header) + 2);
+	} else {
+		memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
+
+		if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
+			set_one_wpa_key(&cmd.keyParamSet[index],
+					&assoc->wpa_unicast_key);
+			index++;
+		}
+
+		if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
+			set_one_wpa_key(&cmd.keyParamSet[index],
+					&assoc->wpa_mcast_key);
+			index++;
+		}
+
+		/* The common header and as many keys as we included */
+		cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
+						    keyParamSet[index]));
+	}
+	ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
+	/* Copy the returned key to driver private data */
+	if (!ret && cmd_action == CMD_ACT_GET) {
+		void *buf_ptr = cmd.keyParamSet;
+		void *resp_end = &(&cmd)[1];
+
+		while (buf_ptr < resp_end) {
+			struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
+			struct enc_key *key;
+			uint16_t param_set_len = le16_to_cpu(keyparam->length);
+			uint16_t key_len = le16_to_cpu(keyparam->keylen);
+			uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
+			uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
+			void *end;
+
+			end = (void *)keyparam + sizeof(keyparam->type)
+				+ sizeof(keyparam->length) + param_set_len;
+
+			/* Make sure we don't access past the end of the IEs */
+			if (end > resp_end)
+				break;
+
+			if (key_flags & KEY_INFO_WPA_UNICAST)
+				key = &priv->wpa_unicast_key;
+			else if (key_flags & KEY_INFO_WPA_MCAST)
+				key = &priv->wpa_mcast_key;
+			else
+				break;
+
+			/* Copy returned key into driver */
+			memset(key, 0, sizeof(struct enc_key));
+			if (key_len > sizeof(key->key))
+				break;
+			key->type = key_type;
+			key->flags = key_flags;
+			key->len = key_len;
+			memcpy(key->key, keyparam->key, key->len);
+
+			buf_ptr = end + 1;
+		}
+	}
+
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
+
+static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
+{
+/*		Bit  	Rate
+*		15:13 Reserved
+*		12    54 Mbps
+*		11    48 Mbps
+*		10    36 Mbps
+*		9     24 Mbps
+*		8     18 Mbps
+*		7     12 Mbps
+*		6     9 Mbps
+*		5     6 Mbps
+*		4     Reserved
+*		3     11 Mbps
+*		2     5.5 Mbps
+*		1     2 Mbps
+*		0     1 Mbps
+**/
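+	/*
+	 * Worked example (illustrative): if lbs_data_rate_to_fw_index()
+	 * returns i = 3 (11 Mbps in the layout above), lower_rates_ok
+	 * gives 0x1fef >> (12 - 3) == 0x000f (1, 2, 5.5 and 11 Mbps),
+	 * while a locked rate gives 1 << 3 == 0x0008 (11 Mbps only).
+	 */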
+
+	uint16_t ratemask;
+	int i = lbs_data_rate_to_fw_index(rate);
+	if (lower_rates_ok)
+		ratemask = (0x1fef >> (12 - i));
+	else
+		ratemask = (1 << i);
+	return cpu_to_le16(ratemask);
+}
+
+int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
+				      uint16_t cmd_action)
+{
+	struct cmd_ds_802_11_rate_adapt_rateset cmd;
+	int ret;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	if (!priv->cur_rate && !priv->enablehwauto)
+		return -EINVAL;
+
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	cmd.action = cpu_to_le16(cmd_action);
+	cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
+	cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
+	ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
+	if (!ret && cmd_action == CMD_ACT_GET) {
+		priv->ratebitmap = le16_to_cpu(cmd.bitmap);
+		priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
+	}
+
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
+
+/**
+ *  @brief Set the data rate
+ *
+ *  @param priv    	A pointer to struct lbs_private structure
+ *  @param rate  	The desired data rate, or 0 to clear a locked rate
+ *
+ *  @return 	   	0 on success, error on failure
+ */
+int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
+{
+	struct cmd_ds_802_11_data_rate cmd;
+	int ret = 0;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+
+	if (rate > 0) {
+		cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
+		cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
+		if (cmd.rates[0] == 0) {
+			lbs_deb_cmd("DATA_RATE: invalid requested rate of"
+				" 0x%02X\n", rate);
+			ret = 0;
+			goto out;
+		}
+		lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
+	} else {
+		cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
+		lbs_deb_cmd("DATA_RATE: setting auto\n");
+	}
+
+	ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
+	if (ret)
+		goto out;
+
+	lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof(cmd));
+
+	/* FIXME: get actual rates FW can do if this command actually returns
+	 * all data rates supported.
+	 */
+	priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
+	lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
+
+out:
+	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
+	return ret;
+}
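+/*
+ * A minimal usage sketch (assuming the rate argument is expressed in the
+ * same 500 kb/s units as lbs_bg_rates above): lbs_set_data_rate(priv, 0x6C)
+ * requests a fixed 54 Mbps TX rate, while lbs_set_data_rate(priv, 0) clears
+ * any locked rate and returns to automatic rate selection.
+ */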
+
+
+int lbs_cmd_802_11_rssi(struct lbs_private *priv,
+				struct cmd_ds_command *cmd)
+{
+
+	lbs_deb_enter(LBS_DEB_CMD);
+	cmd->command = cpu_to_le16(CMD_802_11_RSSI);
+	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) +
+		sizeof(struct cmd_header));
+	cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
+
+	/* reset Beacon SNR/NF/RSSI values */
+	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
+	priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
+	priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
+	priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
+	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
+	priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
+int lbs_ret_802_11_rssi(struct lbs_private *priv,
+				struct cmd_ds_command *resp)
+{
+	struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	/* store the non average value */
+	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
+	priv->NF[TYPE_BEACON][TYPE_NOAVG] =
+		get_unaligned_le16(&rssirsp->noisefloor);
+
+	priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
+	priv->NF[TYPE_BEACON][TYPE_AVG] =
+		get_unaligned_le16(&rssirsp->avgnoisefloor);
+
+	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
+	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
+		     priv->NF[TYPE_BEACON][TYPE_NOAVG]);
+
+	priv->RSSI[TYPE_BEACON][TYPE_AVG] =
+	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
+		     priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
+
+	lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
+	       priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
+	       priv->RSSI[TYPE_BEACON][TYPE_AVG]);
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
+
+int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
+				struct cmd_ds_command *cmd,
+				u16 cmd_action)
+{
+	struct cmd_ds_802_11_beacon_control
+		*bcn_ctrl = &cmd->params.bcn_ctrl;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+	cmd->size =
+	    cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
+			     + sizeof(struct cmd_header));
+	cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
+
+	bcn_ctrl->action = cpu_to_le16(cmd_action);
+	bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
+	bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
+int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
+					struct cmd_ds_command *resp)
+{
+	struct cmd_ds_802_11_beacon_control *bcn_ctrl =
+	    &resp->params.bcn_ctrl;
+
+	lbs_deb_enter(LBS_DEB_CMD);
+
+	if (bcn_ctrl->action == CMD_ACT_GET) {
+		priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
+		priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
+	}
+
+	lbs_deb_leave(LBS_DEB_CMD);
+	return 0;
+}
+
+
+
 static int lbs_assoc_post(struct lbs_private *priv,
 			  struct cmd_ds_802_11_associate_response *resp)
 {
@@ -226,7 +624,7 @@
 	priv->connect_status = LBS_CONNECTED;
 
 	/* Update current SSID and BSSID */
-	memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
+	memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
 	priv->curbssparams.ssid_len = bss->ssid_len;
 	memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
 
@@ -369,12 +767,7 @@
 				   (u16)(pos - (u8 *) &cmd.iebuf));
 
 	/* update curbssparams */
-	priv->curbssparams.channel = bss->phy.ds.channel;
-
-	if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
-		ret = -1;
-		goto done;
-	}
+	priv->channel = bss->phy.ds.channel;
 
 	ret = lbs_cmd_with_response(priv, command, &cmd);
 	if (ret == 0) {
@@ -472,7 +865,7 @@
 	memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
 
 	/* Set the new SSID to current SSID */
-	memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
+	memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
 	priv->curbssparams.ssid_len = bss->ssid_len;
 
 	netif_carrier_on(priv->dev);
@@ -487,7 +880,7 @@
 	lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
 		     print_ssid(ssid, bss->ssid, bss->ssid_len),
 		     priv->curbssparams.bssid,
-		     priv->curbssparams.channel);
+		     priv->channel);
 
 done:
 	lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
@@ -560,7 +953,7 @@
 	lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
 
 	priv->adhoccreate = 0;
-	priv->curbssparams.channel = bss->channel;
+	priv->channel = bss->channel;
 
 	/* Build the join command */
 	memset(&cmd, 0, sizeof(cmd));
@@ -633,11 +1026,6 @@
 		}
 	}
 
-	if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
-		ret = -1;
-		goto out;
-	}
-
 	ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
 	if (ret == 0) {
 		ret = lbs_adhoc_post(priv,
@@ -737,12 +1125,6 @@
 	lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
 	       cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
 
-	if (lbs_create_dnld_countryinfo_11d(priv)) {
-		lbs_deb_join("ADHOC_START: dnld_countryinfo_11d failed\n");
-		ret = -1;
-		goto out;
-	}
-
 	lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
 		     assoc_req->channel, assoc_req->band);
 
@@ -1099,7 +1481,7 @@
 			/* else send START command */
 			lbs_deb_assoc("SSID not found, creating adhoc network\n");
 			memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
-				IW_ESSID_MAX_SIZE);
+				IEEE80211_MAX_SSID_LEN);
 			assoc_req->bss.ssid_len = assoc_req->ssid_len;
 			lbs_adhoc_start(priv, assoc_req);
 		}
@@ -1185,7 +1567,8 @@
 	}
 
 	priv->mode = assoc_req->mode;
-	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, assoc_req->mode);
+	ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE,
+		assoc_req->mode == IW_MODE_ADHOC ? 2 : 1);
 
 done:
 	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1205,7 +1588,7 @@
 		goto done;
 	}
 
-	if (assoc_req->channel == priv->curbssparams.channel)
+	if (assoc_req->channel == priv->channel)
 		goto done;
 
 	if (priv->mesh_dev) {
@@ -1217,7 +1600,7 @@
 	}
 
 	lbs_deb_assoc("ASSOC: channel: %d -> %d\n",
-		      priv->curbssparams.channel, assoc_req->channel);
+		      priv->channel, assoc_req->channel);
 
 	ret = lbs_set_channel(priv, assoc_req->channel);
 	if (ret < 0)
@@ -1232,7 +1615,7 @@
 		goto done;
 	}
 
-	if (assoc_req->channel != priv->curbssparams.channel) {
+	if (assoc_req->channel != priv->channel) {
 		lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n",
 		              assoc_req->channel);
 		goto restore_mesh;
@@ -1253,7 +1636,7 @@
  restore_mesh:
 	if (priv->mesh_dev)
 		lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-				priv->curbssparams.channel);
+				priv->channel);
 
  done:
 	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1475,7 +1858,7 @@
 	}
 
 	if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
-		if (assoc_req->channel != priv->curbssparams.channel)
+		if (assoc_req->channel != priv->channel)
 			return 1;
 	}
 
@@ -1557,7 +1940,7 @@
 
 	found = lbs_find_best_ssid_in_list(priv, preferred_mode);
 	if (found && (found->ssid_len > 0)) {
-		memcpy(out_ssid, &found->ssid, IW_ESSID_MAX_SIZE);
+		memcpy(out_ssid, &found->ssid, IEEE80211_MAX_SSID_LEN);
 		*out_ssid_len = found->ssid_len;
 		*out_mode = found->mode;
 		ret = 0;
@@ -1775,12 +2158,12 @@
 	assoc_req = priv->pending_assoc_req;
 	if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
 		memcpy(&assoc_req->ssid, &priv->curbssparams.ssid,
-		       IW_ESSID_MAX_SIZE);
+		       IEEE80211_MAX_SSID_LEN);
 		assoc_req->ssid_len = priv->curbssparams.ssid_len;
 	}
 
 	if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
-		assoc_req->channel = priv->curbssparams.channel;
+		assoc_req->channel = priv->channel;
 
 	if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags))
 		assoc_req->band = priv->curbssparams.band;
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index 6e765e9..40621b7 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -3,7 +3,126 @@
 #ifndef _LBS_ASSOC_H_
 #define _LBS_ASSOC_H_
 
-#include "dev.h"
+
+#include "defs.h"
+#include "host.h"
+
+
+struct lbs_private;
+
+/*
+ * In theory, the IE is limited to the IE length, 255,
+ * but in practice 64 bytes are enough.
+ */
+#define MAX_WPA_IE_LEN 64
+
+
+
+struct lbs_802_11_security {
+	u8 WPAenabled;
+	u8 WPA2enabled;
+	u8 wep_enabled;
+	u8 auth_mode;
+	u32 key_mgmt;
+};
+
+/** Current Basic Service Set State Structure */
+struct current_bss_params {
+	/** bssid */
+	u8 bssid[ETH_ALEN];
+	/** ssid */
+	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 ssid_len;
+
+	/** band */
+	u8 band;
+	/** channel is directly in priv->channel */
+	/** zero-terminated array of supported data rates */
+	u8 rates[MAX_RATES + 1];
+};
+
+/**
+ *  @brief Structure used to store information for each beacon/probe response
+ */
+struct bss_descriptor {
+	u8 bssid[ETH_ALEN];
+
+	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 ssid_len;
+
+	u16 capability;
+	u32 rssi;
+	u32 channel;
+	u16 beaconperiod;
+	__le16 atimwindow;
+
+	/* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
+	u8 mode;
+
+	/* zero-terminated array of supported data rates */
+	u8 rates[MAX_RATES + 1];
+
+	unsigned long last_scanned;
+
+	union ieee_phy_param_set phy;
+	union ieee_ss_param_set ss;
+
+	u8 wpa_ie[MAX_WPA_IE_LEN];
+	size_t wpa_ie_len;
+	u8 rsn_ie[MAX_WPA_IE_LEN];
+	size_t rsn_ie_len;
+
+	u8 mesh;
+
+	struct list_head list;
+};
+
+/** Association request
+ *
+ * Encapsulates all the options that describe a specific association request
+ * or configuration of the wireless card's radio, mode, and security settings.
+ */
+struct assoc_request {
+#define ASSOC_FLAG_SSID			1
+#define ASSOC_FLAG_CHANNEL		2
+#define ASSOC_FLAG_BAND			3
+#define ASSOC_FLAG_MODE			4
+#define ASSOC_FLAG_BSSID		5
+#define ASSOC_FLAG_WEP_KEYS		6
+#define ASSOC_FLAG_WEP_TX_KEYIDX	7
+#define ASSOC_FLAG_WPA_MCAST_KEY	8
+#define ASSOC_FLAG_WPA_UCAST_KEY	9
+#define ASSOC_FLAG_SECINFO		10
+#define ASSOC_FLAG_WPA_IE		11
+	unsigned long flags;
+
+	u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 ssid_len;
+	u8 channel;
+	u8 band;
+	u8 mode;
+	u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
+
+	/** WEP keys */
+	struct enc_key wep_keys[4];
+	u16 wep_tx_keyidx;
+
+	/** WPA keys */
+	struct enc_key wpa_mcast_key;
+	struct enc_key wpa_unicast_key;
+
+	struct lbs_802_11_security secinfo;
+
+	/** WPA Information Elements*/
+	u8 wpa_ie[MAX_WPA_IE_LEN];
+	u8 wpa_ie_len;
+
+	/* BSS to associate with for infrastructure or Ad-Hoc join */
+	struct bss_descriptor bss;
+};
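+/*
+ * The ASSOC_FLAG_* values above are bit numbers: callers mark which fields
+ * of the request were filled in by setting the corresponding bit in 'flags'
+ * and later check them with test_bit(), as the association worker does.
+ */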
+
+
+extern u8 lbs_bg_rates[MAX_RATES];
 
 void lbs_association_worker(struct work_struct *work);
 struct assoc_request *lbs_get_association_request(struct lbs_private *priv);
@@ -13,4 +132,24 @@
 int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
 				 u8 bssid[ETH_ALEN], u16 reason);
 
+int lbs_cmd_802_11_rssi(struct lbs_private *priv,
+				struct cmd_ds_command *cmd);
+int lbs_ret_802_11_rssi(struct lbs_private *priv,
+				struct cmd_ds_command *resp);
+
+int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
+				struct cmd_ds_command *cmd,
+				u16 cmd_action);
+int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
+					struct cmd_ds_command *resp);
+
+int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
+			   struct assoc_request *assoc);
+
+int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
+			      uint16_t *enable);
+
+int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
+				struct assoc_request *assoc);
+
 #endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 0fb3125..1065ce2 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -3,18 +3,20 @@
   * It prepares command and sends it to firmware when it is ready.
   */
 
-#include <net/iw_handler.h>
 #include <net/lib80211.h>
 #include <linux/kfifo.h>
+#include <linux/sched.h>
+
 #include "host.h"
-#include "hostcmd.h"
 #include "decl.h"
 #include "defs.h"
 #include "dev.h"
 #include "assoc.h"
 #include "wext.h"
+#include "scan.h"
 #include "cmd.h"
 
+
 static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv);
 
 /**
@@ -191,11 +193,6 @@
 		goto out;
 	}
 
-	if (lbs_set_universaltable(priv, 0)) {
-		ret = -1;
-		goto out;
-	}
-
 out:
 	lbs_deb_leave(LBS_DEB_CMD);
 	return ret;
@@ -244,7 +241,7 @@
 
 	cmd->command = cpu_to_le16(CMD_802_11_PS_MODE);
 	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) +
-				S_DS_GEN);
+				sizeof(struct cmd_header));
 	psm->action = cpu_to_le16(cmd_action);
 	psm->multipledtim = 0;
 	switch (cmd_action) {
@@ -273,33 +270,6 @@
 	return 0;
 }
 
-int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
-				      uint16_t cmd_action, uint16_t *timeout)
-{
-	struct cmd_ds_802_11_inactivity_timeout cmd;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	cmd.hdr.command = cpu_to_le16(CMD_802_11_INACTIVITY_TIMEOUT);
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	cmd.action = cpu_to_le16(cmd_action);
-
-	if (cmd_action == CMD_ACT_SET)
-		cmd.timeout = cpu_to_le16(*timeout);
-	else
-		cmd.timeout = 0;
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_INACTIVITY_TIMEOUT, &cmd);
-
-	if (!ret)
-		*timeout = le16_to_cpu(cmd.timeout);
-
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return 0;
-}
-
 int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
 				struct sleep_params *sp)
 {
@@ -396,197 +366,6 @@
 	return ret;
 }
 
-int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
-			   struct assoc_request *assoc)
-{
-	struct cmd_ds_802_11_set_wep cmd;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	cmd.action = cpu_to_le16(cmd_action);
-
-	if (cmd_action == CMD_ACT_ADD) {
-		int i;
-
-		/* default tx key index */
-		cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
-					   CMD_WEP_KEY_INDEX_MASK);
-
-		/* Copy key types and material to host command structure */
-		for (i = 0; i < 4; i++) {
-			struct enc_key *pkey = &assoc->wep_keys[i];
-
-			switch (pkey->len) {
-			case KEY_LEN_WEP_40:
-				cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
-				memmove(cmd.keymaterial[i], pkey->key, pkey->len);
-				lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
-				break;
-			case KEY_LEN_WEP_104:
-				cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
-				memmove(cmd.keymaterial[i], pkey->key, pkey->len);
-				lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
-				break;
-			case 0:
-				break;
-			default:
-				lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
-					    i, pkey->len);
-				ret = -1;
-				goto done;
-				break;
-			}
-		}
-	} else if (cmd_action == CMD_ACT_REMOVE) {
-		/* ACT_REMOVE clears _all_ WEP keys */
-
-		/* default tx key index */
-		cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
-					   CMD_WEP_KEY_INDEX_MASK);
-		lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
-	}
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
-done:
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
-int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
-			      uint16_t *enable)
-{
-	struct cmd_ds_802_11_enable_rsn cmd;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-	cmd.action = cpu_to_le16(cmd_action);
-
-	if (cmd_action == CMD_ACT_GET)
-		cmd.enable = 0;
-	else {
-		if (*enable)
-			cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
-		else
-			cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
-		lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
-	}
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
-	if (!ret && cmd_action == CMD_ACT_GET)
-		*enable = le16_to_cpu(cmd.enable);
-
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
-static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
-                            struct enc_key *key)
-{
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	if (key->flags & KEY_INFO_WPA_ENABLED)
-		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
-	if (key->flags & KEY_INFO_WPA_UNICAST)
-		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
-	if (key->flags & KEY_INFO_WPA_MCAST)
-		keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
-
-	keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
-	keyparam->keytypeid = cpu_to_le16(key->type);
-	keyparam->keylen = cpu_to_le16(key->len);
-	memcpy(keyparam->key, key->key, key->len);
-
-	/* Length field doesn't include the {type,length} header */
-	keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
-	lbs_deb_leave(LBS_DEB_CMD);
-}
-
-int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
-				struct assoc_request *assoc)
-{
-	struct cmd_ds_802_11_key_material cmd;
-	int ret = 0;
-	int index = 0;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	cmd.action = cpu_to_le16(cmd_action);
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	if (cmd_action == CMD_ACT_GET) {
-		cmd.hdr.size = cpu_to_le16(S_DS_GEN + 2);
-	} else {
-		memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
-
-		if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
-			set_one_wpa_key(&cmd.keyParamSet[index],
-					&assoc->wpa_unicast_key);
-			index++;
-		}
-
-		if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
-			set_one_wpa_key(&cmd.keyParamSet[index],
-					&assoc->wpa_mcast_key);
-			index++;
-		}
-
-		/* The common header and as many keys as we included */
-		cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
-						    keyParamSet[index]));
-	}
-	ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
-	/* Copy the returned key to driver private data */
-	if (!ret && cmd_action == CMD_ACT_GET) {
-		void *buf_ptr = cmd.keyParamSet;
-		void *resp_end = &(&cmd)[1];
-
-		while (buf_ptr < resp_end) {
-			struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
-			struct enc_key *key;
-			uint16_t param_set_len = le16_to_cpu(keyparam->length);
-			uint16_t key_len = le16_to_cpu(keyparam->keylen);
-			uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
-			uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
-			void *end;
-
-			end = (void *)keyparam + sizeof(keyparam->type)
-				+ sizeof(keyparam->length) + param_set_len;
-
-			/* Make sure we don't access past the end of the IEs */
-			if (end > resp_end)
-				break;
-
-			if (key_flags & KEY_INFO_WPA_UNICAST)
-				key = &priv->wpa_unicast_key;
-			else if (key_flags & KEY_INFO_WPA_MCAST)
-				key = &priv->wpa_mcast_key;
-			else
-				break;
-
-			/* Copy returned key into driver */
-			memset(key, 0, sizeof(struct enc_key));
-			if (key_len > sizeof(key->key))
-				break;
-			key->type = key_type;
-			key->flags = key_flags;
-			key->len = key_len;
-			memcpy(key->key, keyparam->key, key->len);
-
-			buf_ptr = end + 1;
-		}
-	}
-
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
 /**
  *  @brief Set an SNMP MIB value
  *
@@ -611,7 +390,7 @@
 	switch (oid) {
 	case SNMP_MIB_OID_BSS_TYPE:
 		cmd.bufsize = cpu_to_le16(sizeof(u8));
-		cmd.value[0] = (val == IW_MODE_ADHOC) ? 2 : 1;
+		cmd.value[0] = val;
 		break;
 	case SNMP_MIB_OID_11D_ENABLE:
 	case SNMP_MIB_OID_FRAG_THRESHOLD:
@@ -664,13 +443,7 @@
 
 	switch (le16_to_cpu(cmd.bufsize)) {
 	case sizeof(u8):
-		if (oid == SNMP_MIB_OID_BSS_TYPE) {
-			if (cmd.value[0] == 2)
-				*out_val = IW_MODE_ADHOC;
-			else
-				*out_val = IW_MODE_INFRA;
-		} else
-			*out_val = cmd.value[0];
+		*out_val = cmd.value[0];
 		break;
 	case sizeof(u16):
 		*out_val = le16_to_cpu(*((__le16 *)(&cmd.value)));
@@ -757,7 +530,7 @@
 	cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE);
 	cmd->size =
 	    cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) +
-			     S_DS_GEN);
+			     sizeof(struct cmd_header));
 
 	monitor->action = cpu_to_le16(cmd_action);
 	if (cmd_action == CMD_ACT_SET) {
@@ -768,111 +541,6 @@
 	return 0;
 }
 
-static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
-{
-/*		Bit  	Rate
-*		15:13 Reserved
-*		12    54 Mbps
-*		11    48 Mbps
-*		10    36 Mbps
-*		9     24 Mbps
-*		8     18 Mbps
-*		7     12 Mbps
-*		6     9 Mbps
-*		5     6 Mbps
-*		4     Reserved
-*		3     11 Mbps
-*		2     5.5 Mbps
-*		1     2 Mbps
-*		0     1 Mbps
-**/
-
-	uint16_t ratemask;
-	int i = lbs_data_rate_to_fw_index(rate);
-	if (lower_rates_ok)
-		ratemask = (0x1fef >> (12 - i));
-	else
-		ratemask = (1 << i);
-	return cpu_to_le16(ratemask);
-}
-
-int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
-				      uint16_t cmd_action)
-{
-	struct cmd_ds_802_11_rate_adapt_rateset cmd;
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	if (!priv->cur_rate && !priv->enablehwauto)
-		return -EINVAL;
-
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	cmd.action = cpu_to_le16(cmd_action);
-	cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
-	cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
-	ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
-	if (!ret && cmd_action == CMD_ACT_GET) {
-		priv->ratebitmap = le16_to_cpu(cmd.bitmap);
-		priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
-	}
-
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(lbs_cmd_802_11_rate_adapt_rateset);
-
-/**
- *  @brief Set the data rate
- *
- *  @param priv    	A pointer to struct lbs_private structure
- *  @param rate  	The desired data rate, or 0 to clear a locked rate
- *
- *  @return 	   	0 on success, error on failure
- */
-int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
-{
-	struct cmd_ds_802_11_data_rate cmd;
-	int ret = 0;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
-
-	if (rate > 0) {
-		cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
-		cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
-		if (cmd.rates[0] == 0) {
-			lbs_deb_cmd("DATA_RATE: invalid requested rate of"
-			            " 0x%02X\n", rate);
-			ret = 0;
-			goto out;
-		}
-		lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
-	} else {
-		cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
-		lbs_deb_cmd("DATA_RATE: setting auto\n");
-	}
-
-	ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
-	if (ret)
-		goto out;
-
-	lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof (cmd));
-
-	/* FIXME: get actual rates FW can do if this command actually returns
-	 * all data rates supported.
-	 */
-	priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
-	lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
-
-out:
-	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-	return ret;
-}
-
 /**
  *  @brief Get the radio channel
  *
@@ -880,7 +548,7 @@
  *
  *  @return 	   	The channel on success, error on failure
  */
-int lbs_get_channel(struct lbs_private *priv)
+static int lbs_get_channel(struct lbs_private *priv)
 {
 	struct cmd_ds_802_11_rf_channel cmd;
 	int ret = 0;
@@ -912,7 +580,7 @@
 
 	ret = lbs_get_channel(priv);
 	if (ret > 0) {
-		priv->curbssparams.channel = ret;
+		priv->channel = ret;
 		ret = 0;
 	}
 	lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -931,7 +599,7 @@
 {
 	struct cmd_ds_802_11_rf_channel cmd;
 #ifdef DEBUG
-	u8 old_channel = priv->curbssparams.channel;
+	u8 old_channel = priv->channel;
 #endif
 	int ret = 0;
 
@@ -946,36 +614,15 @@
 	if (ret)
 		goto out;
 
-	priv->curbssparams.channel = (uint8_t) le16_to_cpu(cmd.channel);
+	priv->channel = (uint8_t) le16_to_cpu(cmd.channel);
 	lbs_deb_cmd("channel switch from %d to %d\n", old_channel,
-		priv->curbssparams.channel);
+		priv->channel);
 
 out:
 	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
 	return ret;
 }
 
-static int lbs_cmd_802_11_rssi(struct lbs_private *priv,
-				struct cmd_ds_command *cmd)
-{
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	cmd->command = cpu_to_le16(CMD_802_11_RSSI);
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) + S_DS_GEN);
-	cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
-
-	/* reset Beacon SNR/NF/RSSI values */
-	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
-	priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
-	priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
-	priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
-	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
-	priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
 static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
 			       u8 cmd_action, void *pdata_buf)
 {
@@ -992,7 +639,7 @@
 
 			cmdptr->size =
 			    cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access)
-					+ S_DS_GEN);
+					+ sizeof(struct cmd_header));
 			macreg =
 			    (struct cmd_ds_mac_reg_access *)&cmdptr->params.
 			    macreg;
@@ -1011,7 +658,7 @@
 			cmdptr->size =
 			    cpu_to_le16(sizeof
 					     (struct cmd_ds_bbp_reg_access)
-					     + S_DS_GEN);
+					     + sizeof(struct cmd_header));
 			bbpreg =
 			    (struct cmd_ds_bbp_reg_access *)&cmdptr->params.
 			    bbpreg;
@@ -1030,7 +677,7 @@
 			cmdptr->size =
 			    cpu_to_le16(sizeof
 					     (struct cmd_ds_rf_reg_access) +
-					     S_DS_GEN);
+					     sizeof(struct cmd_header));
 			rfreg =
 			    (struct cmd_ds_rf_reg_access *)&cmdptr->params.
 			    rfreg;
@@ -1057,7 +704,8 @@
 	lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
 
 	cmd->command = cpu_to_le16(CMD_BT_ACCESS);
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) + S_DS_GEN);
+	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) +
+		sizeof(struct cmd_header));
 	cmd->result = 0;
 	bt_access->action = cpu_to_le16(cmd_action);
 
@@ -1094,7 +742,8 @@
 	lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
 
 	cmd->command = cpu_to_le16(CMD_FWT_ACCESS);
-	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) + S_DS_GEN);
+	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) +
+		sizeof(struct cmd_header));
 	cmd->result = 0;
 
 	if (pdata_buf)
@@ -1200,7 +849,7 @@
 		ie->val.mesh_id_len = priv->mesh_ssid_len;
 		memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
 		ie->len = sizeof(struct mrvl_meshie_val) -
-			IW_ESSID_MAX_SIZE + priv->mesh_ssid_len;
+			IEEE80211_MAX_SSID_LEN + priv->mesh_ssid_len;
 		cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
 		break;
 	case CMD_ACT_MESH_CONFIG_STOP:
@@ -1215,27 +864,6 @@
 	return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
 }
 
-static int lbs_cmd_bcn_ctrl(struct lbs_private * priv,
-				struct cmd_ds_command *cmd,
-				u16 cmd_action)
-{
-	struct cmd_ds_802_11_beacon_control
-		*bcn_ctrl = &cmd->params.bcn_ctrl;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	cmd->size =
-	    cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
-			     + S_DS_GEN);
-	cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
-
-	bcn_ctrl->action = cpu_to_le16(cmd_action);
-	bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
-	bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
 static void lbs_queue_cmd(struct lbs_private *priv,
 			  struct cmd_ctrl_node *cmdnode)
 {
@@ -1531,7 +1159,7 @@
 
 		cmdptr->command = cpu_to_le16(cmd_no);
 		cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) +
-					   S_DS_GEN);
+					   sizeof(struct cmd_header));
 
 		memmove(&cmdptr->params.afc,
 			pdata_buf, sizeof(struct cmd_ds_802_11_afc));
@@ -1539,45 +1167,17 @@
 		ret = 0;
 		goto done;
 
-	case CMD_802_11D_DOMAIN_INFO:
-		ret = lbs_cmd_802_11d_domain_info(priv, cmdptr,
-						   cmd_no, cmd_action);
-		break;
-
 	case CMD_802_11_TPC_CFG:
 		cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG);
 		cmdptr->size =
 		    cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) +
-				     S_DS_GEN);
+				     sizeof(struct cmd_header));
 
 		memmove(&cmdptr->params.tpccfg,
 			pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg));
 
 		ret = 0;
 		break;
-	case CMD_802_11_LED_GPIO_CTRL:
-		{
-			struct mrvl_ie_ledgpio *gpio =
-			    (struct mrvl_ie_ledgpio*)
-			    cmdptr->params.ledgpio.data;
-
-			memmove(&cmdptr->params.ledgpio,
-				pdata_buf,
-				sizeof(struct cmd_ds_802_11_led_ctrl));
-
-			cmdptr->command =
-			    cpu_to_le16(CMD_802_11_LED_GPIO_CTRL);
-
-#define ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN 8
-			cmdptr->size =
-			    cpu_to_le16(le16_to_cpu(gpio->header.len)
-				+ S_DS_GEN
-				+ ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN);
-			gpio->header.len = gpio->header.len;
-
-			ret = 0;
-			break;
-		}
 
 	case CMD_BT_ACCESS:
 		ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
@@ -1587,18 +1187,12 @@
 		ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
 		break;
 
-	case CMD_GET_TSF:
-		cmdptr->command = cpu_to_le16(CMD_GET_TSF);
-		cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_get_tsf) +
-					   S_DS_GEN);
-		ret = 0;
-		break;
 	case CMD_802_11_BEACON_CTRL:
 		ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
 		break;
 	case CMD_802_11_DEEP_SLEEP:
 		cmdptr->command = cpu_to_le16(CMD_802_11_DEEP_SLEEP);
-		cmdptr->size = cpu_to_le16(S_DS_GEN);
+		cmdptr->size = cpu_to_le16(sizeof(struct cmd_header));
 		break;
 	default:
 		lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no);
@@ -1917,30 +1511,6 @@
 	return ret;
 }
 
-void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
-{
-	union iwreq_data iwrq;
-	u8 buf[50];
-
-	lbs_deb_enter(LBS_DEB_WEXT);
-
-	memset(&iwrq, 0, sizeof(union iwreq_data));
-	memset(buf, 0, sizeof(buf));
-
-	snprintf(buf, sizeof(buf) - 1, "%s", str);
-
-	iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
-
-	/* Send Event to upper layer */
-	lbs_deb_wext("event indication string %s\n", (char *)buf);
-	lbs_deb_wext("event indication length %d\n", iwrq.data.length);
-	lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
-
-	wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
-
-	lbs_deb_leave(LBS_DEB_WEXT);
-}
-
 static void lbs_send_confirmsleep(struct lbs_private *priv)
 {
 	unsigned long flags;
@@ -2118,7 +1688,7 @@
 }
 
 
-static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
+struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
 	uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
 	int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
 	unsigned long callback_arg)
@@ -2216,5 +1786,3 @@
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__lbs_cmd);
-
-
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 392e578..2862748 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -3,11 +3,30 @@
 #ifndef _LBS_CMD_H_
 #define _LBS_CMD_H_
 
-#include "hostcmd.h"
+#include "host.h"
 #include "dev.h"
 
+
+/* Command & response transfer between host and card */
+
+struct cmd_ctrl_node {
+	struct list_head list;
+	int result;
+	/* command response */
+	int (*callback)(struct lbs_private *,
+			unsigned long,
+			struct cmd_header *);
+	unsigned long callback_arg;
+	/* command data */
+	struct cmd_header *cmdbuf;
+	/* wait queue */
+	u16 cmdwaitqwoken;
+	wait_queue_head_t cmdwait_q;
+};
+
+
 /* lbs_cmd() infers the size of the buffer to copy data back into, from
-   the size of the target of the pointer. Since the command to be sent 
+   the size of the target of the pointer. Since the command to be sent
    may often be smaller, that size is set in cmd->size by the caller.*/
 #define lbs_cmd(priv, cmdnr, cmd, cb, cb_arg)	({		\
 	uint16_t __sz = le16_to_cpu((cmd)->hdr.size);		\
@@ -18,6 +37,11 @@
 #define lbs_cmd_with_response(priv, cmdnr, cmd)	\
 	lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd))
 
+int lbs_prepare_and_send_command(struct lbs_private *priv,
+	u16 cmd_no,
+	u16 cmd_action,
+	u16 wait_option, u32 cmd_oid, void *pdata_buf);
+
 void lbs_cmd_async(struct lbs_private *priv, uint16_t command,
 	struct cmd_header *in_cmd, int in_cmd_size);
 
@@ -26,62 +50,93 @@
 	      int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
 	      unsigned long callback_arg);
 
-int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
-		int8_t p1, int8_t p2);
-
-int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
-		int8_t p2, int usesnr);
-
-int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
-		int8_t p1, int8_t p2);
-
-int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
-		int8_t p2, int usesnr);
+struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
+	uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
+	int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
+	unsigned long callback_arg);
 
 int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
 		     struct cmd_header *resp);
 
+int lbs_allocate_cmd_buffer(struct lbs_private *priv);
+int lbs_free_cmd_buffer(struct lbs_private *priv);
+
+int lbs_execute_next_command(struct lbs_private *priv);
+void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
+			  int result);
+int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
+
+
+/* From cmdresp.c */
+
+void lbs_mac_event_disconnected(struct lbs_private *priv);
+
+
+
+/* Events */
+
+int lbs_process_event(struct lbs_private *priv, u32 event);
+
+
+/* Actual commands */
+
 int lbs_update_hw_spec(struct lbs_private *priv);
 
-int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
-		    struct cmd_ds_mesh_access *cmd);
-
-int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
-
-int lbs_get_channel(struct lbs_private *priv);
 int lbs_set_channel(struct lbs_private *priv, u8 channel);
 
-int lbs_mesh_config_send(struct lbs_private *priv,
-			 struct cmd_ds_mesh_config *cmd,
-			 uint16_t action, uint16_t type);
-int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
+int lbs_update_channel(struct lbs_private *priv);
 
 int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
 		struct wol_config *p_wol_config);
-int lbs_suspend(struct lbs_private *priv);
-void lbs_resume(struct lbs_private *priv);
 
-int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
-				      uint16_t cmd_action);
-int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
-				      uint16_t cmd_action, uint16_t *timeout);
 int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
 				struct sleep_params *sp);
-int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
-			   struct assoc_request *assoc);
-int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
-			      uint16_t *enable);
-int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
-				struct assoc_request *assoc);
+
+void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
+
+void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
+
+void lbs_ps_confirm_sleep(struct lbs_private *priv);
+
+int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
+
+void lbs_set_mac_control(struct lbs_private *priv);
 
 int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
 		     s16 *maxlevel);
-int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
-
-int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
 
 int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
 
 int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
 
+
+/* Mesh related */
+
+int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
+		    struct cmd_ds_mesh_access *cmd);
+
+int lbs_mesh_config_send(struct lbs_private *priv,
+			 struct cmd_ds_mesh_config *cmd,
+			 uint16_t action, uint16_t type);
+
+int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
+
+
+/* Commands only used in wext.c, assoc.c and scan.c */
+
+int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
+		int8_t p1, int8_t p2);
+
+int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
+		int8_t p2, int usesnr);
+
+int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
+
+int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
+				      uint16_t cmd_action);
+
+int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
+
+int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
+
 #endif /* _LBS_CMD_H */
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 9ee8bd1..21d5769 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -11,6 +11,7 @@
 
 #include "host.h"
 #include "decl.h"
+#include "cmd.h"
 #include "defs.h"
 #include "dev.h"
 #include "assoc.h"
@@ -26,23 +27,17 @@
  */
 void lbs_mac_event_disconnected(struct lbs_private *priv)
 {
-	union iwreq_data wrqu;
-
 	if (priv->connect_status != LBS_CONNECTED)
 		return;
 
 	lbs_deb_enter(LBS_DEB_ASSOC);
 
-	memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
-	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-
 	/*
 	 * Cisco AP sends EAP failure and de-auth in less than 0.5 ms.
 	 * It causes problem in the Supplicant
 	 */
-
 	msleep_interruptible(1000);
-	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+	lbs_send_disconnect_notification(priv);
 
 	/* report disconnect to upper layer */
 	netif_stop_queue(priv->dev);
@@ -67,7 +62,7 @@
 	 * no longer valid.
 	 */
 	memset(&priv->curbssparams.bssid, 0, ETH_ALEN);
-	memset(&priv->curbssparams.ssid, 0, IW_ESSID_MAX_SIZE);
+	memset(&priv->curbssparams.ssid, 0, IEEE80211_MAX_SSID_LEN);
 	priv->curbssparams.ssid_len = 0;
 
 	if (priv->psstate != PS_STATE_FULL_POWER) {
@@ -78,32 +73,6 @@
 	lbs_deb_leave(LBS_DEB_ASSOC);
 }
 
-/**
- *  @brief This function handles MIC failure event.
- *
- *  @param priv    A pointer to struct lbs_private structure
- *  @para  event   the event id
- *  @return 	   n/a
- */
-static void handle_mic_failureevent(struct lbs_private *priv, u32 event)
-{
-	char buf[50];
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	memset(buf, 0, sizeof(buf));
-
-	sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
-
-	if (event == MACREG_INT_CODE_MIC_ERR_UNICAST) {
-		strcat(buf, "unicast ");
-	} else {
-		strcat(buf, "multicast ");
-	}
-
-	lbs_send_iwevcustom_event(priv, buf);
-	lbs_deb_leave(LBS_DEB_CMD);
-}
-
 static int lbs_ret_reg_access(struct lbs_private *priv,
 			       u16 type, struct cmd_ds_command *resp)
 {
@@ -147,53 +116,6 @@
 	return ret;
 }
 
-static int lbs_ret_802_11_rssi(struct lbs_private *priv,
-				struct cmd_ds_command *resp)
-{
-	struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	/* store the non average value */
-	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
-	priv->NF[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->noisefloor);
-
-	priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
-	priv->NF[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgnoisefloor);
-
-	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
-	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
-		     priv->NF[TYPE_BEACON][TYPE_NOAVG]);
-
-	priv->RSSI[TYPE_BEACON][TYPE_AVG] =
-	    CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
-		     priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
-
-	lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
-	       priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
-	       priv->RSSI[TYPE_BEACON][TYPE_AVG]);
-
-	lbs_deb_leave(LBS_DEB_CMD);
-	return 0;
-}
-
-static int lbs_ret_802_11_bcn_ctrl(struct lbs_private * priv,
-					struct cmd_ds_command *resp)
-{
-	struct cmd_ds_802_11_beacon_control *bcn_ctrl =
-	    &resp->params.bcn_ctrl;
-
-	lbs_deb_enter(LBS_DEB_CMD);
-
-	if (bcn_ctrl->action == CMD_ACT_GET) {
-		priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
-		priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
-	}
-
-	lbs_deb_enter(LBS_DEB_CMD);
-	return 0;
-}
-
 static inline int handle_cmd_response(struct lbs_private *priv,
 				      struct cmd_header *cmd_response)
 {
@@ -227,29 +149,13 @@
 		ret = lbs_ret_802_11_rssi(priv, resp);
 		break;
 
-	case CMD_RET(CMD_802_11D_DOMAIN_INFO):
-		ret = lbs_ret_802_11d_domain_info(resp);
-		break;
-
 	case CMD_RET(CMD_802_11_TPC_CFG):
 		spin_lock_irqsave(&priv->driver_lock, flags);
 		memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg,
 			sizeof(struct cmd_ds_802_11_tpc_cfg));
 		spin_unlock_irqrestore(&priv->driver_lock, flags);
 		break;
-	case CMD_RET(CMD_802_11_LED_GPIO_CTRL):
-		spin_lock_irqsave(&priv->driver_lock, flags);
-		memmove((void *)priv->cur_cmd->callback_arg, &resp->params.ledgpio,
-			sizeof(struct cmd_ds_802_11_led_ctrl));
-		spin_unlock_irqrestore(&priv->driver_lock, flags);
-		break;
 
-	case CMD_RET(CMD_GET_TSF):
-		spin_lock_irqsave(&priv->driver_lock, flags);
-		memcpy((void *)priv->cur_cmd->callback_arg,
-		       &resp->params.gettsf.tsfvalue, sizeof(u64));
-		spin_unlock_irqrestore(&priv->driver_lock, flags);
-		break;
 	case CMD_RET(CMD_BT_ACCESS):
 		spin_lock_irqsave(&priv->driver_lock, flags);
 		if (priv->cur_cmd->callback_arg)
@@ -545,12 +451,12 @@
 
 	case MACREG_INT_CODE_MIC_ERR_UNICAST:
 		lbs_deb_cmd("EVENT: UNICAST MIC ERROR\n");
-		handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_UNICAST);
+		lbs_send_mic_failureevent(priv, event);
 		break;
 
 	case MACREG_INT_CODE_MIC_ERR_MULTICAST:
 		lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n");
-		handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_MULTICAST);
+		lbs_send_mic_failureevent(priv, event);
 		break;
 
 	case MACREG_INT_CODE_MIB_CHANGED:
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 893a55c..587b0cb 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -451,10 +451,12 @@
 				CMD_MAC_REG_ACCESS, 0,
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
-	pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n",
+	if (!ret) {
+		pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n",
 				priv->mac_offset, priv->offsetvalue.value);
 
-	ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+		ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+	}
 	free_page(addr);
 	return ret;
 }
@@ -514,7 +516,8 @@
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
 
-	res = count;
+	if (!res)
+		res = count;
 out_unlock:
 	free_page(addr);
 	return res;
@@ -539,10 +542,12 @@
 				CMD_BBP_REG_ACCESS, 0,
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
-	pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n",
+	if (!ret) {
+		pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n",
 				priv->bbp_offset, priv->offsetvalue.value);
 
-	ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+		ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+	}
 	free_page(addr);
 
 	return ret;
@@ -603,7 +608,8 @@
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
 
-	res = count;
+	if (!res)
+		res = count;
 out_unlock:
 	free_page(addr);
 	return res;
@@ -628,10 +634,12 @@
 				CMD_RF_REG_ACCESS, 0,
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
-	pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n",
+	if (!ret) {
+		pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n",
 				priv->rf_offset, priv->offsetvalue.value);
 
-	ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+		ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
+	}
 	free_page(addr);
 
 	return ret;
@@ -692,7 +700,8 @@
 				CMD_OPTION_WAITFORRSP, 0, &offval);
 	mdelay(10);
 
-	res = count;
+	if (!res)
+		res = count;
 out_unlock:
 	free_page(addr);
 	return res;
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index fb91c36..678f7c9 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -8,74 +8,48 @@
 
 #include <linux/netdevice.h>
 
-#include "defs.h"
 
-/** Function Prototype Declaration */
 struct lbs_private;
 struct sk_buff;
 struct net_device;
-struct cmd_ctrl_node;
-struct cmd_ds_command;
 
-void lbs_set_mac_control(struct lbs_private *priv);
 
+/* ethtool.c */
+extern const struct ethtool_ops lbs_ethtool_ops;
+
+
+/* tx.c */
 void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count);
-
-int lbs_free_cmd_buffer(struct lbs_private *priv);
-
-int lbs_prepare_and_send_command(struct lbs_private *priv,
-	u16 cmd_no,
-	u16 cmd_action,
-	u16 wait_option, u32 cmd_oid, void *pdata_buf);
-
-int lbs_allocate_cmd_buffer(struct lbs_private *priv);
-int lbs_execute_next_command(struct lbs_private *priv);
-int lbs_process_event(struct lbs_private *priv, u32 event);
-void lbs_queue_event(struct lbs_private *priv, u32 event);
-void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
-int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
-int lbs_enter_auto_deep_sleep(struct lbs_private *priv);
-int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
-
-u32 lbs_fw_index_to_data_rate(u8 index);
-u8 lbs_data_rate_to_fw_index(u32 rate);
-
-/** The proc fs interface */
-int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
-void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
-			  int result);
 netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb,
 				struct net_device *dev);
-int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
 
+/* rx.c */
 int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *);
 
-void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
-void lbs_ps_confirm_sleep(struct lbs_private *priv);
-void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
-
-struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
-	struct lbs_private *priv,
-	u8 band,
-	u16 channel);
-
-void lbs_mac_event_disconnected(struct lbs_private *priv);
-
-void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str);
 
 /* persistcfg.c */
 void lbs_persist_config_init(struct net_device *net);
 void lbs_persist_config_remove(struct net_device *net);
 
+
 /* main.c */
-struct chan_freq_power *lbs_get_region_cfp_table(u8 region,
-	int *cfp_no);
 struct lbs_private *lbs_add_card(void *card, struct device *dmdev);
 void lbs_remove_card(struct lbs_private *priv);
 int lbs_start_card(struct lbs_private *priv);
 void lbs_stop_card(struct lbs_private *priv);
 void lbs_host_to_card_done(struct lbs_private *priv);
 
-int lbs_update_channel(struct lbs_private *priv);
+int lbs_suspend(struct lbs_private *priv);
+void lbs_resume(struct lbs_private *priv);
+
+void lbs_queue_event(struct lbs_private *priv, u32 event);
+void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
+
+int lbs_enter_auto_deep_sleep(struct lbs_private *priv);
+int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
+
+u32 lbs_fw_index_to_data_rate(u8 index);
+u8 lbs_data_rate_to_fw_index(u32 rate);
+
 
 #endif
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 1cf5d59..6b6ea9f 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -322,7 +322,6 @@
 extern const char lbs_driver_version[];
 extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE];
 
-extern u8 lbs_bg_rates[MAX_RATES];
 
 /** ENUM definition*/
 /** SNRNF_TYPE */
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 8abb28a..1a67511 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -6,75 +6,10 @@
 #ifndef _LBS_DEV_H_
 #define _LBS_DEV_H_
 
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-#include <linux/ethtool.h>
-#include <linux/debugfs.h>
+#include "scan.h"
+#include "assoc.h"
 
-#include "defs.h"
-#include "hostcmd.h"
 
-extern const struct ethtool_ops lbs_ethtool_ops;
-
-#define	MAX_BSSID_PER_CHANNEL		16
-
-#define NR_TX_QUEUE			3
-
-/* For the extended Scan */
-#define MAX_EXTENDED_SCAN_BSSID_LIST    MAX_BSSID_PER_CHANNEL * \
-						MRVDRV_MAX_CHANNEL_SIZE + 1
-
-#define	MAX_REGION_CHANNEL_NUM	2
-
-/** Chan-freq-TxPower mapping table*/
-struct chan_freq_power {
-	/** channel Number		*/
-	u16 channel;
-	/** frequency of this channel	*/
-	u32 freq;
-	/** Max allowed Tx power level	*/
-	u16 maxtxpower;
-	/** TRUE:channel unsupported;  FLASE:supported*/
-	u8 unsupported;
-};
-
-/** region-band mapping table*/
-struct region_channel {
-	/** TRUE if this entry is valid		     */
-	u8 valid;
-	/** region code for US, Japan ...	     */
-	u8 region;
-	/** band B/G/A, used for BAND_CONFIG cmd	     */
-	u8 band;
-	/** Actual No. of elements in the array below */
-	u8 nrcfp;
-	/** chan-freq-txpower mapping table*/
-	struct chan_freq_power *CFP;
-};
-
-struct lbs_802_11_security {
-	u8 WPAenabled;
-	u8 WPA2enabled;
-	u8 wep_enabled;
-	u8 auth_mode;
-	u32 key_mgmt;
-};
-
-/** Current Basic Service Set State Structure */
-struct current_bss_params {
-	/** bssid */
-	u8 bssid[ETH_ALEN];
-	/** ssid */
-	u8 ssid[IW_ESSID_MAX_SIZE + 1];
-	u8 ssid_len;
-
-	/** band */
-	u8 band;
-	/** channel */
-	u8 channel;
-	/** zero-terminated array of supported data rates */
-	u8 rates[MAX_RATES + 1];
-};
 
 /** sleep_params */
 struct sleep_params {
@@ -100,113 +35,96 @@
 
 /** Private structure for the MV device */
 struct lbs_private {
+
+	/* Basic networking */
+	struct net_device *dev;
+	u32 connect_status;
+	int infra_open;
+	struct work_struct mcast_work;
+	u32 nr_of_multicastmacaddr;
+	u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
+
+	/* CFG80211 */
 	struct wireless_dev *wdev;
+
+	/* Mesh */
+	struct net_device *mesh_dev; /* Virtual device */
+	u32 mesh_connect_status;
+	struct lbs_mesh_stats mstats;
 	int mesh_open;
 	int mesh_fw_ver;
-	int infra_open;
 	int mesh_autostart_enabled;
+	uint16_t mesh_tlv;
+	u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 mesh_ssid_len;
+	struct work_struct sync_channel;
 
-	char name[DEV_NAME_LEN];
-
-	void *card;
-	struct net_device *dev;
-
-	struct net_device *mesh_dev; /* Virtual device */
+	/* Monitor mode */
 	struct net_device *rtap_net_dev;
+	u32 monitormode;
 
-	struct iw_statistics wstats;
-	struct lbs_mesh_stats mstats;
+	/* Debugfs */
 	struct dentry *debugfs_dir;
 	struct dentry *debugfs_debug;
 	struct dentry *debugfs_files[6];
-
 	struct dentry *events_dir;
 	struct dentry *debugfs_events_files[6];
-
 	struct dentry *regs_dir;
 	struct dentry *debugfs_regs_files[6];
 
+	/* Hardware debugging */
 	u32 mac_offset;
 	u32 bbp_offset;
 	u32 rf_offset;
+	struct lbs_offset_value offsetvalue;
 
-	/** Deep sleep flag */
+	/* Power management */
+	u16 psmode;
+	u32 psstate;
+	u8 needtowakeup;
+
+	/* Deep sleep */
 	int is_deep_sleep;
-	/** Auto deep sleep enabled flag */
 	int is_auto_deep_sleep_enabled;
-	/** Device wakeup required flag */
 	int wakeup_dev_required;
-	/** Auto deep sleep flag*/
 	int is_activity_detected;
-	/** Auto deep sleep timeout (in miliseconds) */
-	int auto_deep_sleep_timeout;
+	int auto_deep_sleep_timeout; /* in ms */
+	wait_queue_head_t ds_awake_q;
+	struct timer_list auto_deepsleep_timer;
 
-	/** Deep sleep wait queue */
-	wait_queue_head_t       ds_awake_q;
-
-	/* Download sent:
-	   bit0 1/0=data_sent/data_tx_done,
-	   bit1 1/0=cmd_sent/cmd_tx_done,
-	   all other bits reserved 0 */
-	u8 dnld_sent;
-
-	/** thread to service interrupts */
-	struct task_struct *main_thread;
-	wait_queue_head_t waitq;
-	struct workqueue_struct *work_thread;
-
-	struct work_struct mcast_work;
-
-	/** Scanning */
-	struct delayed_work scan_work;
-	struct delayed_work assoc_work;
-	struct work_struct sync_channel;
-	/* remember which channel was scanned last, != 0 if currently scanning */
-	int scan_channel;
-	u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
-	u8 scan_ssid_len;
-
-	/** Hardware access */
+	/* Hardware access */
+	void *card;
+	u8 fw_ready;
+	u8 surpriseremoved;
 	int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
 	void (*reset_card) (struct lbs_private *priv);
 	int (*enter_deep_sleep) (struct lbs_private *priv);
 	int (*exit_deep_sleep) (struct lbs_private *priv);
 	int (*reset_deep_sleep_wakeup) (struct lbs_private *priv);
 
-	/* Wake On LAN */
-	uint32_t wol_criteria;
-	uint8_t wol_gpio;
-	uint8_t wol_gap;
-
-	/** Wlan adapter data structure*/
-	/** STATUS variables */
+	/* Adapter info (from EEPROM) */
 	u32 fwrelease;
 	u32 fwcapinfo;
+	u16 regioncode;
+	u8 current_addr[ETH_ALEN];
 
-	struct mutex lock;
-
-	/* TX packet ready to be sent... */
-	int tx_pending_len;		/* -1 while building packet */
-
-	u8 tx_pending_buf[LBS_UPLD_SIZE];
-	/* protected by hard_start_xmit serialization */
-
-	/** command-related variables */
+	/* Command download */
+	u8 dnld_sent;
+	/* bit0 1/0=data_sent/data_tx_done,
+	   bit1 1/0=cmd_sent/cmd_tx_done,
+	   all other bits reserved 0 */
 	u16 seqnum;
-
 	struct cmd_ctrl_node *cmd_array;
-	/** Current command */
 	struct cmd_ctrl_node *cur_cmd;
-	int cur_cmd_retcode;
-	/** command Queues */
-	/** Free command buffers */
-	struct list_head cmdfreeq;
-	/** Pending command buffers */
-	struct list_head cmdpendingq;
-
+	struct list_head cmdfreeq;    /* free command buffers */
+	struct list_head cmdpendingq; /* pending command buffers */
 	wait_queue_head_t cmd_pending;
+	struct timer_list command_timer;
+	int nr_retries;
+	int cmd_timed_out;
 
 	/* Command responses sent from the hardware to the driver */
+	int cur_cmd_retcode;
 	u8 resp_idx;
 	u8 resp_buf[2][LBS_UPLD_SIZE];
 	u32 resp_len[2];
@@ -214,96 +132,76 @@
 	/* Events sent from hardware to driver */
 	struct kfifo *event_fifo;
 
-	/* nickname */
-	u8 nodename[16];
+	/** thread to service interrupts */
+	struct task_struct *main_thread;
+	wait_queue_head_t waitq;
+	struct workqueue_struct *work_thread;
 
-	/** spin locks */
-	spinlock_t driver_lock;
+	/** Encryption stuff */
+	struct lbs_802_11_security secinfo;
+	struct enc_key wpa_mcast_key;
+	struct enc_key wpa_unicast_key;
+	u8 wpa_ie[MAX_WPA_IE_LEN];
+	u8 wpa_ie_len;
+	u16 wep_tx_keyidx;
+	struct enc_key wep_keys[4];
 
-	/** Timers */
-	struct timer_list command_timer;
-	struct timer_list auto_deepsleep_timer;
-	int nr_retries;
-	int cmd_timed_out;
+	/* Wake On LAN */
+	uint32_t wol_criteria;
+	uint8_t wol_gpio;
+	uint8_t wol_gap;
 
-	/** current ssid/bssid related parameters*/
-	struct current_bss_params curbssparams;
-
-	uint16_t mesh_tlv;
-	u8 mesh_ssid[IW_ESSID_MAX_SIZE + 1];
-	u8 mesh_ssid_len;
-
-	/* IW_MODE_* */
-	u8 mode;
-
-	/* Scan results list */
-	struct list_head network_list;
-	struct list_head network_free_list;
-	struct bss_descriptor *networks;
-
-	u16 beacon_period;
-	u8 beacon_enable;
-	u8 adhoccreate;
-
-	/** capability Info used in Association, start, join */
-	u16 capability;
-
-	/** MAC address information */
-	u8 current_addr[ETH_ALEN];
-	u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
-	u32 nr_of_multicastmacaddr;
-
-	/** 802.11 statistics */
-//	struct cmd_DS_802_11_GET_STAT wlan802_11Stat;
-
-	uint16_t enablehwauto;
-	uint16_t ratebitmap;
-
+	/* Transmitting */
+	int tx_pending_len;		/* -1 while building packet */
+	u8 tx_pending_buf[LBS_UPLD_SIZE];
+	/* protected by hard_start_xmit serialization */
 	u8 txretrycount;
-
-	/** Tx-related variables (for single packet tx) */
 	struct sk_buff *currenttxskb;
 
-	/** NIC Operation characteristics */
+	/* Locks */
+	struct mutex lock;
+	spinlock_t driver_lock;
+
+	/* NIC/link operation characteristics */
 	u16 mac_control;
-	u32 connect_status;
-	u32 mesh_connect_status;
-	u16 regioncode;
+	u8 radio_on;
+	u8 channel;
 	s16 txpower_cur;
 	s16 txpower_min;
 	s16 txpower_max;
 
-	/** POWER MANAGEMENT AND PnP SUPPORT */
-	u8 surpriseremoved;
+	/** Scanning */
+	struct delayed_work scan_work;
+	/* remember which channel was scanned last, != 0 if currently scanning */
+	int scan_channel;
+	u8 scan_ssid[IEEE80211_MAX_SSID_LEN + 1];
+	u8 scan_ssid_len;
 
-	u16 psmode;		/* Wlan802_11PowermodeCAM=disable
-				   Wlan802_11PowermodeMAX_PSP=enable */
-	u32 psstate;
-	u8 needtowakeup;
-
+	/* Associating */
+	struct delayed_work assoc_work;
+	struct current_bss_params curbssparams;
+	u8 mode;
+	struct list_head network_list;
+	struct list_head network_free_list;
+	struct bss_descriptor *networks;
 	struct assoc_request * pending_assoc_req;
 	struct assoc_request * in_progress_assoc_req;
+	u16 capability;
+	uint16_t enablehwauto;
+	uint16_t ratebitmap;
 
-	/** Encryption parameter */
-	struct lbs_802_11_security secinfo;
+	/* ADHOC */
+	u16 beacon_period;
+	u8 beacon_enable;
+	u8 adhoccreate;
 
-	/** WEP keys */
-	struct enc_key wep_keys[4];
-	u16 wep_tx_keyidx;
-
-	/** WPA keys */
-	struct enc_key wpa_mcast_key;
-	struct enc_key wpa_unicast_key;
-
-/*
- * In theory, the IE is limited to the IE length, 255,
- * but in practice 64 bytes are enough.
- */
-#define MAX_WPA_IE_LEN 64
-
-	/** WPA Information Elements*/
-	u8 wpa_ie[MAX_WPA_IE_LEN];
-	u8 wpa_ie_len;
+	/* WEXT */
+	char name[DEV_NAME_LEN];
+	u8 nodename[16];
+	struct iw_statistics wstats;
+	u8 cur_rate;
+#define	MAX_REGION_CHANNEL_NUM	2
+	struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
 
 	/** Requested Signal Strength*/
 	u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG];
@@ -313,116 +211,8 @@
 	u8 rawNF[DEFAULT_DATA_AVG_FACTOR];
 	u16 nextSNRNF;
 	u16 numSNRNF;
-
-	u8 radio_on;
-
-	/** data rate stuff */
-	u8 cur_rate;
-
-	/** RF calibration data */
-
-#define	MAX_REGION_CHANNEL_NUM	2
-	/** region channel data */
-	struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
-
-	struct region_channel universal_channel[MAX_REGION_CHANNEL_NUM];
-
-	/** 11D and Domain Regulatory Data */
-	struct lbs_802_11d_domain_reg domainreg;
-	struct parsed_region_chan_11d parsed_region_chan;
-
-	/** FSM variable for 11d support */
-	u32 enable11d;
-
-	/**	MISCELLANEOUS */
-	struct lbs_offset_value offsetvalue;
-
-	u32 monitormode;
-	u8 fw_ready;
 };
 
 extern struct cmd_confirm_sleep confirm_sleep;
 
-/**
- *  @brief Structure used to store information for each beacon/probe response
- */
-struct bss_descriptor {
-	u8 bssid[ETH_ALEN];
-
-	u8 ssid[IW_ESSID_MAX_SIZE + 1];
-	u8 ssid_len;
-
-	u16 capability;
-	u32 rssi;
-	u32 channel;
-	u16 beaconperiod;
-	__le16 atimwindow;
-
-	/* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
-	u8 mode;
-
-	/* zero-terminated array of supported data rates */
-	u8 rates[MAX_RATES + 1];
-
-	unsigned long last_scanned;
-
-	union ieee_phy_param_set phy;
-	union ieee_ss_param_set ss;
-
-	struct ieee_ie_country_info_full_set countryinfo;
-
-	u8 wpa_ie[MAX_WPA_IE_LEN];
-	size_t wpa_ie_len;
-	u8 rsn_ie[MAX_WPA_IE_LEN];
-	size_t rsn_ie_len;
-
-	u8 mesh;
-
-	struct list_head list;
-};
-
-/** Association request
- *
- * Encapsulates all the options that describe a specific assocation request
- * or configuration of the wireless card's radio, mode, and security settings.
- */
-struct assoc_request {
-#define ASSOC_FLAG_SSID			1
-#define ASSOC_FLAG_CHANNEL		2
-#define ASSOC_FLAG_BAND			3
-#define ASSOC_FLAG_MODE			4
-#define ASSOC_FLAG_BSSID		5
-#define ASSOC_FLAG_WEP_KEYS		6
-#define ASSOC_FLAG_WEP_TX_KEYIDX	7
-#define ASSOC_FLAG_WPA_MCAST_KEY	8
-#define ASSOC_FLAG_WPA_UCAST_KEY	9
-#define ASSOC_FLAG_SECINFO		10
-#define ASSOC_FLAG_WPA_IE		11
-	unsigned long flags;
-
-	u8 ssid[IW_ESSID_MAX_SIZE + 1];
-	u8 ssid_len;
-	u8 channel;
-	u8 band;
-	u8 mode;
-	u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
-
-	/** WEP keys */
-	struct enc_key wep_keys[4];
-	u16 wep_tx_keyidx;
-
-	/** WPA keys */
-	struct enc_key wpa_mcast_key;
-	struct enc_key wpa_unicast_key;
-
-	struct lbs_802_11_security secinfo;
-
-	/** WPA Information Elements*/
-	u8 wpa_ie[MAX_WPA_IE_LEN];
-	u8 wpa_ie_len;
-
-	/* BSS to associate with for infrastructure of Ad-Hoc join */
-	struct bss_descriptor bss;
-};
-
 #endif
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index c055daa..3809c0b 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -1,202 +1,190 @@
 /**
-  * This file contains definitions of WLAN commands.
+  * This file contains the function prototypes, data structures
+  * and definitions for all the host/station commands
   */
 
 #ifndef _LBS_HOST_H_
 #define _LBS_HOST_H_
 
-/** PUBLIC DEFINITIONS */
-#define DEFAULT_AD_HOC_CHANNEL			6
-#define	DEFAULT_AD_HOC_CHANNEL_A		36
+#include "types.h"
+#include "defs.h"
 
-#define CMD_OPTION_WAITFORRSP			0x0002
+#define DEFAULT_AD_HOC_CHANNEL                  6
+
+#define CMD_OPTION_WAITFORRSP                   0x0002
 
 /** Host command IDs */
 
 /* Return command are almost always the same as the host command, but with
  * bit 15 set high.  There are a few exceptions, though...
  */
-#define CMD_RET(cmd)			(0x8000 | cmd)
+#define CMD_RET(cmd)                            (0x8000 | cmd)
 
 /* Return command convention exceptions: */
-#define CMD_RET_802_11_ASSOCIATE		0x8012
+#define CMD_RET_802_11_ASSOCIATE                0x8012
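+/* e.g. CMD_RET(CMD_802_11_SCAN) is 0x8006, while the associate response above
+ * is one such exception: the firmware answers 0x8012 rather than
+ * CMD_RET(CMD_802_11_ASSOCIATE) (0x8050). */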
 
 /* Command codes */
-#define CMD_GET_HW_SPEC				0x0003
-#define	CMD_EEPROM_UPDATE			0x0004
-#define CMD_802_11_RESET			0x0005
-#define	CMD_802_11_SCAN				0x0006
-#define CMD_802_11_GET_LOG			0x000b
-#define CMD_MAC_MULTICAST_ADR			0x0010
-#define CMD_802_11_AUTHENTICATE			0x0011
-#define CMD_802_11_EEPROM_ACCESS		0x0059
-#define CMD_802_11_ASSOCIATE			0x0050
-#define CMD_802_11_SET_WEP			0x0013
-#define CMD_802_11_GET_STAT			0x0014
-#define CMD_802_3_GET_STAT			0x0015
-#define CMD_802_11_SNMP_MIB			0x0016
-#define CMD_MAC_REG_MAP				0x0017
-#define CMD_BBP_REG_MAP				0x0018
-#define CMD_MAC_REG_ACCESS			0x0019
-#define CMD_BBP_REG_ACCESS			0x001a
-#define CMD_RF_REG_ACCESS			0x001b
-#define CMD_802_11_RADIO_CONTROL		0x001c
-#define CMD_802_11_RF_CHANNEL			0x001d
-#define CMD_802_11_RF_TX_POWER			0x001e
-#define CMD_802_11_RSSI				0x001f
-#define CMD_802_11_RF_ANTENNA			0x0020
-#define CMD_802_11_PS_MODE			0x0021
-#define CMD_802_11_DATA_RATE			0x0022
-#define CMD_RF_REG_MAP				0x0023
-#define CMD_802_11_DEAUTHENTICATE		0x0024
-#define CMD_802_11_REASSOCIATE			0x0025
-#define CMD_MAC_CONTROL				0x0028
-#define CMD_802_11_AD_HOC_START			0x002b
-#define CMD_802_11_AD_HOC_JOIN			0x002c
-#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS	0x002e
-#define CMD_802_11_ENABLE_RSN			0x002f
-#define CMD_802_11_SET_AFC			0x003c
-#define CMD_802_11_GET_AFC			0x003d
-#define CMD_802_11_DEEP_SLEEP                  0x003e
-#define CMD_802_11_AD_HOC_STOP			0x0040
-#define CMD_802_11_HOST_SLEEP_CFG		0x0043
-#define CMD_802_11_WAKEUP_CONFIRM		0x0044
-#define CMD_802_11_HOST_SLEEP_ACTIVATE		0x0045
-#define CMD_802_11_BEACON_STOP			0x0049
-#define CMD_802_11_MAC_ADDRESS			0x004d
-#define CMD_802_11_LED_GPIO_CTRL		0x004e
-#define CMD_802_11_EEPROM_ACCESS		0x0059
-#define CMD_802_11_BAND_CONFIG			0x0058
-#define CMD_GSPI_BUS_CONFIG			0x005a
-#define CMD_802_11D_DOMAIN_INFO			0x005b
-#define CMD_802_11_KEY_MATERIAL			0x005e
-#define CMD_802_11_SLEEP_PARAMS			0x0066
-#define CMD_802_11_INACTIVITY_TIMEOUT		0x0067
-#define CMD_802_11_SLEEP_PERIOD			0x0068
-#define CMD_802_11_TPC_CFG			0x0072
-#define CMD_802_11_PA_CFG			0x0073
-#define CMD_802_11_FW_WAKE_METHOD		0x0074
-#define CMD_802_11_SUBSCRIBE_EVENT		0x0075
-#define CMD_802_11_RATE_ADAPT_RATESET		0x0076
-#define CMD_802_11_TX_RATE_QUERY		0x007f
-#define	CMD_GET_TSF				0x0080
-#define CMD_BT_ACCESS				0x0087
-#define CMD_FWT_ACCESS				0x0095
-#define CMD_802_11_MONITOR_MODE			0x0098
-#define CMD_MESH_ACCESS				0x009b
-#define CMD_MESH_CONFIG_OLD			0x00a3
-#define CMD_MESH_CONFIG				0x00ac
-#define	CMD_SET_BOOT2_VER			0x00a5
-#define	CMD_FUNC_INIT				0x00a9
-#define	CMD_FUNC_SHUTDOWN			0x00aa
-#define CMD_802_11_BEACON_CTRL			0x00b0
+#define CMD_GET_HW_SPEC                         0x0003
+#define CMD_EEPROM_UPDATE                       0x0004
+#define CMD_802_11_RESET                        0x0005
+#define CMD_802_11_SCAN                         0x0006
+#define CMD_802_11_GET_LOG                      0x000b
+#define CMD_MAC_MULTICAST_ADR                   0x0010
+#define CMD_802_11_AUTHENTICATE                 0x0011
+#define CMD_802_11_EEPROM_ACCESS                0x0059
+#define CMD_802_11_ASSOCIATE                    0x0050
+#define CMD_802_11_SET_WEP                      0x0013
+#define CMD_802_11_GET_STAT                     0x0014
+#define CMD_802_3_GET_STAT                      0x0015
+#define CMD_802_11_SNMP_MIB                     0x0016
+#define CMD_MAC_REG_MAP                         0x0017
+#define CMD_BBP_REG_MAP                         0x0018
+#define CMD_MAC_REG_ACCESS                      0x0019
+#define CMD_BBP_REG_ACCESS                      0x001a
+#define CMD_RF_REG_ACCESS                       0x001b
+#define CMD_802_11_RADIO_CONTROL                0x001c
+#define CMD_802_11_RF_CHANNEL                   0x001d
+#define CMD_802_11_RF_TX_POWER                  0x001e
+#define CMD_802_11_RSSI                         0x001f
+#define CMD_802_11_RF_ANTENNA                   0x0020
+#define CMD_802_11_PS_MODE                      0x0021
+#define CMD_802_11_DATA_RATE                    0x0022
+#define CMD_RF_REG_MAP                          0x0023
+#define CMD_802_11_DEAUTHENTICATE               0x0024
+#define CMD_802_11_REASSOCIATE                  0x0025
+#define CMD_MAC_CONTROL                         0x0028
+#define CMD_802_11_AD_HOC_START                 0x002b
+#define CMD_802_11_AD_HOC_JOIN                  0x002c
+#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS       0x002e
+#define CMD_802_11_ENABLE_RSN                   0x002f
+#define CMD_802_11_SET_AFC                      0x003c
+#define CMD_802_11_GET_AFC                      0x003d
+#define CMD_802_11_DEEP_SLEEP                   0x003e
+#define CMD_802_11_AD_HOC_STOP                  0x0040
+#define CMD_802_11_HOST_SLEEP_CFG               0x0043
+#define CMD_802_11_WAKEUP_CONFIRM               0x0044
+#define CMD_802_11_HOST_SLEEP_ACTIVATE          0x0045
+#define CMD_802_11_BEACON_STOP                  0x0049
+#define CMD_802_11_MAC_ADDRESS                  0x004d
+#define CMD_802_11_LED_GPIO_CTRL                0x004e
+#define CMD_802_11_EEPROM_ACCESS                0x0059
+#define CMD_802_11_BAND_CONFIG                  0x0058
+#define CMD_GSPI_BUS_CONFIG                     0x005a
+#define CMD_802_11D_DOMAIN_INFO                 0x005b
+#define CMD_802_11_KEY_MATERIAL                 0x005e
+#define CMD_802_11_SLEEP_PARAMS                 0x0066
+#define CMD_802_11_INACTIVITY_TIMEOUT           0x0067
+#define CMD_802_11_SLEEP_PERIOD                 0x0068
+#define CMD_802_11_TPC_CFG                      0x0072
+#define CMD_802_11_PA_CFG                       0x0073
+#define CMD_802_11_FW_WAKE_METHOD               0x0074
+#define CMD_802_11_SUBSCRIBE_EVENT              0x0075
+#define CMD_802_11_RATE_ADAPT_RATESET           0x0076
+#define CMD_802_11_TX_RATE_QUERY                0x007f
+#define CMD_GET_TSF                             0x0080
+#define CMD_BT_ACCESS                           0x0087
+#define CMD_FWT_ACCESS                          0x0095
+#define CMD_802_11_MONITOR_MODE                 0x0098
+#define CMD_MESH_ACCESS                         0x009b
+#define CMD_MESH_CONFIG_OLD                     0x00a3
+#define CMD_MESH_CONFIG                         0x00ac
+#define CMD_SET_BOOT2_VER                       0x00a5
+#define CMD_FUNC_INIT                           0x00a9
+#define CMD_FUNC_SHUTDOWN                       0x00aa
+#define CMD_802_11_BEACON_CTRL                  0x00b0
 
 /* For the IEEE Power Save */
-#define CMD_SUBCMD_ENTER_PS		0x0030
-#define CMD_SUBCMD_EXIT_PS		0x0031
-#define CMD_SUBCMD_SLEEP_CONFIRMED	0x0034
-#define CMD_SUBCMD_FULL_POWERDOWN	0x0035
-#define CMD_SUBCMD_FULL_POWERUP		0x0036
+#define CMD_SUBCMD_ENTER_PS                     0x0030
+#define CMD_SUBCMD_EXIT_PS                      0x0031
+#define CMD_SUBCMD_SLEEP_CONFIRMED              0x0034
+#define CMD_SUBCMD_FULL_POWERDOWN               0x0035
+#define CMD_SUBCMD_FULL_POWERUP                 0x0036
 
-#define CMD_ENABLE_RSN			0x0001
-#define CMD_DISABLE_RSN			0x0000
+#define CMD_ENABLE_RSN                          0x0001
+#define CMD_DISABLE_RSN                         0x0000
 
-#define CMD_ACT_GET			0x0000
-#define CMD_ACT_SET			0x0001
-#define CMD_ACT_GET_AES			0x0002
-#define CMD_ACT_SET_AES			0x0003
-#define CMD_ACT_REMOVE_AES		0x0004
+#define CMD_ACT_GET                             0x0000
+#define CMD_ACT_SET                             0x0001
 
 /* Define action or option for CMD_802_11_SET_WEP */
-#define CMD_ACT_ADD			0x0002
-#define CMD_ACT_REMOVE			0x0004
-#define CMD_ACT_USE_DEFAULT		0x0008
+#define CMD_ACT_ADD                             0x0002
+#define CMD_ACT_REMOVE                          0x0004
 
-#define CMD_TYPE_WEP_40_BIT		0x01
-#define CMD_TYPE_WEP_104_BIT		0x02
+#define CMD_TYPE_WEP_40_BIT                     0x01
+#define CMD_TYPE_WEP_104_BIT                    0x02
 
-#define CMD_NUM_OF_WEP_KEYS		4
+#define CMD_NUM_OF_WEP_KEYS                     4
 
-#define CMD_WEP_KEY_INDEX_MASK		0x3fff
-
-/* Define action or option for CMD_802_11_RESET */
-#define CMD_ACT_HALT			0x0003
+#define CMD_WEP_KEY_INDEX_MASK                  0x3fff
 
 /* Define action or option for CMD_802_11_SCAN */
-#define CMD_BSS_TYPE_BSS		0x0001
-#define CMD_BSS_TYPE_IBSS		0x0002
-#define CMD_BSS_TYPE_ANY		0x0003
+#define CMD_BSS_TYPE_BSS                        0x0001
+#define CMD_BSS_TYPE_IBSS                       0x0002
+#define CMD_BSS_TYPE_ANY                        0x0003
 
 /* Define action or option for CMD_802_11_SCAN */
-#define CMD_SCAN_TYPE_ACTIVE		0x0000
-#define CMD_SCAN_TYPE_PASSIVE		0x0001
+#define CMD_SCAN_TYPE_ACTIVE                    0x0000
+#define CMD_SCAN_TYPE_PASSIVE                   0x0001
 
-#define CMD_SCAN_RADIO_TYPE_BG		0
+#define CMD_SCAN_RADIO_TYPE_BG                  0
 
-#define	CMD_SCAN_PROBE_DELAY_TIME	0
+#define CMD_SCAN_PROBE_DELAY_TIME               0
 
 /* Define action or option for CMD_MAC_CONTROL */
-#define CMD_ACT_MAC_RX_ON			0x0001
-#define CMD_ACT_MAC_TX_ON			0x0002
-#define CMD_ACT_MAC_LOOPBACK_ON			0x0004
-#define CMD_ACT_MAC_WEP_ENABLE			0x0008
-#define CMD_ACT_MAC_INT_ENABLE			0x0010
-#define CMD_ACT_MAC_MULTICAST_ENABLE		0x0020
-#define CMD_ACT_MAC_BROADCAST_ENABLE		0x0040
-#define CMD_ACT_MAC_PROMISCUOUS_ENABLE		0x0080
-#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE	0x0100
-#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE	0x0400
+#define CMD_ACT_MAC_RX_ON                       0x0001
+#define CMD_ACT_MAC_TX_ON                       0x0002
+#define CMD_ACT_MAC_LOOPBACK_ON                 0x0004
+#define CMD_ACT_MAC_WEP_ENABLE                  0x0008
+#define CMD_ACT_MAC_INT_ENABLE                  0x0010
+#define CMD_ACT_MAC_MULTICAST_ENABLE            0x0020
+#define CMD_ACT_MAC_BROADCAST_ENABLE            0x0040
+#define CMD_ACT_MAC_PROMISCUOUS_ENABLE          0x0080
+#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE        0x0100
+#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE    0x0400
 
 /* Event flags for CMD_802_11_SUBSCRIBE_EVENT */
-#define CMD_SUBSCRIBE_RSSI_LOW		0x0001
-#define CMD_SUBSCRIBE_SNR_LOW		0x0002
-#define CMD_SUBSCRIBE_FAILCOUNT		0x0004
-#define CMD_SUBSCRIBE_BCNMISS		0x0008
-#define CMD_SUBSCRIBE_RSSI_HIGH		0x0010
-#define CMD_SUBSCRIBE_SNR_HIGH		0x0020
+#define CMD_SUBSCRIBE_RSSI_LOW                  0x0001
+#define CMD_SUBSCRIBE_SNR_LOW                   0x0002
+#define CMD_SUBSCRIBE_FAILCOUNT                 0x0004
+#define CMD_SUBSCRIBE_BCNMISS                   0x0008
+#define CMD_SUBSCRIBE_RSSI_HIGH                 0x0010
+#define CMD_SUBSCRIBE_SNR_HIGH                  0x0020
 
-#define RADIO_PREAMBLE_LONG	0x00
-#define RADIO_PREAMBLE_SHORT	0x02
-#define RADIO_PREAMBLE_AUTO	0x04
+#define RADIO_PREAMBLE_LONG                     0x00
+#define RADIO_PREAMBLE_SHORT                    0x02
+#define RADIO_PREAMBLE_AUTO                     0x04
 
 /* Define action or option for CMD_802_11_RF_CHANNEL */
-#define CMD_OPT_802_11_RF_CHANNEL_GET	0x00
-#define CMD_OPT_802_11_RF_CHANNEL_SET	0x01
+#define CMD_OPT_802_11_RF_CHANNEL_GET           0x00
+#define CMD_OPT_802_11_RF_CHANNEL_SET           0x01
 
 /* Define action or option for CMD_802_11_DATA_RATE */
-#define CMD_ACT_SET_TX_AUTO		0x0000
-#define CMD_ACT_SET_TX_FIX_RATE		0x0001
-#define CMD_ACT_GET_TX_RATE		0x0002
-
-#define CMD_ACT_SET_RX			0x0001
-#define	CMD_ACT_SET_TX			0x0002
-#define CMD_ACT_SET_BOTH		0x0003
-#define	CMD_ACT_GET_RX			0x0004
-#define CMD_ACT_GET_TX			0x0008
-#define	CMD_ACT_GET_BOTH		0x000c
+#define CMD_ACT_SET_TX_AUTO                     0x0000
+#define CMD_ACT_SET_TX_FIX_RATE                 0x0001
+#define CMD_ACT_GET_TX_RATE                     0x0002
 
 /* Define action or option for CMD_802_11_PS_MODE */
-#define CMD_TYPE_CAM			0x0000
-#define	CMD_TYPE_MAX_PSP		0x0001
-#define CMD_TYPE_FAST_PSP		0x0002
+#define CMD_TYPE_CAM                            0x0000
+#define CMD_TYPE_MAX_PSP                        0x0001
+#define CMD_TYPE_FAST_PSP                       0x0002
 
 /* Options for CMD_802_11_FW_WAKE_METHOD */
-#define CMD_WAKE_METHOD_UNCHANGED	0x0000
-#define CMD_WAKE_METHOD_COMMAND_INT	0x0001
-#define CMD_WAKE_METHOD_GPIO		0x0002
+#define CMD_WAKE_METHOD_UNCHANGED               0x0000
+#define CMD_WAKE_METHOD_COMMAND_INT             0x0001
+#define CMD_WAKE_METHOD_GPIO                    0x0002
 
 /* Object IDs for CMD_802_11_SNMP_MIB */
-#define SNMP_MIB_OID_BSS_TYPE		0x0000
-#define SNMP_MIB_OID_OP_RATE_SET	0x0001
-#define SNMP_MIB_OID_BEACON_PERIOD	0x0002  /* Reserved on v9+ */
-#define SNMP_MIB_OID_DTIM_PERIOD	0x0003  /* Reserved on v9+ */
-#define SNMP_MIB_OID_ASSOC_TIMEOUT	0x0004  /* Reserved on v9+ */
-#define SNMP_MIB_OID_RTS_THRESHOLD	0x0005
-#define SNMP_MIB_OID_SHORT_RETRY_LIMIT	0x0006
-#define SNMP_MIB_OID_LONG_RETRY_LIMIT	0x0007
-#define SNMP_MIB_OID_FRAG_THRESHOLD	0x0008
-#define SNMP_MIB_OID_11D_ENABLE		0x0009
-#define SNMP_MIB_OID_11H_ENABLE		0x000A
+#define SNMP_MIB_OID_BSS_TYPE                   0x0000
+#define SNMP_MIB_OID_OP_RATE_SET                0x0001
+#define SNMP_MIB_OID_BEACON_PERIOD              0x0002  /* Reserved on v9+ */
+#define SNMP_MIB_OID_DTIM_PERIOD                0x0003  /* Reserved on v9+ */
+#define SNMP_MIB_OID_ASSOC_TIMEOUT              0x0004  /* Reserved on v9+ */
+#define SNMP_MIB_OID_RTS_THRESHOLD              0x0005
+#define SNMP_MIB_OID_SHORT_RETRY_LIMIT          0x0006
+#define SNMP_MIB_OID_LONG_RETRY_LIMIT           0x0007
+#define SNMP_MIB_OID_FRAG_THRESHOLD             0x0008
+#define SNMP_MIB_OID_11D_ENABLE                 0x0009
+#define SNMP_MIB_OID_11H_ENABLE                 0x000A
 
 /* Define action or option for CMD_BT_ACCESS */
 enum cmd_bt_access_opts {
@@ -303,4 +291,672 @@
 #define MACREG_INT_CODE_MESH_AUTO_STARTED	35
 #define MACREG_INT_CODE_FIRMWARE_READY		48
 
+
+/* 802.11-related definitions */
+
+/* TxPD descriptor */
+struct txpd {
+	/* union to cope with later FW revisions */
+	union {
+		/* Current Tx packet status */
+		__le32 tx_status;
+		struct {
+			/* BSS type: client, AP, etc. */
+			u8 bss_type;
+			/* BSS number */
+			u8 bss_num;
+			/* Reserved */
+			__le16 reserved;
+		} bss;
+	} u;
+	/* Tx control */
+	__le32 tx_control;
+	__le32 tx_packet_location;
+	/* Tx packet length */
+	__le16 tx_packet_length;
+	/* First 2 byte of destination MAC address */
+	u8 tx_dest_addr_high[2];
+	/* Last 4 byte of destination MAC address */
+	u8 tx_dest_addr_low[4];
+	/* Pkt Priority */
+	u8 priority;
+	/* Pkt Transmit Power control */
+	u8 powermgmt;
+	/* Amount of time the packet has been queued (units = 2ms) */
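+	/* (e.g. a stored value of 50 corresponds to roughly 100 ms of queueing) */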
+	u8 pktdelay_2ms;
+	/* reserved */
+	u8 reserved1;
+} __attribute__ ((packed));
+
+/* RxPD Descriptor */
+struct rxpd {
+	/* union to cope with later FW revisions */
+	union {
+		/* Current Rx packet status */
+		__le16 status;
+		struct {
+			/* BSS type: client, AP, etc. */
+			u8 bss_type;
+			/* BSS number */
+			u8 bss_num;
+		} __attribute__ ((packed)) bss;
+	} __attribute__ ((packed)) u;
+
+	/* SNR */
+	u8 snr;
+
+	/* Rx control */
+	u8 rx_control;
+
+	/* Pkt length */
+	__le16 pkt_len;
+
+	/* Noise Floor */
+	u8 nf;
+
+	/* Rx Packet Rate */
+	u8 rx_rate;
+
+	/* Pkt addr */
+	__le32 pkt_ptr;
+
+	/* Next Rx RxPD addr */
+	__le32 next_rxpd_ptr;
+
+	/* Pkt Priority */
+	u8 priority;
+	u8 reserved[3];
+} __attribute__ ((packed));
+
+struct cmd_header {
+	__le16 command;
+	__le16 size;
+	__le16 seqnum;
+	__le16 result;
+} __attribute__ ((packed));
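+
+/* Every command and response starts with the header above; newer-style
+ * cmd_ds_* structures below embed it as their first member ("hdr"), while
+ * most of the header-less ones are carried inside the legacy
+ * struct cmd_ds_command container at the end of this file. */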
+
+/* Generic structure to hold all key types. */
+struct enc_key {
+	u16 len;
+	u16 flags;  /* KEY_INFO_* from defs.h */
+	u16 type; /* KEY_TYPE_* from defs.h */
+	u8 key[32];
+};
+
+/* lbs_offset_value */
+struct lbs_offset_value {
+	u32 offset;
+	u32 value;
+} __attribute__ ((packed));
+
+/*
+ * Define data structure for CMD_GET_HW_SPEC
+ * This structure defines the response for the GET_HW_SPEC command
+ */
+struct cmd_ds_get_hw_spec {
+	struct cmd_header hdr;
+
+	/* HW Interface version number */
+	__le16 hwifversion;
+	/* HW version number */
+	__le16 version;
+	/* Max number of TxPD FW can handle */
+	__le16 nr_txpd;
+	/* Max no of Multicast address */
+	__le16 nr_mcast_adr;
+	/* MAC address */
+	u8 permanentaddr[6];
+
+	/* region Code */
+	__le16 regioncode;
+
+	/* Number of antenna used */
+	__le16 nr_antenna;
+
+	/* FW release number, example 0x01030304 = 2.3.4p1 */
+	__le32 fwrelease;
+
+	/* Base Address of TxPD queue */
+	__le32 wcb_base;
+	/* Read Pointer of RxPd queue */
+	__le32 rxpd_rdptr;
+
+	/* Write Pointer of RxPd queue */
+	__le32 rxpd_wrptr;
+
+	/* FW/HW capability */
+	__le32 fwcapinfo;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_subscribe_event {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 events;
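+	/* (bitmask assembled from the CMD_SUBSCRIBE_* flags above, e.g.
+	 *  CMD_SUBSCRIBE_RSSI_LOW | CMD_SUBSCRIBE_SNR_LOW gives 0x0003) */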
+
+	/* A TLV to the CMD_802_11_SUBSCRIBE_EVENT command can contain a
+	 * number of TLVs. From the v5.1 manual, those TLVs would add up to
+	 * 40 bytes. However, future firmware might add additional TLVs, so I
+	 * bump this up a bit.
+	 */
+	uint8_t tlv[128];
+} __attribute__ ((packed));
+
+/*
+ * This scan handles the Country Information IE (802.11d compliant).
+ * Defines the data structure for CMD_802_11_SCAN.
+ */
+struct cmd_ds_802_11_scan {
+	struct cmd_header hdr;
+
+	uint8_t bsstype;
+	uint8_t bssid[ETH_ALEN];
+	uint8_t tlvbuffer[0];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_scan_rsp {
+	struct cmd_header hdr;
+
+	__le16 bssdescriptsize;
+	uint8_t nr_sets;
+	uint8_t bssdesc_and_tlvbuffer[0];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_get_log {
+	struct cmd_header hdr;
+
+	__le32 mcasttxframe;
+	__le32 failed;
+	__le32 retry;
+	__le32 multiretry;
+	__le32 framedup;
+	__le32 rtssuccess;
+	__le32 rtsfailure;
+	__le32 ackfailure;
+	__le32 rxfrag;
+	__le32 mcastrxframe;
+	__le32 fcserror;
+	__le32 txframe;
+	__le32 wepundecryptable;
+} __attribute__ ((packed));
+
+struct cmd_ds_mac_control {
+	struct cmd_header hdr;
+	__le16 action;
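+	/* (bitmask of the CMD_ACT_MAC_* flags above, e.g.
+	 *  CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON == 0x0003) */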
+	u16 reserved;
+} __attribute__ ((packed));
+
+struct cmd_ds_mac_multicast_adr {
+	struct cmd_header hdr;
+	__le16 action;
+	__le16 nr_of_adrs;
+	u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_authenticate {
+	struct cmd_header hdr;
+
+	u8 bssid[ETH_ALEN];
+	u8 authtype;
+	u8 reserved[10];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_deauthenticate {
+	struct cmd_header hdr;
+
+	u8 macaddr[ETH_ALEN];
+	__le16 reasoncode;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_associate {
+	struct cmd_header hdr;
+
+	u8 bssid[6];
+	__le16 capability;
+	__le16 listeninterval;
+	__le16 bcnperiod;
+	u8 dtimperiod;
+	u8 iebuf[512];    /* Enough for required and most optional IEs */
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_associate_response {
+	struct cmd_header hdr;
+
+	__le16 capability;
+	__le16 statuscode;
+	__le16 aid;
+	u8 iebuf[512];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_set_wep {
+	struct cmd_header hdr;
+
+	/* ACT_ADD, ACT_REMOVE or ACT_ENABLE */
+	__le16 action;
+
+	/* key Index selected for Tx */
+	__le16 keyindex;
+
+	/* 40, 128bit or TXWEP */
+	uint8_t keytype[4];
+	uint8_t keymaterial[4][16];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_snmp_mib {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 oid;
+	__le16 bufsize;
+	u8 value[128];
+} __attribute__ ((packed));
+
+struct cmd_ds_mac_reg_access {
+	__le16 action;
+	__le16 offset;
+	__le32 value;
+} __attribute__ ((packed));
+
+struct cmd_ds_bbp_reg_access {
+	__le16 action;
+	__le16 offset;
+	u8 value;
+	u8 reserved[3];
+} __attribute__ ((packed));
+
+struct cmd_ds_rf_reg_access {
+	__le16 action;
+	__le16 offset;
+	u8 value;
+	u8 reserved[3];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_radio_control {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 control;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_beacon_control {
+	__le16 action;
+	__le16 beacon_enable;
+	__le16 beacon_period;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_sleep_params {
+	struct cmd_header hdr;
+
+	/* ACT_GET/ACT_SET */
+	__le16 action;
+
+	/* Sleep clock error in ppm */
+	__le16 error;
+
+	/* Wakeup offset in usec */
+	__le16 offset;
+
+	/* Clock stabilization time in usec */
+	__le16 stabletime;
+
+	/* control periodic calibration */
+	uint8_t calcontrol;
+
+	/* control the use of external sleep clock */
+	uint8_t externalsleepclk;
+
+	/* reserved field, should be set to zero */
+	__le16 reserved;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_rf_channel {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 channel;
+	__le16 rftype;      /* unused */
+	__le16 reserved;    /* unused */
+	u8 channellist[32]; /* unused */
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_rssi {
+	/* weighting factor */
+	__le16 N;
+
+	__le16 reserved_0;
+	__le16 reserved_1;
+	__le16 reserved_2;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_rssi_rsp {
+	__le16 SNR;
+	__le16 noisefloor;
+	__le16 avgSNR;
+	__le16 avgnoisefloor;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_mac_address {
+	struct cmd_header hdr;
+
+	__le16 action;
+	u8 macadd[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_rf_tx_power {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 curlevel;
+	s8 maxlevel;
+	s8 minlevel;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_monitor_mode {
+	__le16 action;
+	__le16 mode;
+} __attribute__ ((packed));
+
+struct cmd_ds_set_boot2_ver {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 version;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_fw_wake_method {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 method;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ps_mode {
+	__le16 action;
+	__le16 nullpktinterval;
+	__le16 multipledtim;
+	__le16 reserved;
+	__le16 locallisteninterval;
+} __attribute__ ((packed));
+
+struct cmd_confirm_sleep {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 nullpktinterval;
+	__le16 multipledtim;
+	__le16 reserved;
+	__le16 locallisteninterval;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_data_rate {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 reserved;
+	u8 rates[MAX_RATES];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_rate_adapt_rateset {
+	struct cmd_header hdr;
+	__le16 action;
+	__le16 enablehwauto;
+	__le16 bitmap;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ad_hoc_start {
+	struct cmd_header hdr;
+
+	u8 ssid[IEEE80211_MAX_SSID_LEN];
+	u8 bsstype;
+	__le16 beaconperiod;
+	u8 dtimperiod;   /* Reserved on v9 and later */
+	struct ieee_ie_ibss_param_set ibss;
+	u8 reserved1[4];
+	struct ieee_ie_ds_param_set ds;
+	u8 reserved2[4];
+	__le16 probedelay;  /* Reserved on v9 and later */
+	__le16 capability;
+	u8 rates[MAX_RATES];
+	u8 tlv_memory_size_pad[100];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ad_hoc_result {
+	struct cmd_header hdr;
+
+	u8 pad[3];
+	u8 bssid[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct adhoc_bssdesc {
+	u8 bssid[ETH_ALEN];
+	u8 ssid[IEEE80211_MAX_SSID_LEN];
+	u8 type;
+	__le16 beaconperiod;
+	u8 dtimperiod;
+	__le64 timestamp;
+	__le64 localtime;
+	struct ieee_ie_ds_param_set ds;
+	u8 reserved1[4];
+	struct ieee_ie_ibss_param_set ibss;
+	u8 reserved2[4];
+	__le16 capability;
+	u8 rates[MAX_RATES];
+
+	/* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
+	 * Adhoc join command; adding fields would cause a binary layout
+	 * mismatch with the firmware.
+	 */
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ad_hoc_join {
+	struct cmd_header hdr;
+
+	struct adhoc_bssdesc bss;
+	__le16 failtimeout;   /* Reserved on v9 and later */
+	__le16 probedelay;    /* Reserved on v9 and later */
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_ad_hoc_stop {
+	struct cmd_header hdr;
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_enable_rsn {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 enable;
+} __attribute__ ((packed));
+
+struct MrvlIEtype_keyParamSet {
+	/* type ID */
+	__le16 type;
+
+	/* length of Payload */
+	__le16 length;
+
+	/* type of key: WEP=0, TKIP=1, AES=2 */
+	__le16 keytypeid;
+
+	/* key control Info specific to a keytypeid */
+	__le16 keyinfo;
+
+	/* length of key */
+	__le16 keylen;
+
+	/* key material of size keylen */
+	u8 key[32];
+} __attribute__ ((packed));
+
+#define MAX_WOL_RULES 		16
+
+struct host_wol_rule {
+	uint8_t rule_no;
+	uint8_t rule_ops;
+	__le16 sig_offset;
+	__le16 sig_length;
+	__le16 reserve;
+	__be32 sig_mask;
+	__be32 signature;
+} __attribute__ ((packed));
+
+struct wol_config {
+	uint8_t action;
+	uint8_t pattern;
+	uint8_t no_rules_in_cmd;
+	uint8_t result;
+	struct host_wol_rule rule[MAX_WOL_RULES];
+} __attribute__ ((packed));
+
+struct cmd_ds_host_sleep {
+	struct cmd_header hdr;
+	__le32 criteria;
+	uint8_t gpio;
+	uint16_t gap;
+	struct wol_config wol_conf;
+} __attribute__ ((packed));
+
+
+
+struct cmd_ds_802_11_key_material {
+	struct cmd_header hdr;
+
+	__le16 action;
+	struct MrvlIEtype_keyParamSet keyParamSet[2];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_eeprom_access {
+	struct cmd_header hdr;
+	__le16 action;
+	__le16 offset;
+	__le16 len;
+	/* firmware says it returns a maximum of 20 bytes */
+#define LBS_EEPROM_READ_LEN 20
+	u8 value[LBS_EEPROM_READ_LEN];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_tpc_cfg {
+	struct cmd_header hdr;
+
+	__le16 action;
+	uint8_t enable;
+	int8_t P0;
+	int8_t P1;
+	int8_t P2;
+	uint8_t usesnr;
+} __attribute__ ((packed));
+
+
+struct cmd_ds_802_11_pa_cfg {
+	struct cmd_header hdr;
+
+	__le16 action;
+	uint8_t enable;
+	int8_t P0;
+	int8_t P1;
+	int8_t P2;
+} __attribute__ ((packed));
+
+
+struct cmd_ds_802_11_led_ctrl {
+	__le16 action;
+	__le16 numled;
+	u8 data[256];
+} __attribute__ ((packed));
+
+struct cmd_ds_802_11_afc {
+	__le16 afc_auto;
+	union {
+		struct {
+			__le16 threshold;
+			__le16 period;
+		};
+		struct {
+			__le16 timing_offset; /* signed */
+			__le16 carrier_offset; /* signed */
+		};
+	};
+} __attribute__ ((packed));
+
+struct cmd_tx_rate_query {
+	__le16 txrate;
+} __attribute__ ((packed));
+
+struct cmd_ds_get_tsf {
+	__le64 tsfvalue;
+} __attribute__ ((packed));
+
+struct cmd_ds_bt_access {
+	__le16 action;
+	__le32 id;
+	u8 addr1[ETH_ALEN];
+	u8 addr2[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct cmd_ds_fwt_access {
+	__le16 action;
+	__le32 id;
+	u8 valid;
+	u8 da[ETH_ALEN];
+	u8 dir;
+	u8 ra[ETH_ALEN];
+	__le32 ssn;
+	__le32 dsn;
+	__le32 metric;
+	u8 rate;
+	u8 hopcount;
+	u8 ttl;
+	__le32 expiration;
+	u8 sleepmode;
+	__le32 snr;
+	__le32 references;
+	u8 prec[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct cmd_ds_mesh_config {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le16 channel;
+	__le16 type;
+	__le16 length;
+	u8 data[128];	/* last position reserved */
+} __attribute__ ((packed));
+
+struct cmd_ds_mesh_access {
+	struct cmd_header hdr;
+
+	__le16 action;
+	__le32 data[32];	/* last position reserved */
+} __attribute__ ((packed));
+
+/* Number of stats counters returned by the firmware */
+#define MESH_STATS_NUM 8
+
+struct cmd_ds_command {
+	/* command header */
+	__le16 command;
+	__le16 size;
+	__le16 seqnum;
+	__le16 result;
+
+	/* command Body */
+	union {
+		struct cmd_ds_802_11_ps_mode psmode;
+		struct cmd_ds_802_11_monitor_mode monitor;
+		struct cmd_ds_802_11_rssi rssi;
+		struct cmd_ds_802_11_rssi_rsp rssirsp;
+		struct cmd_ds_mac_reg_access macreg;
+		struct cmd_ds_bbp_reg_access bbpreg;
+		struct cmd_ds_rf_reg_access rfreg;
+
+		struct cmd_ds_802_11_tpc_cfg tpccfg;
+		struct cmd_ds_802_11_afc afc;
+		struct cmd_ds_802_11_led_ctrl ledgpio;
+
+		struct cmd_ds_bt_access bt;
+		struct cmd_ds_fwt_access fwt;
+		struct cmd_ds_802_11_beacon_control bcn_ctrl;
+	} params;
+} __attribute__ ((packed));
+
 #endif
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
deleted file mode 100644
index c8a1998..0000000
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ /dev/null
@@ -1,800 +0,0 @@
-/*
- * This file contains the function prototypes, data structure
- * and defines for all the host/station commands
- */
-#ifndef _LBS_HOSTCMD_H
-#define _LBS_HOSTCMD_H
-
-#include <linux/wireless.h>
-#include "11d.h"
-#include "types.h"
-
-/* 802.11-related definitions */
-
-/* TxPD descriptor */
-struct txpd {
-	/* union to cope up with later FW revisions */
-	union {
-		/* Current Tx packet status */
-		__le32 tx_status;
-		struct {
-			/* BSS type: client, AP, etc. */
-			u8 bss_type;
-			/* BSS number */
-			u8 bss_num;
-			/* Reserved */
-			__le16 reserved;
-		} bss;
-	} u;
-	/* Tx control */
-	__le32 tx_control;
-	__le32 tx_packet_location;
-	/* Tx packet length */
-	__le16 tx_packet_length;
-	/* First 2 byte of destination MAC address */
-	u8 tx_dest_addr_high[2];
-	/* Last 4 byte of destination MAC address */
-	u8 tx_dest_addr_low[4];
-	/* Pkt Priority */
-	u8 priority;
-	/* Pkt Trasnit Power control */
-	u8 powermgmt;
-	/* Amount of time the packet has been queued in the driver (units = 2ms) */
-	u8 pktdelay_2ms;
-	/* reserved */
-	u8 reserved1;
-} __attribute__ ((packed));
-
-/* RxPD Descriptor */
-struct rxpd {
-	/* union to cope up with later FW revisions */
-	union {
-		/* Current Rx packet status */
-		__le16 status;
-		struct {
-			/* BSS type: client, AP, etc. */
-			u8 bss_type;
-			/* BSS number */
-			u8 bss_num;
-		} __attribute__ ((packed)) bss;
-	} __attribute__ ((packed)) u;
-
-	/* SNR */
-	u8 snr;
-
-	/* Tx control */
-	u8 rx_control;
-
-	/* Pkt length */
-	__le16 pkt_len;
-
-	/* Noise Floor */
-	u8 nf;
-
-	/* Rx Packet Rate */
-	u8 rx_rate;
-
-	/* Pkt addr */
-	__le32 pkt_ptr;
-
-	/* Next Rx RxPD addr */
-	__le32 next_rxpd_ptr;
-
-	/* Pkt Priority */
-	u8 priority;
-	u8 reserved[3];
-} __attribute__ ((packed));
-
-struct cmd_header {
-	__le16 command;
-	__le16 size;
-	__le16 seqnum;
-	__le16 result;
-} __attribute__ ((packed));
-
-struct cmd_ctrl_node {
-	struct list_head list;
-	int result;
-	/* command response */
-	int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *);
-	unsigned long callback_arg;
-	/* command data */
-	struct cmd_header *cmdbuf;
-	/* wait queue */
-	u16 cmdwaitqwoken;
-	wait_queue_head_t cmdwait_q;
-};
-
-/* Generic structure to hold all key types. */
-struct enc_key {
-	u16 len;
-	u16 flags;  /* KEY_INFO_* from defs.h */
-	u16 type; /* KEY_TYPE_* from defs.h */
-	u8 key[32];
-};
-
-/* lbs_offset_value */
-struct lbs_offset_value {
-	u32 offset;
-	u32 value;
-} __attribute__ ((packed));
-
-/* Define general data structure */
-/* cmd_DS_GEN */
-struct cmd_ds_gen {
-	__le16 command;
-	__le16 size;
-	__le16 seqnum;
-	__le16 result;
-	void *cmdresp[0];
-} __attribute__ ((packed));
-
-#define S_DS_GEN sizeof(struct cmd_ds_gen)
-
-
-/*
- * Define data structure for CMD_GET_HW_SPEC
- * This structure defines the response for the GET_HW_SPEC command
- */
-struct cmd_ds_get_hw_spec {
-	struct cmd_header hdr;
-
-	/* HW Interface version number */
-	__le16 hwifversion;
-	/* HW version number */
-	__le16 version;
-	/* Max number of TxPD FW can handle */
-	__le16 nr_txpd;
-	/* Max no of Multicast address */
-	__le16 nr_mcast_adr;
-	/* MAC address */
-	u8 permanentaddr[6];
-
-	/* region Code */
-	__le16 regioncode;
-
-	/* Number of antenna used */
-	__le16 nr_antenna;
-
-	/* FW release number, example 0x01030304 = 2.3.4p1 */
-	__le32 fwrelease;
-
-	/* Base Address of TxPD queue */
-	__le32 wcb_base;
-	/* Read Pointer of RxPd queue */
-	__le32 rxpd_rdptr;
-
-	/* Write Pointer of RxPd queue */
-	__le32 rxpd_wrptr;
-
-	/*FW/HW capability */
-	__le32 fwcapinfo;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_subscribe_event {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 events;
-
-	/* A TLV to the CMD_802_11_SUBSCRIBE_EVENT command can contain a
-	 * number of TLVs. From the v5.1 manual, those TLVs would add up to
-	 * 40 bytes. However, future firmware might add additional TLVs, so I
-	 * bump this up a bit.
-	 */
-	uint8_t tlv[128];
-} __attribute__ ((packed));
-
-/*
- * This scan handle Country Information IE(802.11d compliant)
- * Define data structure for CMD_802_11_SCAN
- */
-struct cmd_ds_802_11_scan {
-	struct cmd_header hdr;
-
-	uint8_t bsstype;
-	uint8_t bssid[ETH_ALEN];
-	uint8_t tlvbuffer[0];
-#if 0
-	mrvlietypes_ssidparamset_t ssidParamSet;
-	mrvlietypes_chanlistparamset_t ChanListParamSet;
-	mrvlietypes_ratesparamset_t OpRateSet;
-#endif
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_scan_rsp {
-	struct cmd_header hdr;
-
-	__le16 bssdescriptsize;
-	uint8_t nr_sets;
-	uint8_t bssdesc_and_tlvbuffer[0];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_get_log {
-	struct cmd_header hdr;
-
-	__le32 mcasttxframe;
-	__le32 failed;
-	__le32 retry;
-	__le32 multiretry;
-	__le32 framedup;
-	__le32 rtssuccess;
-	__le32 rtsfailure;
-	__le32 ackfailure;
-	__le32 rxfrag;
-	__le32 mcastrxframe;
-	__le32 fcserror;
-	__le32 txframe;
-	__le32 wepundecryptable;
-} __attribute__ ((packed));
-
-struct cmd_ds_mac_control {
-	struct cmd_header hdr;
-	__le16 action;
-	u16 reserved;
-} __attribute__ ((packed));
-
-struct cmd_ds_mac_multicast_adr {
-	struct cmd_header hdr;
-	__le16 action;
-	__le16 nr_of_adrs;
-	u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
-} __attribute__ ((packed));
-
-struct cmd_ds_gspi_bus_config {
-	struct cmd_header hdr;
-	__le16 action;
-	__le16 bus_delay_mode;
-	__le16 host_time_delay_to_read_port;
-	__le16 host_time_delay_to_read_register;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_authenticate {
-	struct cmd_header hdr;
-
-	u8 bssid[ETH_ALEN];
-	u8 authtype;
-	u8 reserved[10];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_deauthenticate {
-	struct cmd_header hdr;
-
-	u8 macaddr[ETH_ALEN];
-	__le16 reasoncode;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_associate {
-	struct cmd_header hdr;
-
-	u8 bssid[6];
-	__le16 capability;
-	__le16 listeninterval;
-	__le16 bcnperiod;
-	u8 dtimperiod;
-	u8 iebuf[512];    /* Enough for required and most optional IEs */
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_associate_response {
-	struct cmd_header hdr;
-
-	__le16 capability;
-	__le16 statuscode;
-	__le16 aid;
-	u8 iebuf[512];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_set_wep {
-	struct cmd_header hdr;
-
-	/* ACT_ADD, ACT_REMOVE or ACT_ENABLE */
-	__le16 action;
-
-	/* key Index selected for Tx */
-	__le16 keyindex;
-
-	/* 40, 128bit or TXWEP */
-	uint8_t keytype[4];
-	uint8_t keymaterial[4][16];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_3_get_stat {
-	__le32 xmitok;
-	__le32 rcvok;
-	__le32 xmiterror;
-	__le32 rcverror;
-	__le32 rcvnobuffer;
-	__le32 rcvcrcerror;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_get_stat {
-	__le32 txfragmentcnt;
-	__le32 mcasttxframecnt;
-	__le32 failedcnt;
-	__le32 retrycnt;
-	__le32 Multipleretrycnt;
-	__le32 rtssuccesscnt;
-	__le32 rtsfailurecnt;
-	__le32 ackfailurecnt;
-	__le32 frameduplicatecnt;
-	__le32 rxfragmentcnt;
-	__le32 mcastrxframecnt;
-	__le32 fcserrorcnt;
-	__le32 bcasttxframecnt;
-	__le32 bcastrxframecnt;
-	__le32 txbeacon;
-	__le32 rxbeacon;
-	__le32 wepundecryptable;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_snmp_mib {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 oid;
-	__le16 bufsize;
-	u8 value[128];
-} __attribute__ ((packed));
-
-struct cmd_ds_mac_reg_map {
-	__le16 buffersize;
-	u8 regmap[128];
-	__le16 reserved;
-} __attribute__ ((packed));
-
-struct cmd_ds_bbp_reg_map {
-	__le16 buffersize;
-	u8 regmap[128];
-	__le16 reserved;
-} __attribute__ ((packed));
-
-struct cmd_ds_rf_reg_map {
-	__le16 buffersize;
-	u8 regmap[64];
-	__le16 reserved;
-} __attribute__ ((packed));
-
-struct cmd_ds_mac_reg_access {
-	__le16 action;
-	__le16 offset;
-	__le32 value;
-} __attribute__ ((packed));
-
-struct cmd_ds_bbp_reg_access {
-	__le16 action;
-	__le16 offset;
-	u8 value;
-	u8 reserved[3];
-} __attribute__ ((packed));
-
-struct cmd_ds_rf_reg_access {
-	__le16 action;
-	__le16 offset;
-	u8 value;
-	u8 reserved[3];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_radio_control {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 control;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_beacon_control {
-	__le16 action;
-	__le16 beacon_enable;
-	__le16 beacon_period;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_sleep_params {
-	struct cmd_header hdr;
-
-	/* ACT_GET/ACT_SET */
-	__le16 action;
-
-	/* Sleep clock error in ppm */
-	__le16 error;
-
-	/* Wakeup offset in usec */
-	__le16 offset;
-
-	/* Clock stabilization time in usec */
-	__le16 stabletime;
-
-	/* control periodic calibration */
-	uint8_t calcontrol;
-
-	/* control the use of external sleep clock */
-	uint8_t externalsleepclk;
-
-	/* reserved field, should be set to zero */
-	__le16 reserved;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_inactivity_timeout {
-	struct cmd_header hdr;
-
-	/* ACT_GET/ACT_SET */
-	__le16 action;
-
-	/* Inactivity timeout in msec */
-	__le16 timeout;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rf_channel {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 channel;
-	__le16 rftype;      /* unused */
-	__le16 reserved;    /* unused */
-	u8 channellist[32]; /* unused */
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rssi {
-	/* weighting factor */
-	__le16 N;
-
-	__le16 reserved_0;
-	__le16 reserved_1;
-	__le16 reserved_2;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rssi_rsp {
-	__le16 SNR;
-	__le16 noisefloor;
-	__le16 avgSNR;
-	__le16 avgnoisefloor;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_mac_address {
-	struct cmd_header hdr;
-
-	__le16 action;
-	u8 macadd[ETH_ALEN];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rf_tx_power {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 curlevel;
-	s8 maxlevel;
-	s8 minlevel;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rf_antenna {
-	__le16 action;
-
-	/* Number of antennas or 0xffff(diversity) */
-	__le16 antennamode;
-
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_monitor_mode {
-	__le16 action;
-	__le16 mode;
-} __attribute__ ((packed));
-
-struct cmd_ds_set_boot2_ver {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 version;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_fw_wake_method {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 method;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_sleep_period {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 period;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_ps_mode {
-	__le16 action;
-	__le16 nullpktinterval;
-	__le16 multipledtim;
-	__le16 reserved;
-	__le16 locallisteninterval;
-} __attribute__ ((packed));
-
-struct cmd_confirm_sleep {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 nullpktinterval;
-	__le16 multipledtim;
-	__le16 reserved;
-	__le16 locallisteninterval;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_data_rate {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 reserved;
-	u8 rates[MAX_RATES];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_rate_adapt_rateset {
-	struct cmd_header hdr;
-	__le16 action;
-	__le16 enablehwauto;
-	__le16 bitmap;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_ad_hoc_start {
-	struct cmd_header hdr;
-
-	u8 ssid[IW_ESSID_MAX_SIZE];
-	u8 bsstype;
-	__le16 beaconperiod;
-	u8 dtimperiod;   /* Reserved on v9 and later */
-	struct ieee_ie_ibss_param_set ibss;
-	u8 reserved1[4];
-	struct ieee_ie_ds_param_set ds;
-	u8 reserved2[4];
-	__le16 probedelay;  /* Reserved on v9 and later */
-	__le16 capability;
-	u8 rates[MAX_RATES];
-	u8 tlv_memory_size_pad[100];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_ad_hoc_result {
-	struct cmd_header hdr;
-
-	u8 pad[3];
-	u8 bssid[ETH_ALEN];
-} __attribute__ ((packed));
-
-struct adhoc_bssdesc {
-	u8 bssid[ETH_ALEN];
-	u8 ssid[IW_ESSID_MAX_SIZE];
-	u8 type;
-	__le16 beaconperiod;
-	u8 dtimperiod;
-	__le64 timestamp;
-	__le64 localtime;
-	struct ieee_ie_ds_param_set ds;
-	u8 reserved1[4];
-	struct ieee_ie_ibss_param_set ibss;
-	u8 reserved2[4];
-	__le16 capability;
-	u8 rates[MAX_RATES];
-
-	/* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
-	 * Adhoc join command and will cause a binary layout mismatch with
-	 * the firmware
-	 */
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_ad_hoc_join {
-	struct cmd_header hdr;
-
-	struct adhoc_bssdesc bss;
-	__le16 failtimeout;   /* Reserved on v9 and later */
-	__le16 probedelay;    /* Reserved on v9 and later */
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_ad_hoc_stop {
-	struct cmd_header hdr;
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_enable_rsn {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le16 enable;
-} __attribute__ ((packed));
-
-struct MrvlIEtype_keyParamSet {
-	/* type ID */
-	__le16 type;
-
-	/* length of Payload */
-	__le16 length;
-
-	/* type of key: WEP=0, TKIP=1, AES=2 */
-	__le16 keytypeid;
-
-	/* key control Info specific to a keytypeid */
-	__le16 keyinfo;
-
-	/* length of key */
-	__le16 keylen;
-
-	/* key material of size keylen */
-	u8 key[32];
-} __attribute__ ((packed));
-
-#define MAX_WOL_RULES 		16
-
-struct host_wol_rule {
-	uint8_t rule_no;
-	uint8_t rule_ops;
-	__le16 sig_offset;
-	__le16 sig_length;
-	__le16 reserve;
-	__be32 sig_mask;
-	__be32 signature;
-} __attribute__ ((packed));
-
-struct wol_config {
-	uint8_t action;
-	uint8_t pattern;
-	uint8_t no_rules_in_cmd;
-	uint8_t result;
-	struct host_wol_rule rule[MAX_WOL_RULES];
-} __attribute__ ((packed));
-
-struct cmd_ds_host_sleep {
-	struct cmd_header hdr;
-	__le32 criteria;
-	uint8_t gpio;
-	uint16_t gap;
-	struct wol_config wol_conf;
-} __attribute__ ((packed));
-
-
-
-struct cmd_ds_802_11_key_material {
-	struct cmd_header hdr;
-
-	__le16 action;
-	struct MrvlIEtype_keyParamSet keyParamSet[2];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_eeprom_access {
-	struct cmd_header hdr;
-	__le16 action;
-	__le16 offset;
-	__le16 len;
-	/* firmware says it returns a maximum of 20 bytes */
-#define LBS_EEPROM_READ_LEN 20
-	u8 value[LBS_EEPROM_READ_LEN];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_tpc_cfg {
-	struct cmd_header hdr;
-
-	__le16 action;
-	uint8_t enable;
-	int8_t P0;
-	int8_t P1;
-	int8_t P2;
-	uint8_t usesnr;
-} __attribute__ ((packed));
-
-
-struct cmd_ds_802_11_pa_cfg {
-	struct cmd_header hdr;
-
-	__le16 action;
-	uint8_t enable;
-	int8_t P0;
-	int8_t P1;
-	int8_t P2;
-} __attribute__ ((packed));
-
-
-struct cmd_ds_802_11_led_ctrl {
-	__le16 action;
-	__le16 numled;
-	u8 data[256];
-} __attribute__ ((packed));
-
-struct cmd_ds_802_11_afc {
-	__le16 afc_auto;
-	union {
-		struct {
-			__le16 threshold;
-			__le16 period;
-		};
-		struct {
-			__le16 timing_offset; /* signed */
-			__le16 carrier_offset; /* signed */
-		};
-	};
-} __attribute__ ((packed));
-
-struct cmd_tx_rate_query {
-	__le16 txrate;
-} __attribute__ ((packed));
-
-struct cmd_ds_get_tsf {
-	__le64 tsfvalue;
-} __attribute__ ((packed));
-
-struct cmd_ds_bt_access {
-	__le16 action;
-	__le32 id;
-	u8 addr1[ETH_ALEN];
-	u8 addr2[ETH_ALEN];
-} __attribute__ ((packed));
-
-struct cmd_ds_fwt_access {
-	__le16 action;
-	__le32 id;
-	u8 valid;
-	u8 da[ETH_ALEN];
-	u8 dir;
-	u8 ra[ETH_ALEN];
-	__le32 ssn;
-	__le32 dsn;
-	__le32 metric;
-	u8 rate;
-	u8 hopcount;
-	u8 ttl;
-	__le32 expiration;
-	u8 sleepmode;
-	__le32 snr;
-	__le32 references;
-	u8 prec[ETH_ALEN];
-} __attribute__ ((packed));
-
-
-struct cmd_ds_mesh_config {
-	struct cmd_header hdr;
-
-        __le16 action;
-        __le16 channel;
-        __le16 type;
-        __le16 length;
-        u8 data[128];   /* last position reserved */
-} __attribute__ ((packed));
-
-
-struct cmd_ds_mesh_access {
-	struct cmd_header hdr;
-
-	__le16 action;
-	__le32 data[32];	/* last position reserved */
-} __attribute__ ((packed));
-
-/* Number of stats counters returned by the firmware */
-#define MESH_STATS_NUM 8
-
-struct cmd_ds_command {
-	/* command header */
-	__le16 command;
-	__le16 size;
-	__le16 seqnum;
-	__le16 result;
-
-	/* command Body */
-	union {
-		struct cmd_ds_802_11_ps_mode psmode;
-		struct cmd_ds_802_11_get_stat gstat;
-		struct cmd_ds_802_3_get_stat gstat_8023;
-		struct cmd_ds_802_11_rf_antenna rant;
-		struct cmd_ds_802_11_monitor_mode monitor;
-		struct cmd_ds_802_11_rssi rssi;
-		struct cmd_ds_802_11_rssi_rsp rssirsp;
-		struct cmd_ds_mac_reg_access macreg;
-		struct cmd_ds_bbp_reg_access bbpreg;
-		struct cmd_ds_rf_reg_access rfreg;
-
-		struct cmd_ds_802_11d_domain_info domaininfo;
-		struct cmd_ds_802_11d_domain_info domaininforesp;
-
-		struct cmd_ds_802_11_tpc_cfg tpccfg;
-		struct cmd_ds_802_11_afc afc;
-		struct cmd_ds_802_11_led_ctrl ledgpio;
-
-		struct cmd_tx_rate_query txrate;
-		struct cmd_ds_bt_access bt;
-		struct cmd_ds_fwt_access fwt;
-		struct cmd_ds_get_tsf gettsf;
-		struct cmd_ds_802_11_beacon_control bcn_ctrl;
-	} params;
-} __attribute__ ((packed));
-
-#endif
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 06df2e1..30d9d0e 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -134,7 +134,7 @@
 static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
 {
 	int err = 0;
-	u16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK);
+	__le16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK);
 	struct spi_message m;
 	struct spi_transfer reg_trans;
 	struct spi_transfer data_trans;
@@ -166,7 +166,7 @@
 
 static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val)
 {
-	u16 buff;
+	__le16 buff;
 
 	buff = cpu_to_le16(val);
 	return spu_write(card, reg, (u8 *)&buff, sizeof(u16));
@@ -188,7 +188,7 @@
 {
 	unsigned int delay;
 	int err = 0;
-	u16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK);
+	__le16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK);
 	struct spi_message m;
 	struct spi_transfer reg_trans;
 	struct spi_transfer dummy_trans;
@@ -235,7 +235,7 @@
 /* Read 16 bits from an SPI register */
 static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
 {
-	u16 buf;
+	__le16 buf;
 	int ret;
 
 	ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
@@ -248,7 +248,7 @@
  * The low 16 bits are read first. */
 static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val)
 {
-	u32 buf;
+	__le32 buf;
 	int err;
 
 	err = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index a8262de..f12d667 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -511,7 +511,7 @@
 	/* Fill the receive configuration URB and initialise the Rx call back */
 	usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
 			  usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
-			  (void *) (skb->tail + (size_t) IPFIELD_ALIGN_OFFSET),
+			  skb->data + IPFIELD_ALIGN_OFFSET,
 			  MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn,
 			  cardp);
 
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 87bfd17..01f46cf 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -45,119 +45,6 @@
 struct cmd_confirm_sleep confirm_sleep;
 
 
-#define LBS_TX_PWR_DEFAULT		20	/*100mW */
-#define LBS_TX_PWR_US_DEFAULT		20	/*100mW */
-#define LBS_TX_PWR_JP_DEFAULT		16	/*50mW */
-#define LBS_TX_PWR_FR_DEFAULT		20	/*100mW */
-#define LBS_TX_PWR_EMEA_DEFAULT	20	/*100mW */
-
-/* Format { channel, frequency (MHz), maxtxpower } */
-/* band: 'B/G', region: USA FCC/Canada IC */
-static struct chan_freq_power channel_freq_power_US_BG[] = {
-	{1, 2412, LBS_TX_PWR_US_DEFAULT},
-	{2, 2417, LBS_TX_PWR_US_DEFAULT},
-	{3, 2422, LBS_TX_PWR_US_DEFAULT},
-	{4, 2427, LBS_TX_PWR_US_DEFAULT},
-	{5, 2432, LBS_TX_PWR_US_DEFAULT},
-	{6, 2437, LBS_TX_PWR_US_DEFAULT},
-	{7, 2442, LBS_TX_PWR_US_DEFAULT},
-	{8, 2447, LBS_TX_PWR_US_DEFAULT},
-	{9, 2452, LBS_TX_PWR_US_DEFAULT},
-	{10, 2457, LBS_TX_PWR_US_DEFAULT},
-	{11, 2462, LBS_TX_PWR_US_DEFAULT}
-};
-
-/* band: 'B/G', region: Europe ETSI */
-static struct chan_freq_power channel_freq_power_EU_BG[] = {
-	{1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
-	{2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
-	{3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
-	{4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
-	{5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
-	{6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
-	{7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
-	{8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
-	{9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
-	{10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
-	{11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
-	{12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
-	{13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
-};
-
-/* band: 'B/G', region: Spain */
-static struct chan_freq_power channel_freq_power_SPN_BG[] = {
-	{10, 2457, LBS_TX_PWR_DEFAULT},
-	{11, 2462, LBS_TX_PWR_DEFAULT}
-};
-
-/* band: 'B/G', region: France */
-static struct chan_freq_power channel_freq_power_FR_BG[] = {
-	{10, 2457, LBS_TX_PWR_FR_DEFAULT},
-	{11, 2462, LBS_TX_PWR_FR_DEFAULT},
-	{12, 2467, LBS_TX_PWR_FR_DEFAULT},
-	{13, 2472, LBS_TX_PWR_FR_DEFAULT}
-};
-
-/* band: 'B/G', region: Japan */
-static struct chan_freq_power channel_freq_power_JPN_BG[] = {
-	{1, 2412, LBS_TX_PWR_JP_DEFAULT},
-	{2, 2417, LBS_TX_PWR_JP_DEFAULT},
-	{3, 2422, LBS_TX_PWR_JP_DEFAULT},
-	{4, 2427, LBS_TX_PWR_JP_DEFAULT},
-	{5, 2432, LBS_TX_PWR_JP_DEFAULT},
-	{6, 2437, LBS_TX_PWR_JP_DEFAULT},
-	{7, 2442, LBS_TX_PWR_JP_DEFAULT},
-	{8, 2447, LBS_TX_PWR_JP_DEFAULT},
-	{9, 2452, LBS_TX_PWR_JP_DEFAULT},
-	{10, 2457, LBS_TX_PWR_JP_DEFAULT},
-	{11, 2462, LBS_TX_PWR_JP_DEFAULT},
-	{12, 2467, LBS_TX_PWR_JP_DEFAULT},
-	{13, 2472, LBS_TX_PWR_JP_DEFAULT},
-	{14, 2484, LBS_TX_PWR_JP_DEFAULT}
-};
-
-/**
- * the structure for channel, frequency and power
- */
-struct region_cfp_table {
-	u8 region;
-	struct chan_freq_power *cfp_BG;
-	int cfp_no_BG;
-};
-
-/**
- * the structure for the mapping between region and CFP
- */
-static struct region_cfp_table region_cfp_table[] = {
-	{0x10,			/*US FCC */
-	 channel_freq_power_US_BG,
-	 ARRAY_SIZE(channel_freq_power_US_BG),
-	 }
-	,
-	{0x20,			/*CANADA IC */
-	 channel_freq_power_US_BG,
-	 ARRAY_SIZE(channel_freq_power_US_BG),
-	 }
-	,
-	{0x30, /*EU*/ channel_freq_power_EU_BG,
-	 ARRAY_SIZE(channel_freq_power_EU_BG),
-	 }
-	,
-	{0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
-	 ARRAY_SIZE(channel_freq_power_SPN_BG),
-	 }
-	,
-	{0x32, /*FRANCE*/ channel_freq_power_FR_BG,
-	 ARRAY_SIZE(channel_freq_power_FR_BG),
-	 }
-	,
-	{0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
-	 ARRAY_SIZE(channel_freq_power_JPN_BG),
-	 }
-	,
-/*Add new region here */
-};
-
 /**
  * the table to keep region code
  */
@@ -165,13 +52,6 @@
     { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 };
 
 /**
- * 802.11b/g supported bitrates (in 500Kb/s units)
- */
-u8 lbs_bg_rates[MAX_RATES] =
-    { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
-0x00, 0x00 };
-
-/**
  * FW rate table.  FW refers to rates by their index in this table, not by the
  * rate value itself.  Values of 0x00 are
  * reserved positions.
@@ -405,7 +285,7 @@
 		return count;
 	if (enable)
 		action = CMD_ACT_MESH_CONFIG_START;
-	ret = lbs_mesh_config(priv, action, priv->curbssparams.channel);
+	ret = lbs_mesh_config(priv, action, priv->channel);
 	if (ret)
 		return ret;
 
@@ -1089,6 +969,8 @@
 			ret = lbs_prepare_and_send_command(priv,
 					CMD_802_11_DEEP_SLEEP, 0,
 					0, 0, NULL);
+			if (ret)
+				lbs_pr_err("Enter Deep Sleep command failed\n");
 		}
 	}
 	mod_timer(&priv->auto_deepsleep_timer , jiffies +
@@ -1164,7 +1046,7 @@
 	priv->mesh_connect_status = LBS_DISCONNECTED;
 	priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
 	priv->mode = IW_MODE_INFRA;
-	priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL;
+	priv->channel = DEFAULT_AD_HOC_CHANNEL;
 	priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
 	priv->radio_on = 1;
 	priv->enablehwauto = 1;
@@ -1345,7 +1227,6 @@
 void lbs_remove_card(struct lbs_private *priv)
 {
 	struct net_device *dev = priv->dev;
-	union iwreq_data wrqu;
 
 	lbs_deb_enter(LBS_DEB_MAIN);
 
@@ -1370,9 +1251,7 @@
 		lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
 	}
 
-	memset(wrqu.ap_addr.sa_data, 0xaa, ETH_ALEN);
-	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+	lbs_send_disconnect_notification(priv);
 
 	if (priv->is_deep_sleep) {
 		priv->is_deep_sleep = 0;
@@ -1406,9 +1285,6 @@
 	if (ret)
 		goto done;
 
-	/* init 802.11d */
-	lbs_init_11d(priv);
-
 	if (lbs_cfg_register(priv)) {
 		lbs_pr_err("cannot register device\n");
 		goto done;
@@ -1435,10 +1311,10 @@
 
 		priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
 		if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-				    priv->curbssparams.channel)) {
+				    priv->channel)) {
 			priv->mesh_tlv = TLV_TYPE_MESH_ID;
 			if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-					    priv->curbssparams.channel))
+					    priv->channel))
 				priv->mesh_tlv = 0;
 		}
 	} else if (priv->mesh_fw_ver == MESH_FW_NEW) {
@@ -1447,7 +1323,7 @@
 		 */
 		priv->mesh_tlv = TLV_TYPE_MESH_ID;
 		if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-				    priv->curbssparams.channel))
+				    priv->channel))
 			priv->mesh_tlv = 0;
 	}
 	if (priv->mesh_tlv) {
@@ -1618,68 +1494,6 @@
 	lbs_deb_leave(LBS_DEB_MESH);
 }
 
-/**
- *  @brief This function finds the CFP in
- *  region_cfp_table based on region and band parameter.
- *
- *  @param region  The region code
- *  @param band	   The band
- *  @param cfp_no  A pointer to CFP number
- *  @return 	   A pointer to CFP
- */
-struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
-{
-	int i, end;
-
-	lbs_deb_enter(LBS_DEB_MAIN);
-
-	end = ARRAY_SIZE(region_cfp_table);
-
-	for (i = 0; i < end ; i++) {
-		lbs_deb_main("region_cfp_table[i].region=%d\n",
-			region_cfp_table[i].region);
-		if (region_cfp_table[i].region == region) {
-			*cfp_no = region_cfp_table[i].cfp_no_BG;
-			lbs_deb_leave(LBS_DEB_MAIN);
-			return region_cfp_table[i].cfp_BG;
-		}
-	}
-
-	lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
-	return NULL;
-}
-
-int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
-{
-	int ret = 0;
-	int i = 0;
-
-	struct chan_freq_power *cfp;
-	int cfp_no;
-
-	lbs_deb_enter(LBS_DEB_MAIN);
-
-	memset(priv->region_channel, 0, sizeof(priv->region_channel));
-
-	cfp = lbs_get_region_cfp_table(region, &cfp_no);
-	if (cfp != NULL) {
-		priv->region_channel[i].nrcfp = cfp_no;
-		priv->region_channel[i].CFP = cfp;
-	} else {
-		lbs_deb_main("wrong region code %#x in band B/G\n",
-		       region);
-		ret = -1;
-		goto out;
-	}
-	priv->region_channel[i].valid = 1;
-	priv->region_channel[i].region = region;
-	priv->region_channel[i].band = band;
-	i++;
-out:
-	lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
-	return ret;
-}
-
 void lbs_queue_event(struct lbs_private *priv, u32 event)
 {
 	unsigned long flags;
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
index 18fe29f..871f914 100644
--- a/drivers/net/wireless/libertas/persistcfg.c
+++ b/drivers/net/wireless/libertas/persistcfg.c
@@ -187,9 +187,9 @@
 	if (ret)
 		return ret;
 
-	if (defs.meshie.val.mesh_id_len > IW_ESSID_MAX_SIZE) {
+	if (defs.meshie.val.mesh_id_len > IEEE80211_MAX_SSID_LEN) {
 		lbs_pr_err("inconsistent mesh ID length");
-		defs.meshie.val.mesh_id_len = IW_ESSID_MAX_SIZE;
+		defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
 	}
 
 	/* SSID not null terminated: reserve room for \0 + \n */
@@ -214,7 +214,7 @@
 	int len;
 	int ret;
 
-	if (count < 2 || count > IW_ESSID_MAX_SIZE + 1)
+	if (count < 2 || count > IEEE80211_MAX_SSID_LEN + 1)
 		return -EINVAL;
 
 	memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
@@ -233,7 +233,7 @@
 	/* SSID len */
 	ie->val.mesh_id_len = len;
 	/* IE len */
-	ie->len = sizeof(struct mrvl_meshie_val) - IW_ESSID_MAX_SIZE + len;
+	ie->len = sizeof(struct mrvl_meshie_val) - IEEE80211_MAX_SSID_LEN + len;
 
 	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
 				   CMD_TYPE_MESH_SET_MESH_IE);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 65f02cc..9f18a19 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -4,7 +4,7 @@
 #include <linux/etherdevice.h>
 #include <linux/types.h>
 
-#include "hostcmd.h"
+#include "host.h"
 #include "radiotap.h"
 #include "decl.h"
 #include "dev.h"
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 6c95af3..c6a6c04 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -12,18 +12,19 @@
 #include <net/lib80211.h>
 
 #include "host.h"
-#include "decl.h"
 #include "dev.h"
 #include "scan.h"
+#include "assoc.h"
+#include "wext.h"
 #include "cmd.h"
 
 //! Approximate amount of data needed to pass a scan result back to iwlist
 #define MAX_SCAN_CELL_SIZE  (IW_EV_ADDR_LEN             \
-                             + IW_ESSID_MAX_SIZE        \
+                             + IEEE80211_MAX_SSID_LEN   \
                              + IW_EV_UINT_LEN           \
                              + IW_EV_FREQ_LEN           \
                              + IW_EV_QUAL_LEN           \
-                             + IW_ESSID_MAX_SIZE        \
+                             + IEEE80211_MAX_SSID_LEN   \
                              + IW_EV_PARAM_LEN          \
                              + 40)	/* 40 for WPAIE */
 
@@ -121,6 +122,189 @@
 
 
 
+/*********************************************************************/
+/*                                                                   */
+/* Region channel support                                            */
+/*                                                                   */
+/*********************************************************************/
+
+#define LBS_TX_PWR_DEFAULT		20	/*100mW */
+#define LBS_TX_PWR_US_DEFAULT		20	/*100mW */
+#define LBS_TX_PWR_JP_DEFAULT		16	/*50mW */
+#define LBS_TX_PWR_FR_DEFAULT		20	/*100mW */
+#define LBS_TX_PWR_EMEA_DEFAULT	20	/*100mW */
+
+/* Format { channel, frequency (MHz), maxtxpower } */
+/* band: 'B/G', region: USA FCC/Canada IC */
+static struct chan_freq_power channel_freq_power_US_BG[] = {
+	{1, 2412, LBS_TX_PWR_US_DEFAULT},
+	{2, 2417, LBS_TX_PWR_US_DEFAULT},
+	{3, 2422, LBS_TX_PWR_US_DEFAULT},
+	{4, 2427, LBS_TX_PWR_US_DEFAULT},
+	{5, 2432, LBS_TX_PWR_US_DEFAULT},
+	{6, 2437, LBS_TX_PWR_US_DEFAULT},
+	{7, 2442, LBS_TX_PWR_US_DEFAULT},
+	{8, 2447, LBS_TX_PWR_US_DEFAULT},
+	{9, 2452, LBS_TX_PWR_US_DEFAULT},
+	{10, 2457, LBS_TX_PWR_US_DEFAULT},
+	{11, 2462, LBS_TX_PWR_US_DEFAULT}
+};
+
+/* band: 'B/G', region: Europe ETSI */
+static struct chan_freq_power channel_freq_power_EU_BG[] = {
+	{1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
+	{2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
+	{3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
+	{4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
+	{5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
+	{6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
+	{7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
+	{8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
+	{9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
+	{10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
+	{11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
+	{12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
+	{13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
+};
+
+/* band: 'B/G', region: Spain */
+static struct chan_freq_power channel_freq_power_SPN_BG[] = {
+	{10, 2457, LBS_TX_PWR_DEFAULT},
+	{11, 2462, LBS_TX_PWR_DEFAULT}
+};
+
+/* band: 'B/G', region: France */
+static struct chan_freq_power channel_freq_power_FR_BG[] = {
+	{10, 2457, LBS_TX_PWR_FR_DEFAULT},
+	{11, 2462, LBS_TX_PWR_FR_DEFAULT},
+	{12, 2467, LBS_TX_PWR_FR_DEFAULT},
+	{13, 2472, LBS_TX_PWR_FR_DEFAULT}
+};
+
+/* band: 'B/G', region: Japan */
+static struct chan_freq_power channel_freq_power_JPN_BG[] = {
+	{1, 2412, LBS_TX_PWR_JP_DEFAULT},
+	{2, 2417, LBS_TX_PWR_JP_DEFAULT},
+	{3, 2422, LBS_TX_PWR_JP_DEFAULT},
+	{4, 2427, LBS_TX_PWR_JP_DEFAULT},
+	{5, 2432, LBS_TX_PWR_JP_DEFAULT},
+	{6, 2437, LBS_TX_PWR_JP_DEFAULT},
+	{7, 2442, LBS_TX_PWR_JP_DEFAULT},
+	{8, 2447, LBS_TX_PWR_JP_DEFAULT},
+	{9, 2452, LBS_TX_PWR_JP_DEFAULT},
+	{10, 2457, LBS_TX_PWR_JP_DEFAULT},
+	{11, 2462, LBS_TX_PWR_JP_DEFAULT},
+	{12, 2467, LBS_TX_PWR_JP_DEFAULT},
+	{13, 2472, LBS_TX_PWR_JP_DEFAULT},
+	{14, 2484, LBS_TX_PWR_JP_DEFAULT}
+};
+
+/**
+ * the structure for channel, frequency and power
+ */
+struct region_cfp_table {
+	u8 region;
+	struct chan_freq_power *cfp_BG;
+	int cfp_no_BG;
+};
+
+/**
+ * the structure for the mapping between region and CFP
+ */
+static struct region_cfp_table region_cfp_table[] = {
+	{0x10,			/*US FCC */
+	 channel_freq_power_US_BG,
+	 ARRAY_SIZE(channel_freq_power_US_BG),
+	 }
+	,
+	{0x20,			/*CANADA IC */
+	 channel_freq_power_US_BG,
+	 ARRAY_SIZE(channel_freq_power_US_BG),
+	 }
+	,
+	{0x30, /*EU*/ channel_freq_power_EU_BG,
+	 ARRAY_SIZE(channel_freq_power_EU_BG),
+	 }
+	,
+	{0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
+	 ARRAY_SIZE(channel_freq_power_SPN_BG),
+	 }
+	,
+	{0x32, /*FRANCE*/ channel_freq_power_FR_BG,
+	 ARRAY_SIZE(channel_freq_power_FR_BG),
+	 }
+	,
+	{0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
+	 ARRAY_SIZE(channel_freq_power_JPN_BG),
+	 }
+	,
+/*Add new region here */
+};
+
+/**
+ *  @brief This function finds the CFP in
+ *  region_cfp_table based on region and band parameter.
+ *
+ *  @param region  The region code
+ *  @param band	   The band
+ *  @param cfp_no  A pointer to CFP number
+ *  @return 	   A pointer to CFP
+ */
+static struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
+{
+	int i, end;
+
+	lbs_deb_enter(LBS_DEB_MAIN);
+
+	end = ARRAY_SIZE(region_cfp_table);
+
+	for (i = 0; i < end ; i++) {
+		lbs_deb_main("region_cfp_table[i].region=%d\n",
+			region_cfp_table[i].region);
+		if (region_cfp_table[i].region == region) {
+			*cfp_no = region_cfp_table[i].cfp_no_BG;
+			lbs_deb_leave(LBS_DEB_MAIN);
+			return region_cfp_table[i].cfp_BG;
+		}
+	}
+
+	lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
+	return NULL;
+}
+
+int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
+{
+	int ret = 0;
+	int i = 0;
+
+	struct chan_freq_power *cfp;
+	int cfp_no;
+
+	lbs_deb_enter(LBS_DEB_MAIN);
+
+	memset(priv->region_channel, 0, sizeof(priv->region_channel));
+
+	cfp = lbs_get_region_cfp_table(region, &cfp_no);
+	if (cfp != NULL) {
+		priv->region_channel[i].nrcfp = cfp_no;
+		priv->region_channel[i].CFP = cfp;
+	} else {
+		lbs_deb_main("wrong region code %#x in band B/G\n",
+		       region);
+		ret = -1;
+		goto out;
+	}
+	priv->region_channel[i].valid = 1;
+	priv->region_channel[i].region = region;
+	priv->region_channel[i].band = band;
+	i++;
+out:
+	lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
+	return ret;
+}
+
+
+
 
 /*********************************************************************/
 /*                                                                   */
@@ -161,31 +345,15 @@
 	scantype = CMD_SCAN_TYPE_ACTIVE;
 
 	for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) {
-		if (priv->enable11d && (priv->connect_status != LBS_CONNECTED)
-		    && (priv->mesh_connect_status != LBS_CONNECTED)) {
-			/* Scan all the supported chan for the first scan */
-			if (!priv->universal_channel[rgnidx].valid)
-				continue;
-			scanregion = &priv->universal_channel[rgnidx];
-
-			/* clear the parsed_region_chan for the first scan */
-			memset(&priv->parsed_region_chan, 0x00,
-			       sizeof(priv->parsed_region_chan));
-		} else {
-			if (!priv->region_channel[rgnidx].valid)
-				continue;
-			scanregion = &priv->region_channel[rgnidx];
-		}
+		if (!priv->region_channel[rgnidx].valid)
+			continue;
+		scanregion = &priv->region_channel[rgnidx];
 
 		for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) {
 			struct chanscanparamset *chan = &scanchanlist[chanidx];
 
 			cfp = scanregion->CFP + nextchan;
 
-			if (priv->enable11d)
-				scantype = lbs_get_scan_type_11d(cfp->channel,
-								 &priv->parsed_region_chan);
-
 			if (scanregion->band == BAND_B || scanregion->band == BAND_G)
 				chan->radiotype = CMD_SCAN_RADIO_TYPE_BG;
 
@@ -519,7 +687,6 @@
 	struct ieee_ie_cf_param_set *cf;
 	struct ieee_ie_ibss_param_set *ibss;
 	DECLARE_SSID_BUF(ssid);
-	struct ieee_ie_country_info_set *pcountryinfo;
 	uint8_t *pos, *end, *p;
 	uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
 	uint16_t beaconsize = 0;
@@ -642,26 +809,6 @@
 			lbs_deb_scan("got IBSS IE\n");
 			break;
 
-		case WLAN_EID_COUNTRY:
-			pcountryinfo = (struct ieee_ie_country_info_set *) pos;
-			lbs_deb_scan("got COUNTRY IE\n");
-			if (pcountryinfo->header.len < sizeof(pcountryinfo->countrycode)
-			    || pcountryinfo->header.len > 254) {
-				lbs_deb_scan("%s: 11D- Err CountryInfo len %d, min %zd, max 254\n",
-					     __func__,
-					     pcountryinfo->header.len,
-					     sizeof(pcountryinfo->countrycode));
-				ret = -1;
-				goto done;
-			}
-
-			memcpy(&bss->countryinfo, pcountryinfo,
-				pcountryinfo->header.len + 2);
-			lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo",
-				    (uint8_t *) pcountryinfo,
-				    (int) (pcountryinfo->header.len + 2));
-			break;
-
 		case WLAN_EID_EXT_SUPP_RATES:
 			/* only process extended supported rate if data rate is
 			 * already found. Data rate IE should come before
@@ -812,7 +959,7 @@
 	/* SSID */
 	iwe.cmd = SIOCGIWESSID;
 	iwe.u.data.flags = 1;
-	iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IW_ESSID_MAX_SIZE);
+	iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IEEE80211_MAX_SSID_LEN);
 	start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);
 
 	/* Mode */
@@ -1022,9 +1169,12 @@
 		return -EAGAIN;
 
 	/* Update RSSI if current BSS is a locally created ad-hoc BSS */
-	if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate)
-		lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
-					     CMD_OPTION_WAITFORRSP, 0, NULL);
+	if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) {
+		err = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
+				CMD_OPTION_WAITFORRSP, 0, NULL);
+		if (err)
+			goto out;
+	}
 
 	mutex_lock(&priv->lock);
 	list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
@@ -1058,7 +1208,7 @@
 
 	dwrq->length = (ev - extra);
 	dwrq->flags = 0;
-
+out:
 	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err);
 	return err;
 }
@@ -1141,11 +1291,11 @@
 	/* The size of the TLV buffer is equal to the entire command response
 	 *   size (scanrespsize) minus the fixed fields (sizeof()'s), the
 	 *   BSS Descriptions (bssdescriptsize as bytesLef) and the command
-	 *   response header (S_DS_GEN)
+	 *   response header (sizeof(struct cmd_header))
 	 */
 	tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize)
 				     + sizeof(scanresp->nr_sets)
-				     + S_DS_GEN);
+				     + sizeof(struct cmd_header));
 
 	/*
 	 *  Process each scan response returned (scanresp->nr_sets). Save
diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h
index fab7d5d..8fb1706 100644
--- a/drivers/net/wireless/libertas/scan.h
+++ b/drivers/net/wireless/libertas/scan.h
@@ -9,8 +9,36 @@
 
 #include <net/iw_handler.h>
 
+struct lbs_private;
+
 #define MAX_NETWORK_COUNT 128
 
+/** Chan-freq-TxPower mapping table*/
+struct chan_freq_power {
+	/** channel Number		*/
+	u16 channel;
+	/** frequency of this channel	*/
+	u32 freq;
+	/** Max allowed Tx power level	*/
+	u16 maxtxpower;
+	/** TRUE: channel unsupported; FALSE: supported */
+	u8 unsupported;
+};
+
+/** region-band mapping table*/
+struct region_channel {
+	/** TRUE if this entry is valid		     */
+	u8 valid;
+	/** region code for US, Japan ...	     */
+	u8 region;
+	/** band B/G/A, used for BAND_CONFIG cmd	     */
+	u8 band;
+	/** Actual No. of elements in the array below */
+	u8 nrcfp;
+	/** chan-freq-txpower mapping table*/
+	struct chan_freq_power *CFP;
+};
+
 /**
  *  @brief Maximum number of channels that can be sent in a setuserscan ioctl
  */
@@ -18,6 +46,8 @@
 
 int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len);
 
+int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
+
 int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
 				u8 ssid_len);
 
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 4c018f7..5d7c011 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -4,7 +4,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 
-#include "hostcmd.h"
+#include "host.h"
 #include "radiotap.h"
 #include "decl.h"
 #include "defs.h"
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 99905df..3e72c86 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -5,8 +5,8 @@
 #define _LBS_TYPES_H_
 
 #include <linux/if_ether.h>
+#include <linux/ieee80211.h>
 #include <asm/byteorder.h>
-#include <linux/wireless.h>
 
 struct ieee_ie_header {
 	u8 id;
@@ -247,7 +247,7 @@
 	uint8_t active_metric_id;
 	uint8_t mesh_capability;
 	uint8_t mesh_id_len;
-	uint8_t mesh_id[IW_ESSID_MAX_SIZE];
+	uint8_t mesh_id[IEEE80211_MAX_SSID_LEN];
 } __attribute__ ((packed));
 
 struct mrvl_meshie {
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 69dd19b..a8eb9e1 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -45,6 +45,64 @@
 	priv->pending_assoc_req = NULL;
 }
 
+void lbs_send_disconnect_notification(struct lbs_private *priv)
+{
+	union iwreq_data wrqu;
+
+	memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
+	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+	wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+}
+
+static void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
+{
+	union iwreq_data iwrq;
+	u8 buf[50];
+
+	lbs_deb_enter(LBS_DEB_WEXT);
+
+	memset(&iwrq, 0, sizeof(union iwreq_data));
+	memset(buf, 0, sizeof(buf));
+
+	snprintf(buf, sizeof(buf) - 1, "%s", str);
+
+	iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
+
+	/* Send Event to upper layer */
+	lbs_deb_wext("event indication string %s\n", (char *)buf);
+	lbs_deb_wext("event indication length %d\n", iwrq.data.length);
+	lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
+
+	wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
+
+	lbs_deb_leave(LBS_DEB_WEXT);
+}
+
+/**
+ *  @brief This function handles MIC failure event.
+ *
+ *  @param priv    A pointer to struct lbs_private structure
+ *  @param event   the event id
+ *  @return 	   n/a
+ */
+void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
+{
+	char buf[50];
+
+	lbs_deb_enter(LBS_DEB_CMD);
+	memset(buf, 0, sizeof(buf));
+
+	sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
+
+	if (event == MACREG_INT_CODE_MIC_ERR_UNICAST)
+		strcat(buf, "unicast ");
+	else
+		strcat(buf, "multicast ");
+
+	lbs_send_iwevcustom_event(priv, buf);
+	lbs_deb_leave(LBS_DEB_CMD);
+}
+
 /**
  *  @brief Find the channel frequency power info with specific channel
  *
@@ -65,8 +123,6 @@
 	for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
 		rc = &priv->region_channel[j];
 
-		if (priv->enable11d)
-			rc = &priv->universal_channel[j];
 		if (!rc->valid || !rc->CFP)
 			continue;
 		if (rc->band != band)
@@ -106,8 +162,6 @@
 	for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
 		rc = &priv->region_channel[j];
 
-		if (priv->enable11d)
-			rc = &priv->universal_channel[j];
 		if (!rc->valid || !rc->CFP)
 			continue;
 		if (rc->band != band)
@@ -168,12 +222,12 @@
 	lbs_deb_enter(LBS_DEB_WEXT);
 
 	cfp = lbs_find_cfp_by_band_and_channel(priv, 0,
-					   priv->curbssparams.channel);
+					   priv->channel);
 
 	if (!cfp) {
-		if (priv->curbssparams.channel)
+		if (priv->channel)
 			lbs_deb_wext("invalid channel %d\n",
-			       priv->curbssparams.channel);
+			       priv->channel);
 		return -EINVAL;
 	}
 
@@ -546,8 +600,6 @@
 	struct chan_freq_power *cfp;
 	u8 rates[MAX_RATES + 1];
 
-	u8 flag = 0;
-
 	lbs_deb_enter(LBS_DEB_WEXT);
 
 	dwrq->length = sizeof(struct iw_range);
@@ -569,52 +621,21 @@
 
 	range->scan_capa = IW_SCAN_CAPA_ESSID;
 
-	if (priv->enable11d &&
-	    (priv->connect_status == LBS_CONNECTED ||
-	    priv->mesh_connect_status == LBS_CONNECTED)) {
-		u8 chan_no;
-		u8 band;
-
-		struct parsed_region_chan_11d *parsed_region_chan =
-		    &priv->parsed_region_chan;
-
-		if (parsed_region_chan == NULL) {
-			lbs_deb_wext("11d: parsed_region_chan is NULL\n");
-			goto out;
-		}
-		band = parsed_region_chan->band;
-		lbs_deb_wext("band %d, nr_char %d\n", band,
-		       parsed_region_chan->nr_chan);
-
+	for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
+	     && (j < ARRAY_SIZE(priv->region_channel)); j++) {
+		cfp = priv->region_channel[j].CFP;
 		for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
-		     && (i < parsed_region_chan->nr_chan); i++) {
-			chan_no = parsed_region_chan->chanpwr[i].chan;
-			lbs_deb_wext("chan_no %d\n", chan_no);
-			range->freq[range->num_frequency].i = (long)chan_no;
+		     && priv->region_channel[j].valid
+		     && cfp
+		     && (i < priv->region_channel[j].nrcfp); i++) {
+			range->freq[range->num_frequency].i =
+			    (long)cfp->channel;
 			range->freq[range->num_frequency].m =
-			    (long)lbs_chan_2_freq(chan_no) * 100000;
+			    (long)cfp->freq * 100000;
 			range->freq[range->num_frequency].e = 1;
+			cfp++;
 			range->num_frequency++;
 		}
-		flag = 1;
-	}
-	if (!flag) {
-		for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
-		     && (j < ARRAY_SIZE(priv->region_channel)); j++) {
-			cfp = priv->region_channel[j].CFP;
-			for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
-			     && priv->region_channel[j].valid
-			     && cfp
-			     && (i < priv->region_channel[j].nrcfp); i++) {
-				range->freq[range->num_frequency].i =
-				    (long)cfp->channel;
-				range->freq[range->num_frequency].m =
-				    (long)cfp->freq * 100000;
-				range->freq[range->num_frequency].e = 1;
-				cfp++;
-				range->num_frequency++;
-			}
-		}
 	}
 
 	lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n",
@@ -699,7 +720,6 @@
 		                  | IW_ENC_CAPA_CIPHER_CCMP;
 	}
 
-out:
 	lbs_deb_leave(LBS_DEB_WEXT);
 	return 0;
 }
@@ -832,7 +852,7 @@
 	u32 rssi_qual;
 	u32 tx_qual;
 	u32 quality = 0;
-	int stats_valid = 0;
+	int ret, stats_valid = 0;
 	u8 rssi;
 	u32 tx_retries;
 	struct cmd_ds_802_11_get_log log;
@@ -881,7 +901,9 @@
 
 	memset(&log, 0, sizeof(log));
 	log.hdr.size = cpu_to_le16(sizeof(log));
-	lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
+	ret = lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
+	if (ret)
+		goto out;
 
 	tx_retries = le32_to_cpu(log.retry);
 
@@ -909,8 +931,10 @@
 	stats_valid = 1;
 
 	/* update stats asynchronously for future calls */
-	lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
+	ret = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
 					0, 0, NULL);
+	if (ret)
+		lbs_pr_err("RSSI command failed\n");
 out:
 	if (!stats_valid) {
 		priv->wstats.miss.beacon = 0;
@@ -1020,7 +1044,7 @@
 		goto out;
 	}
 
-	if (fwrq->m != priv->curbssparams.channel) {
+	if (fwrq->m != priv->channel) {
 		lbs_deb_wext("mesh channel change forces eth disconnect\n");
 		if (priv->mode == IW_MODE_INFRA)
 			lbs_cmd_80211_deauthenticate(priv,
@@ -2023,7 +2047,7 @@
 {
 	struct lbs_private *priv = dev->ml_priv;
 	int ret = 0;
-	u8 ssid[IW_ESSID_MAX_SIZE];
+	u8 ssid[IEEE80211_MAX_SSID_LEN];
 	u8 ssid_len = 0;
 	struct assoc_request * assoc_req;
 	int in_ssid_len = dwrq->length;
@@ -2037,7 +2061,7 @@
 	}
 
 	/* Check the size of the string */
-	if (in_ssid_len > IW_ESSID_MAX_SIZE) {
+	if (in_ssid_len > IEEE80211_MAX_SSID_LEN) {
 		ret = -E2BIG;
 		goto out;
 	}
@@ -2068,7 +2092,7 @@
 			ret = -ENOMEM;
 		} else {
 			/* Copy the SSID to the association request */
-			memcpy(&assoc_req->ssid, &ssid, IW_ESSID_MAX_SIZE);
+			memcpy(&assoc_req->ssid, &ssid, IEEE80211_MAX_SSID_LEN);
 			assoc_req->ssid_len = ssid_len;
 			set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);
 			lbs_postpone_association_work(priv);
@@ -2119,7 +2143,7 @@
 	}
 
 	/* Check the size of the string */
-	if (dwrq->length > IW_ESSID_MAX_SIZE) {
+	if (dwrq->length > IEEE80211_MAX_SSID_LEN) {
 		ret = -E2BIG;
 		goto out;
 	}
@@ -2134,7 +2158,7 @@
 	}
 
 	lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
-			priv->curbssparams.channel);
+			priv->channel);
  out:
 	lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
 	return ret;
diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h
index 4c08db4..7863baf 100644
--- a/drivers/net/wireless/libertas/wext.h
+++ b/drivers/net/wireless/libertas/wext.h
@@ -4,7 +4,15 @@
 #ifndef	_LBS_WEXT_H_
 #define	_LBS_WEXT_H_
 
+void lbs_send_disconnect_notification(struct lbs_private *priv);
+void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
+
 extern struct iw_handler_def lbs_handler_def;
 extern struct iw_handler_def mesh_handler_def;
 
+struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
+	struct lbs_private *priv,
+	u8 band,
+	u16 channel);
+
 #endif
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 359652d..404830f 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -60,8 +60,15 @@
 /* Set priv->firmware type, determine firmware properties
  * This function can be called before we have registerred with netdev,
  * so all errors go out with dev_* rather than printk
+ *
+ * If fw_name is non-NULL, a firmware description is stored in it.
+ * If hw_ver is non-NULL, a HW version is stored there.
+ *
+ * These are output via generic cfg80211 ethtool support.
  */
-int determine_fw_capabilities(struct orinoco_private *priv)
+int determine_fw_capabilities(struct orinoco_private *priv,
+			      char *fw_name, size_t fw_name_len,
+			      u32 *hw_ver)
 {
 	struct device *dev = priv->dev;
 	hermes_t *hw = &priv->hw;
@@ -85,6 +92,12 @@
 	dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n",
 		 nic_id.id, nic_id.variant, nic_id.major, nic_id.minor);
 
+	if (hw_ver)
+		*hw_ver = (((nic_id.id & 0xff) << 24) |
+			   ((nic_id.variant & 0xff) << 16) |
+			   ((nic_id.major & 0xff) << 8) |
+			   (nic_id.minor & 0xff));
+
 	priv->firmware_type = determine_firmware_type(&nic_id);
 
 	/* Get the firmware version */
@@ -135,8 +148,9 @@
 	case FIRMWARE_TYPE_AGERE:
 		/* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
 		   ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
-		snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
-			 "Lucent/Agere %d.%02d", sta_id.major, sta_id.minor);
+		if (fw_name)
+			snprintf(fw_name, fw_name_len, "Lucent/Agere %d.%02d",
+				 sta_id.major, sta_id.minor);
 
 		firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
 
@@ -185,8 +199,8 @@
 			tmp[SYMBOL_MAX_VER_LEN] = '\0';
 		}
 
-		snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
-			 "Symbol %s", tmp);
+		if (fw_name)
+			snprintf(fw_name, fw_name_len, "Symbol %s", tmp);
 
 		priv->has_ibss = (firmver >= 0x20000);
 		priv->has_wep = (firmver >= 0x15012);
@@ -224,9 +238,9 @@
 		 * different and less well tested */
 		/* D-Link MAC : 00:40:05:* */
 		/* Addtron MAC : 00:90:D1:* */
-		snprintf(priv->fw_name, sizeof(priv->fw_name) - 1,
-			 "Intersil %d.%d.%d", sta_id.major, sta_id.minor,
-			 sta_id.variant);
+		if (fw_name)
+			snprintf(fw_name, fw_name_len, "Intersil %d.%d.%d",
+				 sta_id.major, sta_id.minor, sta_id.variant);
 
 		firmver = ((unsigned long)sta_id.major << 16) |
 			((unsigned long)sta_id.minor << 8) | sta_id.variant;
@@ -245,7 +259,8 @@
 		}
 		break;
 	}
-	dev_info(dev, "Firmware determined as %s\n", priv->fw_name);
+	if (fw_name)
+		dev_info(dev, "Firmware determined as %s\n", fw_name);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 8df6e87..e2f7fdc 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -24,7 +24,8 @@
 struct orinoco_private;
 struct dev_addr_list;
 
-int determine_fw_capabilities(struct orinoco_private *priv);
+int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
+			      size_t fw_name_len, u32 *hw_ver);
 int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr);
 int orinoco_hw_allocate_fid(struct orinoco_private *priv);
 int orinoco_get_bitratemode(int bitrate, int automatic);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 5fdc59c..753a180 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -83,7 +83,6 @@
 #include <linux/device.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/ethtool.h>
 #include <linux/suspend.h>
 #include <linux/if_arp.h>
 #include <linux/wireless.h>
@@ -162,8 +161,6 @@
 				 | HERMES_EV_WTERR | HERMES_EV_INFO \
 				 | HERMES_EV_INFDROP)
 
-static const struct ethtool_ops orinoco_ethtool_ops;
-
 /********************************************************************/
 /* Data types                                                       */
 /********************************************************************/
@@ -1994,7 +1991,9 @@
 		goto out;
 	}
 
-	err = determine_fw_capabilities(priv);
+	err = determine_fw_capabilities(priv, wiphy->fw_version,
+					sizeof(wiphy->fw_version),
+					&wiphy->hw_version);
 	if (err != 0) {
 		dev_err(dev, "Incompatible firmware, aborting\n");
 		goto out;
@@ -2010,7 +2009,9 @@
 			priv->do_fw_download = 0;
 
 		/* Check firmware version again */
-		err = determine_fw_capabilities(priv);
+		err = determine_fw_capabilities(priv, wiphy->fw_version,
+						sizeof(wiphy->fw_version),
+						&wiphy->hw_version);
 		if (err != 0) {
 			dev_err(dev, "Incompatible firmware, aborting\n");
 			goto out;
@@ -2212,7 +2213,6 @@
 	dev->ieee80211_ptr = wdev;
 	dev->netdev_ops = &orinoco_netdev_ops;
 	dev->watchdog_timeo = HZ; /* 1 second timeout */
-	dev->ethtool_ops = &orinoco_ethtool_ops;
 	dev->wireless_handlers = &orinoco_handler_def;
 #ifdef WIRELESS_SPY
 	dev->wireless_data = &priv->wireless_data;
@@ -2349,27 +2349,6 @@
 }
 EXPORT_SYMBOL(orinoco_down);
 
-static void orinoco_get_drvinfo(struct net_device *dev,
-				struct ethtool_drvinfo *info)
-{
-	struct orinoco_private *priv = ndev_priv(dev);
-
-	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
-	strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
-	strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1);
-	if (dev->dev.parent)
-		strncpy(info->bus_info, dev_name(dev->dev.parent),
-			sizeof(info->bus_info) - 1);
-	else
-		snprintf(info->bus_info, sizeof(info->bus_info) - 1,
-			 "PCMCIA %p", priv->hw.iobase);
-}
-
-static const struct ethtool_ops orinoco_ethtool_ops = {
-	.get_drvinfo = orinoco_get_drvinfo,
-	.get_link = ethtool_op_get_link,
-};
-
 /********************************************************************/
 /* Module initialization                                            */
 /********************************************************************/
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 9ac6f1d..665ef56 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -93,7 +93,6 @@
 
 	/* Capabilities of the hardware/firmware */
 	fwtype_t firmware_type;
-	char fw_name[32];
 	int ibss_port;
 	int nicbuf_size;
 	u16 channel_mask;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ed1f997..390c0c7 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -53,6 +53,32 @@
 
 	  When compiled as a module, this driver will be called rt61pci.
 
+config RT2800PCI_PCI
+	tristate
+	depends on PCI
+	default y
+
+config RT2800PCI_SOC
+	tristate
+	depends on RALINK_RT288X || RALINK_RT305X
+	default y
+
+config RT2800PCI
+	tristate "Ralink rt2800 (PCI/PCMCIA) support"
+	depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL
+	select RT2X00_LIB_PCI if RT2800PCI_PCI
+	select RT2X00_LIB_SOC if RT2800PCI_SOC
+	select RT2X00_LIB_HT
+	select RT2X00_LIB_FIRMWARE
+	select RT2X00_LIB_CRYPTO
+	select CRC_CCITT
+	select EEPROM_93CX6
+	---help---
+	  This adds support for the rt2800 wireless chipset family.
+	  Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052
+
+	  When compiled as a module, this driver will be called "rt2800pci.ko".
+
 config RT2500USB
 	tristate "Ralink rt2500 (USB) support"
 	depends on USB
@@ -95,6 +121,10 @@
 	tristate
 	select RT2X00_LIB
 
+config RT2X00_LIB_SOC
+	tristate
+	select RT2X00_LIB
+
 config RT2X00_LIB_USB
 	tristate
 	select RT2X00_LIB
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 13043ea..912f5f6 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -11,10 +11,12 @@
 
 obj-$(CONFIG_RT2X00_LIB)		+= rt2x00lib.o
 obj-$(CONFIG_RT2X00_LIB_PCI)		+= rt2x00pci.o
+obj-$(CONFIG_RT2X00_LIB_SOC)		+= rt2x00soc.o
 obj-$(CONFIG_RT2X00_LIB_USB)		+= rt2x00usb.o
 obj-$(CONFIG_RT2400PCI)			+= rt2400pci.o
 obj-$(CONFIG_RT2500PCI)			+= rt2500pci.o
 obj-$(CONFIG_RT61PCI)			+= rt61pci.o
+obj-$(CONFIG_RT2800PCI)			+= rt2800pci.o
 obj-$(CONFIG_RT2500USB)			+= rt2500usb.o
 obj-$(CONFIG_RT73USB)			+= rt73usb.o
 obj-$(CONFIG_RT2800USB)			+= rt2800usb.o
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
new file mode 100644
index 0000000..be81788
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -0,0 +1,3323 @@
+/*
+	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2800pci
+	Abstract: rt2800pci device specific routines.
+	Supported chipsets: RT2800E & RT2800ED.
+ */
+
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/eeprom_93cx6.h>
+
+#include "rt2x00.h"
+#include "rt2x00pci.h"
+#include "rt2x00soc.h"
+#include "rt2800pci.h"
+
+#ifdef CONFIG_RT2800PCI_PCI_MODULE
+#define CONFIG_RT2800PCI_PCI
+#endif
+
+#ifdef CONFIG_RT2800PCI_WISOC_MODULE
+#define CONFIG_RT2800PCI_WISOC
+#endif
+
+/*
+ * Allow hardware encryption to be disabled.
+ */
+static int modparam_nohwcrypt = 1;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+/*
+ * Register access.
+ * BBP and RF registers require indirect register access,
+ * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this.
+ * These indirect registers work with busy bits,
+ * and we will try at most REGISTER_BUSY_COUNT times to access
+ * the register while taking a REGISTER_BUSY_DELAY us delay
+ * between each attempt. When the busy bit is still set at that time,
+ * the access attempt is considered to have failed,
+ * and we will print an error.
+ */
+#define WAIT_FOR_BBP(__dev, __reg) \
+	rt2x00pci_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
+#define WAIT_FOR_RFCSR(__dev, __reg) \
+	rt2x00pci_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
+#define WAIT_FOR_RF(__dev, __reg) \
+	rt2x00pci_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
+#define WAIT_FOR_MCU(__dev, __reg) \
+	rt2x00pci_regbusy_read((__dev), H2M_MAILBOX_CSR, \
+			       H2M_MAILBOX_CSR_OWNER, (__reg))
+
+static void rt2800pci_bbp_write(struct rt2x00_dev *rt2x00dev,
+				const unsigned int word, const u8 value)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the BBP becomes available, afterwards we
+	 * can safely write the new data into the register.
+	 */
+	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
+		reg = 0;
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
+
+		rt2x00pci_register_write(rt2x00dev, BBP_CSR_CFG, reg);
+	}
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800pci_bbp_read(struct rt2x00_dev *rt2x00dev,
+			       const unsigned int word, u8 *value)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the BBP becomes available, afterwards we
+	 * can safely write the read request into the register.
+	 * After the data has been written, we wait until hardware
+	 * returns the correct value. If at any time the register
+	 * doesn't become available in time, reg will be 0xffffffff,
+	 * which means we return 0xff to the caller.
+	 */
+	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
+		reg = 0;
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
+		rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
+
+		rt2x00pci_register_write(rt2x00dev, BBP_CSR_CFG, reg);
+
+		WAIT_FOR_BBP(rt2x00dev, &reg);
+	}
+
+	*value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800pci_rfcsr_write(struct rt2x00_dev *rt2x00dev,
+				  const unsigned int word, const u8 value)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the RFCSR becomes available, afterwards we
+	 * can safely write the new data into the register.
+	 */
+	if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
+		reg = 0;
+		rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
+		rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
+		rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
+		rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+
+		rt2x00pci_register_write(rt2x00dev, RF_CSR_CFG, reg);
+	}
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800pci_rfcsr_read(struct rt2x00_dev *rt2x00dev,
+				 const unsigned int word, u8 *value)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the RFCSR becomes available, afterwards we
+	 * can safely write the read request into the register.
+	 * After the data has been written, we wait until hardware
+	 * returns the correct value. If at any time the register
+	 * doesn't become available in time, reg will be 0xffffffff,
+	 * which means we return 0xff to the caller.
+	 */
+	if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
+		reg = 0;
+		rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
+		rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
+		rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
+
+		rt2x00pci_register_write(rt2x00dev, RF_CSR_CFG, reg);
+
+		WAIT_FOR_RFCSR(rt2x00dev, &reg);
+	}
+
+	*value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800pci_rf_write(struct rt2x00_dev *rt2x00dev,
+			       const unsigned int word, const u32 value)
+{
+	u32 reg;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the RF becomes available, afterwards we
+	 * can safely write the new data into the register.
+	 */
+	if (WAIT_FOR_RF(rt2x00dev, &reg)) {
+		reg = 0;
+		rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value);
+		rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0);
+		rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0);
+		rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1);
+
+		rt2x00pci_register_write(rt2x00dev, RF_CSR_CFG0, reg);
+		rt2x00_rf_write(rt2x00dev, word, value);
+	}
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800pci_mcu_request(struct rt2x00_dev *rt2x00dev,
+				  const u8 command, const u8 token,
+				  const u8 arg0, const u8 arg1)
+{
+	u32 reg;
+
+	/*
+	 * RT2880 and RT3052 don't support MCU requests.
+	 */
+	if (rt2x00_rt(&rt2x00dev->chip, RT2880) ||
+	    rt2x00_rt(&rt2x00dev->chip, RT3052))
+		return;
+
+	mutex_lock(&rt2x00dev->csr_mutex);
+
+	/*
+	 * Wait until the MCU becomes available, afterwards we
+	 * can safely write the new data into the register.
+	 */
+	if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
+		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
+		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
+		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
+		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
+		rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg);
+
+		reg = 0;
+		rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
+		rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg);
+	}
+
+	mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
+{
+	unsigned int i;
+	u32 reg;
+
+	for (i = 0; i < 200; i++) {
+		rt2x00pci_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
+
+		if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) ||
+		    (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) ||
+		    (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD2) == token) ||
+		    (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD3) == token))
+			break;
+
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	if (i == 200)
+		ERROR(rt2x00dev, "MCU request failed, no response from hardware\n");
+
+	rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
+	rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
+}
+
+#ifdef CONFIG_RT2800PCI_WISOC
+static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
+{
+	u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
+
+	memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
+}
+#else
+static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
+{
+}
+#endif /* CONFIG_RT2800PCI_WISOC */
+
+#ifdef CONFIG_RT2800PCI_PCI
+static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
+{
+	struct rt2x00_dev *rt2x00dev = eeprom->data;
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg);
+
+	eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN);
+	eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT);
+	eeprom->reg_data_clock =
+	    !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK);
+	eeprom->reg_chip_select =
+	    !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT);
+}
+
+static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
+{
+	struct rt2x00_dev *rt2x00dev = eeprom->data;
+	u32 reg = 0;
+
+	rt2x00_set_field32(&reg, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in);
+	rt2x00_set_field32(&reg, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out);
+	rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK,
+			   !!eeprom->reg_data_clock);
+	rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT,
+			   !!eeprom->reg_chip_select);
+
+	rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg);
+}
+
+static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
+{
+	struct eeprom_93cx6 eeprom;
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg);
+
+	eeprom.data = rt2x00dev;
+	eeprom.register_read = rt2800pci_eepromregister_read;
+	eeprom.register_write = rt2800pci_eepromregister_write;
+	eeprom.width = !rt2x00_get_field32(reg, E2PROM_CSR_TYPE) ?
+	    PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66;
+	eeprom.reg_data_in = 0;
+	eeprom.reg_data_out = 0;
+	eeprom.reg_data_clock = 0;
+	eeprom.reg_chip_select = 0;
+
+	eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom,
+			       EEPROM_SIZE / sizeof(u16));
+}
+
+static void rt2800pci_efuse_read(struct rt2x00_dev *rt2x00dev,
+				 unsigned int i)
+{
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, EFUSE_CTRL, &reg);
+	rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
+	rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
+	rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
+	rt2x00pci_register_write(rt2x00dev, EFUSE_CTRL, reg);
+
+	/* Wait until the EEPROM has been loaded */
+	rt2x00pci_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
+
+	/* Apparently the data is read from end to start */
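+	/*
+	 * Each EFUSE_DATAx register holds 32 bits, i.e. two 16-bit EEPROM
+	 * words, so a single kick fills 8 words of the EEPROM cache.
+	 */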
+	rt2x00pci_register_read(rt2x00dev, EFUSE_DATA3,
+				(u32 *)&rt2x00dev->eeprom[i]);
+	rt2x00pci_register_read(rt2x00dev, EFUSE_DATA2,
+				(u32 *)&rt2x00dev->eeprom[i + 2]);
+	rt2x00pci_register_read(rt2x00dev, EFUSE_DATA1,
+				(u32 *)&rt2x00dev->eeprom[i + 4]);
+	rt2x00pci_register_read(rt2x00dev, EFUSE_DATA0,
+				(u32 *)&rt2x00dev->eeprom[i + 6]);
+}
+
+static void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < EEPROM_SIZE / sizeof(u16); i += 8)
+		rt2800pci_efuse_read(rt2x00dev, i);
+}
+#else
+static inline void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
+{
+}
+
+static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
+{
+}
+#endif /* CONFIG_RT2800PCI_PCI */
+
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+static const struct rt2x00debug rt2800pci_rt2x00debug = {
+	.owner	= THIS_MODULE,
+	.csr	= {
+		.read		= rt2x00pci_register_read,
+		.write		= rt2x00pci_register_write,
+		.flags		= RT2X00DEBUGFS_OFFSET,
+		.word_base	= CSR_REG_BASE,
+		.word_size	= sizeof(u32),
+		.word_count	= CSR_REG_SIZE / sizeof(u32),
+	},
+	.eeprom	= {
+		.read		= rt2x00_eeprom_read,
+		.write		= rt2x00_eeprom_write,
+		.word_base	= EEPROM_BASE,
+		.word_size	= sizeof(u16),
+		.word_count	= EEPROM_SIZE / sizeof(u16),
+	},
+	.bbp	= {
+		.read		= rt2800pci_bbp_read,
+		.write		= rt2800pci_bbp_write,
+		.word_base	= BBP_BASE,
+		.word_size	= sizeof(u8),
+		.word_count	= BBP_SIZE / sizeof(u8),
+	},
+	.rf	= {
+		.read		= rt2x00_rf_read,
+		.write		= rt2800pci_rf_write,
+		.word_base	= RF_BASE,
+		.word_size	= sizeof(u32),
+		.word_count	= RF_SIZE / sizeof(u32),
+	},
+};
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+
+static int rt2800pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+	return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
+}
+
+#ifdef CONFIG_RT2X00_LIB_LEDS
+static void rt2800pci_brightness_set(struct led_classdev *led_cdev,
+				     enum led_brightness brightness)
+{
+	struct rt2x00_led *led =
+	    container_of(led_cdev, struct rt2x00_led, led_dev);
+	unsigned int enabled = brightness != LED_OFF;
+	unsigned int bg_mode =
+	    (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+	unsigned int polarity =
+		rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
+				   EEPROM_FREQ_LED_POLARITY);
+	unsigned int ledmode =
+		rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
+				   EEPROM_FREQ_LED_MODE);
+
+	if (led->type == LED_TYPE_RADIO) {
+		rt2800pci_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
+				      enabled ? 0x20 : 0);
+	} else if (led->type == LED_TYPE_ASSOC) {
+		rt2800pci_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
+				      enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
+	} else if (led->type == LED_TYPE_QUALITY) {
+		/*
+		 * The brightness is divided into 6 levels (0 - 5).
+		 * The specs tell us the following levels:
+		 *	0, 1, 3, 7, 15, 31
+		 * To determine the level we can simply use bit shifting:
+		 *	(1 << level) - 1
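+		 * e.g. brightness LED_FULL / 2 gives level 3, i.e. (1 << 3) - 1 = 7.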
+		 */
+		rt2800pci_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
+				      (1 << brightness / (LED_FULL / 6)) - 1,
+				      polarity);
+	}
+}
+
+static int rt2800pci_blink_set(struct led_classdev *led_cdev,
+			       unsigned long *delay_on,
+			       unsigned long *delay_off)
+{
+	struct rt2x00_led *led =
+	    container_of(led_cdev, struct rt2x00_led, led_dev);
+	u32 reg;
+
+	rt2x00pci_register_read(led->rt2x00dev, LED_CFG, &reg);
+	rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
+	rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
+	rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
+	rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
+	rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
+	rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
+	rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
+	rt2x00pci_register_write(led->rt2x00dev, LED_CFG, reg);
+
+	return 0;
+}
+
+static void rt2800pci_init_led(struct rt2x00_dev *rt2x00dev,
+			       struct rt2x00_led *led,
+			       enum led_type type)
+{
+	led->rt2x00dev = rt2x00dev;
+	led->type = type;
+	led->led_dev.brightness_set = rt2800pci_brightness_set;
+	led->led_dev.blink_set = rt2800pci_blink_set;
+	led->flags = LED_INITIALIZED;
+}
+#endif /* CONFIG_RT2X00_LIB_LEDS */
+
+/*
+ * Configuration handlers.
+ */
+static void rt2800pci_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
+				       struct rt2x00lib_crypto *crypto,
+				       struct ieee80211_key_conf *key)
+{
+	struct mac_wcid_entry wcid_entry;
+	struct mac_iveiv_entry iveiv_entry;
+	u32 offset;
+	u32 reg;
+
+	offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
+
+	rt2x00pci_register_read(rt2x00dev, offset, &reg);
+	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
+			   !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
+			   (crypto->cmd == SET_KEY) * crypto->cipher);
+	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
+			   (crypto->cmd == SET_KEY) * crypto->bssidx);
+	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
+	rt2x00pci_register_write(rt2x00dev, offset, reg);
+
+	offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
+
+	memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+	if ((crypto->cipher == CIPHER_TKIP) ||
+	    (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+	    (crypto->cipher == CIPHER_AES))
+		iveiv_entry.iv[3] |= 0x20;
+	iveiv_entry.iv[3] |= key->keyidx << 6;
+	rt2x00pci_register_multiwrite(rt2x00dev, offset,
+				      &iveiv_entry, sizeof(iveiv_entry));
+
+	offset = MAC_WCID_ENTRY(key->hw_key_idx);
+
+	memset(&wcid_entry, 0, sizeof(wcid_entry));
+	if (crypto->cmd == SET_KEY)
+		memcpy(&wcid_entry, crypto->address, ETH_ALEN);
+	rt2x00pci_register_multiwrite(rt2x00dev, offset,
+				      &wcid_entry, sizeof(wcid_entry));
+}
+
+static int rt2800pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
+				       struct rt2x00lib_crypto *crypto,
+				       struct ieee80211_key_conf *key)
+{
+	struct hw_key_entry key_entry;
+	struct rt2x00_field32 field;
+	u32 offset;
+	u32 reg;
+
+	if (crypto->cmd == SET_KEY) {
+		key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx;
+
+		memcpy(key_entry.key, crypto->key,
+		       sizeof(key_entry.key));
+		memcpy(key_entry.tx_mic, crypto->tx_mic,
+		       sizeof(key_entry.tx_mic));
+		memcpy(key_entry.rx_mic, crypto->rx_mic,
+		       sizeof(key_entry.rx_mic));
+
+		offset = SHARED_KEY_ENTRY(key->hw_key_idx);
+		rt2x00pci_register_multiwrite(rt2x00dev, offset,
+					      &key_entry, sizeof(key_entry));
+	}
+
+	/*
+	 * The cipher types are stored over multiple registers
+	 * starting with SHARED_KEY_MODE_BASE. Each 32-bit word
+	 * holds the cipher types for the 4 keys of 2 bssidx.
+	 * Using the individual field defines would add overhead,
+	 * so just calculate the correct offset and mask.
+	 */
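+	/* e.g. hw_key_idx 5 lands in SHARED_KEY_MODE_ENTRY(0), bits 20-22 */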
+	field.bit_offset = 4 * (key->hw_key_idx % 8);
+	field.bit_mask = 0x7 << field.bit_offset;
+
+	offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);
+
+	rt2x00pci_register_read(rt2x00dev, offset, &reg);
+	rt2x00_set_field32(&reg, field,
+			   (crypto->cmd == SET_KEY) * crypto->cipher);
+	rt2x00pci_register_write(rt2x00dev, offset, reg);
+
+	/*
+	 * Update WCID information
+	 */
+	rt2800pci_config_wcid_attr(rt2x00dev, crypto, key);
+
+	return 0;
+}
+
+static int rt2800pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
+					 struct rt2x00lib_crypto *crypto,
+					 struct ieee80211_key_conf *key)
+{
+	struct hw_key_entry key_entry;
+	u32 offset;
+
+	if (crypto->cmd == SET_KEY) {
+		/*
+		 * 1 pairwise key is possible per AID; this means that the AID
+		 * equals our hw_key_idx. Make sure the WCID starts _after_ the
+		 * last possible shared key entry.
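+		 * WCIDs 0-31 hold the shared keys, so at most 256 - 32 = 224
+		 * stations can be given a hardware pairwise key.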
+		 */
+		if (crypto->aid > (256 - 32))
+			return -ENOSPC;
+
+		key->hw_key_idx = 32 + crypto->aid;
+
+		memcpy(key_entry.key, crypto->key,
+		       sizeof(key_entry.key));
+		memcpy(key_entry.tx_mic, crypto->tx_mic,
+		       sizeof(key_entry.tx_mic));
+		memcpy(key_entry.rx_mic, crypto->rx_mic,
+		       sizeof(key_entry.rx_mic));
+
+		offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
+		rt2x00pci_register_multiwrite(rt2x00dev, offset,
+					      &key_entry, sizeof(key_entry));
+	}
+
+	/*
+	 * Update WCID information
+	 */
+	rt2800pci_config_wcid_attr(rt2x00dev, crypto, key);
+
+	return 0;
+}
+
+static void rt2800pci_config_filter(struct rt2x00_dev *rt2x00dev,
+				    const unsigned int filter_flags)
+{
+	u32 reg;
+
+	/*
+	 * Start configuration steps.
+	 * Note that frames with a version error will always be dropped
+	 * and broadcast frames will always be accepted since there are
+	 * no filter flags for them at this time.
+	 */
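+	/*
+	 * Each FIF_* flag clears the matching _DROP_ bit below, so the
+	 * hardware only drops a frame class when mac80211 did not ask
+	 * to receive it.
+	 */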
+	rt2x00pci_register_read(rt2x00dev, RX_FILTER_CFG, &reg);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR,
+			   !(filter_flags & FIF_FCSFAIL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
+			   !(filter_flags & FIF_PLCPFAIL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
+			   !(filter_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
+			   !(filter_flags & FIF_ALLMULTI));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BROADCAST, 0);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_DUPLICATE, 1);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END_ACK,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_ACK,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CTS,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_RTS,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
+			   !(filter_flags & FIF_PSPOLL));
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00pci_register_write(rt2x00dev, RX_FILTER_CFG, reg);
+}
+
+static void rt2800pci_config_intf(struct rt2x00_dev *rt2x00dev,
+				  struct rt2x00_intf *intf,
+				  struct rt2x00intf_conf *conf,
+				  const unsigned int flags)
+{
+	unsigned int beacon_base;
+	u32 reg;
+
+	if (flags & CONFIG_UPDATE_TYPE) {
+		/*
+		 * Clear current synchronisation setup.
+		 * For the Beacon base registers we only need to clear
+		 * the first byte since that byte contains the VALID and OWNER
+		 * bits which (when set to 0) will invalidate the entire beacon.
+		 */
+		beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
+		rt2x00pci_register_write(rt2x00dev, beacon_base, 0);
+
+		/*
+		 * Enable synchronisation.
+		 */
+		rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
+		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+		rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+	}
+
+	if (flags & CONFIG_UPDATE_MAC) {
+		reg = le32_to_cpu(conf->mac[1]);
+		rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
+		conf->mac[1] = cpu_to_le32(reg);
+
+		rt2x00pci_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
+					      conf->mac, sizeof(conf->mac));
+	}
+
+	if (flags & CONFIG_UPDATE_BSSID) {
+		reg = le32_to_cpu(conf->bssid[1]);
+		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
+		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
+		conf->bssid[1] = cpu_to_le32(reg);
+
+		rt2x00pci_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
+					      conf->bssid, sizeof(conf->bssid));
+	}
+}
+
+static void rt2800pci_config_erp(struct rt2x00_dev *rt2x00dev,
+				 struct rt2x00lib_erp *erp)
+{
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
+	rt2x00pci_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
+			   !!erp->short_preamble);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
+			   !!erp->short_preamble);
+	rt2x00pci_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
+			   erp->cts_protection ? 2 : 0);
+	rt2x00pci_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
+
+	rt2x00pci_register_write(rt2x00dev, LEGACY_BASIC_RATE,
+				 erp->basic_rates);
+	rt2x00pci_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
+
+	rt2x00pci_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
+	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
+	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
+	rt2x00pci_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
+	rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
+	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
+	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
+	rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
+	rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
+	rt2x00pci_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
+
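+	/*
+	 * The beacon interval register apparently counts in 1/16 TU units,
+	 * hence the multiplication by 16.
+	 */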
+	rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+			   erp->beacon_int * 16);
+	rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+}
+
+static void rt2800pci_config_ant(struct rt2x00_dev *rt2x00dev,
+				 struct antenna_setup *ant)
+{
+	u8 r1;
+	u8 r3;
+
+	rt2800pci_bbp_read(rt2x00dev, 1, &r1);
+	rt2800pci_bbp_read(rt2x00dev, 3, &r3);
+
+	/*
+	 * Configure the TX antenna.
+	 */
+	switch ((int)ant->tx) {
+	case 1:
+		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
+		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
+		break;
+	case 2:
+		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
+		break;
+	case 3:
+		/* Do nothing */
+		break;
+	}
+
+	/*
+	 * Configure the RX antenna.
+	 */
+	switch ((int)ant->rx) {
+	case 1:
+		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
+		break;
+	case 2:
+		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
+		break;
+	case 3:
+		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
+		break;
+	}
+
+	rt2800pci_bbp_write(rt2x00dev, 3, r3);
+	rt2800pci_bbp_write(rt2x00dev, 1, r1);
+}
+
+static void rt2800pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
+				      struct rt2x00lib_conf *libconf)
+{
+	u16 eeprom;
+	short lna_gain;
+
+	if (libconf->rf.channel <= 14) {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+		lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
+	} else if (libconf->rf.channel <= 64) {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+		lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
+	} else if (libconf->rf.channel <= 128) {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+		lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
+	} else {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+		lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
+	}
+
+	rt2x00dev->lna_gain = lna_gain;
+}
+
+static void rt2800pci_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
+					  struct ieee80211_conf *conf,
+					  struct rf_channel *rf,
+					  struct channel_info *info)
+{
+	rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
+
+	if (rt2x00dev->default_ant.tx == 1)
+		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
+
+	if (rt2x00dev->default_ant.rx == 1) {
+		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
+		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
+	} else if (rt2x00dev->default_ant.rx == 2)
+		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
+
+	if (rf->channel > 14) {
+		/*
+		 * When the TX power is below 0, we should increase it by 7 to
+		 * make it a positive value (the minimum value is -7).
+		 * However this means that values between 0 and 7 have a
+		 * double meaning, and we should set the 7dBm boost flag.
+		 */
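+		/*
+		 * e.g. -3 dBm is programmed as 4 with the boost flag cleared,
+		 * while +4 dBm is programmed as 4 with the boost flag set.
+		 */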
+		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
+				   (info->tx_power1 >= 0));
+
+		if (info->tx_power1 < 0)
+			info->tx_power1 += 7;
+
+		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
+				   TXPOWER_A_TO_DEV(info->tx_power1));
+
+		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
+				   (info->tx_power2 >= 0));
+
+		if (info->tx_power2 < 0)
+			info->tx_power2 += 7;
+
+		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
+				   TXPOWER_A_TO_DEV(info->tx_power2));
+	} else {
+		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
+				   TXPOWER_G_TO_DEV(info->tx_power1));
+		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
+				   TXPOWER_G_TO_DEV(info->tx_power2));
+	}
+
+	rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));
+
+	rt2800pci_rf_write(rt2x00dev, 1, rf->rf1);
+	rt2800pci_rf_write(rt2x00dev, 2, rf->rf2);
+	rt2800pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
+	rt2800pci_rf_write(rt2x00dev, 4, rf->rf4);
+
+	udelay(200);
+
+	rt2800pci_rf_write(rt2x00dev, 1, rf->rf1);
+	rt2800pci_rf_write(rt2x00dev, 2, rf->rf2);
+	rt2800pci_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
+	rt2800pci_rf_write(rt2x00dev, 4, rf->rf4);
+
+	udelay(200);
+
+	rt2800pci_rf_write(rt2x00dev, 1, rf->rf1);
+	rt2800pci_rf_write(rt2x00dev, 2, rf->rf2);
+	rt2800pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
+	rt2800pci_rf_write(rt2x00dev, 4, rf->rf4);
+}
+
+static void rt2800pci_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
+					  struct ieee80211_conf *conf,
+					  struct rf_channel *rf,
+					  struct channel_info *info)
+{
+	u8 rfcsr;
+
+	rt2800pci_rfcsr_write(rt2x00dev, 2, rf->rf1);
+	rt2800pci_rfcsr_write(rt2x00dev, 3, rf->rf3);
+
+	rt2800pci_rfcsr_read(rt2x00dev, 6, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
+	rt2800pci_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+	rt2800pci_rfcsr_read(rt2x00dev, 12, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
+			  TXPOWER_G_TO_DEV(info->tx_power1));
+	rt2800pci_rfcsr_write(rt2x00dev, 12, rfcsr);
+
+	rt2800pci_rfcsr_read(rt2x00dev, 23, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
+	rt2800pci_rfcsr_write(rt2x00dev, 23, rfcsr);
+
+	rt2800pci_rfcsr_write(rt2x00dev, 24,
+			      rt2x00dev->calibration[conf_is_ht40(conf)]);
+
+	rt2800pci_rfcsr_read(rt2x00dev, 7, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
+	rt2800pci_rfcsr_write(rt2x00dev, 7, rfcsr);
+}
+
+static void rt2800pci_config_channel(struct rt2x00_dev *rt2x00dev,
+				     struct ieee80211_conf *conf,
+				     struct rf_channel *rf,
+				     struct channel_info *info)
+{
+	u32 reg;
+	unsigned int tx_pin;
+	u8 bbp;
+
+	if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
+		rt2800pci_config_channel_rt2x(rt2x00dev, conf, rf, info);
+	else
+		rt2800pci_config_channel_rt3x(rt2x00dev, conf, rf, info);
+
+	/*
+	 * Change BBP settings
+	 */
+	rt2800pci_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+	rt2800pci_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+	rt2800pci_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+	rt2800pci_bbp_write(rt2x00dev, 86, 0);
+
+	if (rf->channel <= 14) {
+		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
+			rt2800pci_bbp_write(rt2x00dev, 82, 0x62);
+			rt2800pci_bbp_write(rt2x00dev, 75, 0x46);
+		} else {
+			rt2800pci_bbp_write(rt2x00dev, 82, 0x84);
+			rt2800pci_bbp_write(rt2x00dev, 75, 0x50);
+		}
+	} else {
+		rt2800pci_bbp_write(rt2x00dev, 82, 0xf2);
+
+		if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
+			rt2800pci_bbp_write(rt2x00dev, 75, 0x46);
+		else
+			rt2800pci_bbp_write(rt2x00dev, 75, 0x50);
+	}
+
+	rt2x00pci_register_read(rt2x00dev, TX_BAND_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
+	rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
+	rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
+	rt2x00pci_register_write(rt2x00dev, TX_BAND_CFG, reg);
+
+	tx_pin = 0;
+
+	/* Turn on unused PA or LNA when not using 1T or 1R */
+	if (rt2x00dev->default_ant.tx != 1) {
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
+	}
+
+	/* Turn on unused PA or LNA when not using 1T or 1R */
+	if (rt2x00dev->default_ant.rx != 1) {
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
+	}
+
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
+	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);
+
+	rt2x00pci_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
+
+	rt2800pci_bbp_read(rt2x00dev, 4, &bbp);
+	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
+	rt2800pci_bbp_write(rt2x00dev, 4, bbp);
+
+	rt2800pci_bbp_read(rt2x00dev, 3, &bbp);
+	rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
+	rt2800pci_bbp_write(rt2x00dev, 3, bbp);
+
+	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+		if (conf_is_ht40(conf)) {
+			rt2800pci_bbp_write(rt2x00dev, 69, 0x1a);
+			rt2800pci_bbp_write(rt2x00dev, 70, 0x0a);
+			rt2800pci_bbp_write(rt2x00dev, 73, 0x16);
+		} else {
+			rt2800pci_bbp_write(rt2x00dev, 69, 0x16);
+			rt2800pci_bbp_write(rt2x00dev, 70, 0x08);
+			rt2800pci_bbp_write(rt2x00dev, 73, 0x11);
+		}
+	}
+
+	msleep(1);
+}
+
+static void rt2800pci_config_txpower(struct rt2x00_dev *rt2x00dev,
+				     const int txpower)
+{
+	u32 reg;
+	u32 value = TXPOWER_G_TO_DEV(txpower);
+	u8 r1;
+
+	rt2800pci_bbp_read(rt2x00dev, 1, &r1);
+	rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
+	rt2800pci_bbp_write(rt2x00dev, 1, r1);
+
+	rt2x00pci_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_1MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_2MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_55MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_11MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_6MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_9MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_12MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_0_18MBS, value);
+	rt2x00pci_register_write(rt2x00dev, TX_PWR_CFG_0, reg);
+
+	rt2x00pci_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_24MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_36MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_48MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_54MBS, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS0, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS1, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS2, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS3, value);
+	rt2x00pci_register_write(rt2x00dev, TX_PWR_CFG_1, reg);
+
+	rt2x00pci_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS4, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS5, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS6, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS7, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS8, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS9, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS10, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS11, value);
+	rt2x00pci_register_write(rt2x00dev, TX_PWR_CFG_2, reg);
+
+	rt2x00pci_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS12, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS13, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS14, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS15, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN1, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN2, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN3, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN4, value);
+	rt2x00pci_register_write(rt2x00dev, TX_PWR_CFG_3, reg);
+
+	rt2x00pci_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN5, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN6, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN7, value);
+	rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN8, value);
+	rt2x00pci_register_write(rt2x00dev, TX_PWR_CFG_4, reg);
+}
+
+static void rt2800pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
+					 struct rt2x00lib_conf *libconf)
+{
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, TX_RTY_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
+			   libconf->conf->short_frame_max_tx_count);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
+			   libconf->conf->long_frame_max_tx_count);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
+	rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
+	rt2x00pci_register_write(rt2x00dev, TX_RTY_CFG, reg);
+}
+
+static void rt2800pci_config_ps(struct rt2x00_dev *rt2x00dev,
+				struct rt2x00lib_conf *libconf)
+{
+	enum dev_state state =
+	    (libconf->conf->flags & IEEE80211_CONF_PS) ?
+		STATE_SLEEP : STATE_AWAKE;
+	u32 reg;
+
+	if (state == STATE_SLEEP) {
+		rt2x00pci_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);
+
+		rt2x00pci_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
+				   libconf->conf->listen_interval - 1);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
+		rt2x00pci_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
+
+		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
+	} else {
+		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
+
+		rt2x00pci_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
+		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
+		rt2x00pci_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
+	}
+}
+
+static void rt2800pci_config(struct rt2x00_dev *rt2x00dev,
+			     struct rt2x00lib_conf *libconf,
+			     const unsigned int flags)
+{
+	/* Always recalculate LNA gain before changing configuration */
+	rt2800pci_config_lna_gain(rt2x00dev, libconf);
+
+	if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
+		rt2800pci_config_channel(rt2x00dev, libconf->conf,
+					 &libconf->rf, &libconf->channel);
+	if (flags & IEEE80211_CONF_CHANGE_POWER)
+		rt2800pci_config_txpower(rt2x00dev, libconf->conf->power_level);
+	if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
+		rt2800pci_config_retry_limit(rt2x00dev, libconf);
+	if (flags & IEEE80211_CONF_CHANGE_PS)
+		rt2800pci_config_ps(rt2x00dev, libconf);
+}
+
+/*
+ * Link tuning
+ */
+static void rt2800pci_link_stats(struct rt2x00_dev *rt2x00dev,
+				 struct link_qual *qual)
+{
+	u32 reg;
+
+	/*
+	 * Update FCS error count from register.
+	 */
+	rt2x00pci_register_read(rt2x00dev, RX_STA_CNT0, &reg);
+	qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
+}
+
+static u8 rt2800pci_get_default_vgc(struct rt2x00_dev *rt2x00dev)
+{
+	if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+		return 0x2e + rt2x00dev->lna_gain;
+
+	if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+		return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
+	else
+		return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
+}
+
+static inline void rt2800pci_set_vgc(struct rt2x00_dev *rt2x00dev,
+				     struct link_qual *qual, u8 vgc_level)
+{
+	if (qual->vgc_level != vgc_level) {
+		rt2800pci_bbp_write(rt2x00dev, 66, vgc_level);
+		qual->vgc_level = vgc_level;
+		qual->vgc_level_reg = vgc_level;
+	}
+}
+
+static void rt2800pci_reset_tuner(struct rt2x00_dev *rt2x00dev,
+				  struct link_qual *qual)
+{
+	rt2800pci_set_vgc(rt2x00dev, qual,
+			  rt2800pci_get_default_vgc(rt2x00dev));
+}
+
+static void rt2800pci_link_tuner(struct rt2x00_dev *rt2x00dev,
+				 struct link_qual *qual, const u32 count)
+{
+	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
+		return;
+
+	/*
+	 * When the RSSI is better than -80, increase the VGC level by 0x10
+	 */
+	rt2800pci_set_vgc(rt2x00dev, qual,
+			  rt2800pci_get_default_vgc(rt2x00dev) +
+			  ((qual->rssi > -80) * 0x10));
+}
+
+/*
+ * Firmware functions
+ */
+static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
+{
+	return FIRMWARE_RT2860;
+}
+
+static int rt2800pci_check_firmware(struct rt2x00_dev *rt2x00dev,
+				    const u8 *data, const size_t len)
+{
+	u16 fw_crc;
+	u16 crc;
+
+	/*
+	 * Only support 8kb firmware files.
+	 */
+	if (len != 8192)
+		return FW_BAD_LENGTH;
+
+	/*
+	 * The last 2 bytes in the firmware array are the crc checksum itself;
+	 * this means that we should never pass those 2 bytes to the crc
+	 * algorithm.
+	 */
+	fw_crc = (data[len - 2] << 8 | data[len - 1]);
+
+	/*
+	 * Use the crc ccitt algorithm.
+	 * This will return the same value as the legacy driver, which
+	 * used bit-order reversal on both the firmware bytes before
+	 * input as well as on the final output.
+	 * Obviously using crc ccitt directly is much more efficient.
+	 */
+	crc = crc_ccitt(~0, data, len - 2);
+
+	/*
+	 * There is a small difference between the crc-itu-t + bitrev and
+	 * the crc-ccitt crc calculation. In the latter method the 2 bytes
+	 * will be swapped; use swab16 to convert the crc to the correct
+	 * value.
+	 */
+	crc = swab16(crc);
+
+	return (fw_crc == crc) ? FW_OK : FW_BAD_CRC;
+}
+
+static int rt2800pci_load_firmware(struct rt2x00_dev *rt2x00dev,
+				   const u8 *data, const size_t len)
+{
+	unsigned int i;
+	u32 reg;
+
+	/*
+	 * Wait for stable hardware.
+	 */
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
+		if (reg && reg != ~0)
+			break;
+		msleep(1);
+	}
+
+	if (i == REGISTER_BUSY_COUNT) {
+		ERROR(rt2x00dev, "Unstable hardware.\n");
+		return -EBUSY;
+	}
+
+	rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
+	rt2x00pci_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);
+
+	/*
+	 * Disable DMA, will be reenabled later when enabling
+	 * the radio.
+	 */
+	rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+	rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+	/*
+	 * Enable host program RAM write selection
+	 */
+	reg = 0;
+	rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1);
+	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, reg);
+
+	/*
+	 * Write firmware to device.
+	 */
+	rt2x00pci_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
+				      data, len);
+
+	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000);
+	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001);
+
+	/*
+	 * Wait for device to stabilize.
+	 */
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2x00pci_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
+		if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
+			break;
+		msleep(1);
+	}
+
+	if (i == REGISTER_BUSY_COUNT) {
+		ERROR(rt2x00dev, "PBF system register not ready.\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * Disable interrupts
+	 */
+	rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF);
+
+	/*
+	 * Initialize BBP R/W access agent
+	 */
+	rt2x00pci_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+	rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+
+	return 0;
+}
+
+/*
+ * Initialization functions.
+ */
+static bool rt2800pci_get_entry_state(struct queue_entry *entry)
+{
+	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
+	u32 word;
+
+	if (entry->queue->qid == QID_RX) {
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+		return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
+	} else {
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+		return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
+	}
+}
+
+static void rt2800pci_clear_entry(struct queue_entry *entry)
+{
+	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	u32 word;
+
+	if (entry->queue->qid == QID_RX) {
+		rt2x00_desc_read(entry_priv->desc, 0, &word);
+		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
+		rt2x00_desc_write(entry_priv->desc, 0, word);
+
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
+		rt2x00_desc_write(entry_priv->desc, 1, word);
+	} else {
+		rt2x00_desc_read(entry_priv->desc, 1, &word);
+		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
+		rt2x00_desc_write(entry_priv->desc, 1, word);
+	}
+}
+
+static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
+{
+	struct queue_entry_priv_pci *entry_priv;
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+	rt2x00pci_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+	/*
+	 * Initialize registers.
+	 */
+	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
+	rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma);
+	rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT0, rt2x00dev->tx[0].limit);
+	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX0, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX0, 0);
+
+	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
+	rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma);
+	rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT1, rt2x00dev->tx[1].limit);
+	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX1, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX1, 0);
+
+	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
+	rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma);
+	rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT2, rt2x00dev->tx[2].limit);
+	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX2, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX2, 0);
+
+	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
+	rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma);
+	rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT3, rt2x00dev->tx[3].limit);
+	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0);
+
+	entry_priv = rt2x00dev->rx->entries[0].priv_data;
+	rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma);
+	rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT, rt2x00dev->rx[0].limit);
+	rt2x00pci_register_write(rt2x00dev, RX_CRX_IDX, rt2x00dev->rx[0].limit - 1);
+	rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0);
+
+	/*
+	 * Enable global DMA configuration
+	 */
+	rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+	rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+	rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0);
+
+	return 0;
+}
+
+static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	unsigned int i;
+
+	rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+	rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+	rt2x00pci_register_read(rt2x00dev, BCN_OFFSET0, &reg);
+	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
+	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
+	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
+	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
+	rt2x00pci_register_write(rt2x00dev, BCN_OFFSET0, reg);
+
+	rt2x00pci_register_read(rt2x00dev, BCN_OFFSET1, &reg);
+	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
+	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
+	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
+	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
+	rt2x00pci_register_write(rt2x00dev, BCN_OFFSET1, reg);
+
+	rt2x00pci_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
+	rt2x00pci_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
+
+	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+	rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 0);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
+	rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+	rt2x00pci_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
+	rt2x00pci_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+
+	rt2x00pci_register_read(rt2x00dev, TX_LINK_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_MRQ_EN, 0);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_RDG_EN, 0);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_CF_ACK_EN, 1);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB, 0);
+	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFS, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_LINK_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
+	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
+	rt2x00pci_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
+	rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
+	if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
+	    rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
+		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
+	else
+		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
+	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
+	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
+	rt2x00pci_register_write(rt2x00dev, MAX_LEN_CFG, reg);
+
+	rt2x00pci_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
+
+	rt2x00pci_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
+	rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
+	rt2x00pci_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+	rt2x00pci_register_write(rt2x00dev, CCK_PROT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+	rt2x00pci_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+	rt2x00pci_register_write(rt2x00dev, MM20_PROT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+	rt2x00pci_register_write(rt2x00dev, MM40_PROT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+	rt2x00pci_register_write(rt2x00dev, GF20_PROT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+	rt2x00pci_register_write(rt2x00dev, GF40_PROT_CFG, reg);
+
+	rt2x00pci_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
+	rt2x00pci_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
+
+	rt2x00pci_register_read(rt2x00dev, TX_RTS_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
+	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
+			   IEEE80211_MAX_RTS_THRESHOLD);
+	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_RTS_CFG, reg);
+
+	rt2x00pci_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
+	rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+	/*
+	 * The ASIC keeps garbage values after boot; clear the encryption keys.
+	 */
+	for (i = 0; i < 4; i++)
+		rt2x00pci_register_write(rt2x00dev,
+					 SHARED_KEY_MODE_ENTRY(i), 0);
+
+	for (i = 0; i < 256; i++) {
+		u32 wcid[2] = { 0xffffffff, 0x00ffffff };
+		rt2x00pci_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
+					      wcid, sizeof(wcid));
+
+		rt2x00pci_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
+		rt2x00pci_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
+	}
+
+	/*
+	 * Clear all beacons
+	 * For the Beacon base registers we only need to clear
+	 * the first byte since that byte contains the VALID and OWNER
+	 * bits which (when set to 0) will invalidate the entire beacon.
+	 */
+	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
+	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
+	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
+	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
+	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
+	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
+	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
+	rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
+
+	rt2x00pci_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS3FBK, 2);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS4FBK, 3);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS5FBK, 4);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS6FBK, 5);
+	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6);
+	rt2x00pci_register_write(rt2x00dev, HT_FBK_CFG0, reg);
+
+	rt2x00pci_register_read(rt2x00dev, HT_FBK_CFG1, &reg);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS11FBK, 10);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS12FBK, 11);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS13FBK, 12);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS14FBK, 13);
+	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14);
+	rt2x00pci_register_write(rt2x00dev, HT_FBK_CFG1, reg);
+
+	rt2x00pci_register_read(rt2x00dev, LG_FBK_CFG0, &reg);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 9);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS3FBK, 10);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS4FBK, 11);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS5FBK, 12);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS6FBK, 13);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14);
+	rt2x00pci_register_write(rt2x00dev, LG_FBK_CFG0, reg);
+
+	rt2x00pci_register_read(rt2x00dev, LG_FBK_CFG1, &reg);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1);
+	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS3FBK, 2);
+	rt2x00pci_register_write(rt2x00dev, LG_FBK_CFG1, reg);
+
+	/*
+	 * We must clear the error counters.
+	 * These registers are cleared on read,
+	 * so we may pass a dummy variable to store the value.
+	 */
+	rt2x00pci_register_read(rt2x00dev, RX_STA_CNT0, &reg);
+	rt2x00pci_register_read(rt2x00dev, RX_STA_CNT1, &reg);
+	rt2x00pci_register_read(rt2x00dev, RX_STA_CNT2, &reg);
+	rt2x00pci_register_read(rt2x00dev, TX_STA_CNT0, &reg);
+	rt2x00pci_register_read(rt2x00dev, TX_STA_CNT1, &reg);
+	rt2x00pci_register_read(rt2x00dev, TX_STA_CNT2, &reg);
+
+	return 0;
+}
+
+static int rt2800pci_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
+{
+	unsigned int i;
+	u32 reg;
+
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2x00pci_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
+		if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
+			return 0;
+
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n");
+	return -EACCES;
+}
+
+static int rt2800pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
+{
+	unsigned int i;
+	u8 value;
+
+	/*
+	 * BBP was enabled after firmware was loaded,
+	 * but we need to reactivate it now.
+	 */
+	rt2x00pci_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+	rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+	msleep(1);
+
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2800pci_bbp_read(rt2x00dev, 0, &value);
+		if ((value != 0xff) && (value != 0x00))
+			return 0;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR(rt2x00dev, "BBP register access failed, aborting.\n");
+	return -EACCES;
+}
+
+static int rt2800pci_init_bbp(struct rt2x00_dev *rt2x00dev)
+{
+	unsigned int i;
+	u16 eeprom;
+	u8 reg_id;
+	u8 value;
+
+	if (unlikely(rt2800pci_wait_bbp_rf_ready(rt2x00dev) ||
+		     rt2800pci_wait_bbp_ready(rt2x00dev)))
+		return -EACCES;
+
+	rt2800pci_bbp_write(rt2x00dev, 65, 0x2c);
+	rt2800pci_bbp_write(rt2x00dev, 66, 0x38);
+	rt2800pci_bbp_write(rt2x00dev, 69, 0x12);
+	rt2800pci_bbp_write(rt2x00dev, 70, 0x0a);
+	rt2800pci_bbp_write(rt2x00dev, 73, 0x10);
+	rt2800pci_bbp_write(rt2x00dev, 81, 0x37);
+	rt2800pci_bbp_write(rt2x00dev, 82, 0x62);
+	rt2800pci_bbp_write(rt2x00dev, 83, 0x6a);
+	rt2800pci_bbp_write(rt2x00dev, 84, 0x99);
+	rt2800pci_bbp_write(rt2x00dev, 86, 0x00);
+	rt2800pci_bbp_write(rt2x00dev, 91, 0x04);
+	rt2800pci_bbp_write(rt2x00dev, 92, 0x00);
+	rt2800pci_bbp_write(rt2x00dev, 103, 0x00);
+	rt2800pci_bbp_write(rt2x00dev, 105, 0x05);
+
+	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+		rt2800pci_bbp_write(rt2x00dev, 69, 0x16);
+		rt2800pci_bbp_write(rt2x00dev, 73, 0x12);
+	}
+
+	if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION)
+		rt2800pci_bbp_write(rt2x00dev, 84, 0x19);
+
+	if (rt2x00_rt(&rt2x00dev->chip, RT3052)) {
+		rt2800pci_bbp_write(rt2x00dev, 31, 0x08);
+		rt2800pci_bbp_write(rt2x00dev, 78, 0x0e);
+		rt2800pci_bbp_write(rt2x00dev, 80, 0x08);
+	}
+
+	for (i = 0; i < EEPROM_BBP_SIZE; i++) {
+		rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+
+		if (eeprom != 0xffff && eeprom != 0x0000) {
+			reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
+			value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
+			rt2800pci_bbp_write(rt2x00dev, reg_id, value);
+		}
+	}
+
+	return 0;
+}
+
+static u8 rt2800pci_init_rx_filter(struct rt2x00_dev *rt2x00dev,
+				   bool bw40, u8 rfcsr24, u8 filter_target)
+{
+	unsigned int i;
+	u8 bbp;
+	u8 rfcsr;
+	u8 passband;
+	u8 stopband;
+	u8 overtuned = 0;
+
+	rt2800pci_rfcsr_write(rt2x00dev, 24, rfcsr24);
+
+	rt2800pci_bbp_read(rt2x00dev, 4, &bbp);
+	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
+	rt2800pci_bbp_write(rt2x00dev, 4, bbp);
+
+	rt2800pci_rfcsr_read(rt2x00dev, 22, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
+	rt2800pci_rfcsr_write(rt2x00dev, 22, rfcsr);
+
+	/*
+	 * Set power & frequency of passband test tone
+	 */
+	rt2800pci_bbp_write(rt2x00dev, 24, 0);
+
+	for (i = 0; i < 100; i++) {
+		rt2800pci_bbp_write(rt2x00dev, 25, 0x90);
+		msleep(1);
+
+		rt2800pci_bbp_read(rt2x00dev, 55, &passband);
+		if (passband)
+			break;
+	}
+
+	/*
+	 * Set power & frequency of stopband test tone
+	 */
+	rt2800pci_bbp_write(rt2x00dev, 24, 0x06);
+
+	for (i = 0; i < 100; i++) {
+		rt2800pci_bbp_write(rt2x00dev, 25, 0x90);
+		msleep(1);
+
+		rt2800pci_bbp_read(rt2x00dev, 55, &stopband);
+
+		if ((passband - stopband) <= filter_target) {
+			rfcsr24++;
+			overtuned += ((passband - stopband) == filter_target);
+		} else
+			break;
+
+		rt2800pci_rfcsr_write(rt2x00dev, 24, rfcsr24);
+	}
+
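+	/*
+	 * If the target difference was reached at least once, the last
+	 * increment overshot, so step the filter value back by one.
+	 */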
+	rfcsr24 -= !!overtuned;
+
+	rt2800pci_rfcsr_write(rt2x00dev, 24, rfcsr24);
+	return rfcsr24;
+}
+
+static int rt2800pci_init_rfcsr(struct rt2x00_dev *rt2x00dev)
+{
+	u8 rfcsr;
+	u8 bbp;
+
+	if (!rt2x00_rf(&rt2x00dev->chip, RF3020) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF3022))
+		return 0;
+
+	/*
+	 * Init RF calibration.
+	 */
+	rt2800pci_rfcsr_read(rt2x00dev, 30, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+	rt2800pci_rfcsr_write(rt2x00dev, 30, rfcsr);
+	msleep(1);
+	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
+	rt2800pci_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+	rt2800pci_rfcsr_write(rt2x00dev, 0, 0x50);
+	rt2800pci_rfcsr_write(rt2x00dev, 1, 0x01);
+	rt2800pci_rfcsr_write(rt2x00dev, 2, 0xf7);
+	rt2800pci_rfcsr_write(rt2x00dev, 3, 0x75);
+	rt2800pci_rfcsr_write(rt2x00dev, 4, 0x40);
+	rt2800pci_rfcsr_write(rt2x00dev, 5, 0x03);
+	rt2800pci_rfcsr_write(rt2x00dev, 6, 0x02);
+	rt2800pci_rfcsr_write(rt2x00dev, 7, 0x50);
+	rt2800pci_rfcsr_write(rt2x00dev, 8, 0x39);
+	rt2800pci_rfcsr_write(rt2x00dev, 9, 0x0f);
+	rt2800pci_rfcsr_write(rt2x00dev, 10, 0x60);
+	rt2800pci_rfcsr_write(rt2x00dev, 11, 0x21);
+	rt2800pci_rfcsr_write(rt2x00dev, 12, 0x75);
+	rt2800pci_rfcsr_write(rt2x00dev, 13, 0x75);
+	rt2800pci_rfcsr_write(rt2x00dev, 14, 0x90);
+	rt2800pci_rfcsr_write(rt2x00dev, 15, 0x58);
+	rt2800pci_rfcsr_write(rt2x00dev, 16, 0xb3);
+	rt2800pci_rfcsr_write(rt2x00dev, 17, 0x92);
+	rt2800pci_rfcsr_write(rt2x00dev, 18, 0x2c);
+	rt2800pci_rfcsr_write(rt2x00dev, 19, 0x02);
+	rt2800pci_rfcsr_write(rt2x00dev, 20, 0xba);
+	rt2800pci_rfcsr_write(rt2x00dev, 21, 0xdb);
+	rt2800pci_rfcsr_write(rt2x00dev, 22, 0x00);
+	rt2800pci_rfcsr_write(rt2x00dev, 23, 0x31);
+	rt2800pci_rfcsr_write(rt2x00dev, 24, 0x08);
+	rt2800pci_rfcsr_write(rt2x00dev, 25, 0x01);
+	rt2800pci_rfcsr_write(rt2x00dev, 26, 0x25);
+	rt2800pci_rfcsr_write(rt2x00dev, 27, 0x23);
+	rt2800pci_rfcsr_write(rt2x00dev, 28, 0x13);
+	rt2800pci_rfcsr_write(rt2x00dev, 29, 0x83);
+
+	/*
+	 * Set RX Filter calibration for 20MHz and 40MHz
+	 */
+	rt2x00dev->calibration[0] =
+	    rt2800pci_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
+	rt2x00dev->calibration[1] =
+	    rt2800pci_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
+
+	/*
+	 * Set back to initial state
+	 */
+	rt2800pci_bbp_write(rt2x00dev, 24, 0);
+
+	rt2800pci_rfcsr_read(rt2x00dev, 22, &rfcsr);
+	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
+	rt2800pci_rfcsr_write(rt2x00dev, 22, rfcsr);
+
+	/*
+	 * set BBP back to BW20
+	 */
+	rt2800pci_bbp_read(rt2x00dev, 4, &bbp);
+	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
+	rt2800pci_bbp_write(rt2x00dev, 4, bbp);
+
+	return 0;
+}
+
+/*
+ * Device state switch handlers.
+ */
+static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
+				enum dev_state state)
+{
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
+			   (state == STATE_RADIO_RX_ON) ||
+			   (state == STATE_RADIO_RX_ON_LINK));
+	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+}
+
+static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
+				 enum dev_state state)
+{
+	int mask = (state == STATE_RADIO_IRQ_ON);
+	u32 reg;
+
+	/*
+	 * When interrupts are being enabled, the interrupt source
+	 * register must be cleared first to assure a clean state.
+	 */
+	if (state == STATE_RADIO_IRQ_ON) {
+		rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+		rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+	}
+
+	rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, mask);
+	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, mask);
+	rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
+}
+
+static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
+{
+	unsigned int i;
+	u32 reg;
+
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+		if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
+		    !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
+			return 0;
+
+		msleep(1);
+	}
+
+	ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
+	return -EACCES;
+}
+
+static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	u16 word;
+
+	/*
+	 * Initialize all registers.
+	 */
+	if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+		     rt2800pci_init_queues(rt2x00dev) ||
+		     rt2800pci_init_registers(rt2x00dev) ||
+		     rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+		     rt2800pci_init_bbp(rt2x00dev) ||
+		     rt2800pci_init_rfcsr(rt2x00dev)))
+		return -EIO;
+
+	/*
+	 * Send signal to firmware during boot time.
+	 */
+	rt2800pci_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
+
+	/*
+	 * Enable RX: first enable TX only, then start the DMA engine,
+	 * and finally enable both TX and RX.
+	 */
+	rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
+	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+	rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+	rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
+	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
+	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+	/*
+	 * Initialize LED control
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
+	rt2800pci_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
+			      word & 0xff, (word >> 8) & 0xff);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
+	rt2800pci_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
+			      word & 0xff, (word >> 8) & 0xff);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
+	rt2800pci_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
+			      word & 0xff, (word >> 8) & 0xff);
+
+	return 0;
+}
+
+static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+	rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
+	rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
+	rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_PIN_CFG, 0);
+
+	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);
+
+	rt2x00pci_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+	rt2x00pci_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+	rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+	/* Wait for DMA, ignore error */
+	rt2800pci_wait_wpdma_ready(rt2x00dev);
+}
+
+static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
+			       enum dev_state state)
+{
+	/*
+	 * Always put the device to sleep first (even when we intend to
+	 * wake it up): if the device is booting and was not already asleep,
+	 * the wakeup request would fail.
+	 */
+	rt2800pci_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
+
+	if (state == STATE_AWAKE) {
+		rt2800pci_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
+		rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
+	}
+
+	return 0;
+}
+
+static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
+				      enum dev_state state)
+{
+	int retval = 0;
+
+	switch (state) {
+	case STATE_RADIO_ON:
+		/*
+		 * Before the radio can be enabled, the device first has
+		 * to be woken up. After that it needs a bit of time
+		 * to be fully awake and then the radio can be enabled.
+		 */
+		rt2800pci_set_state(rt2x00dev, STATE_AWAKE);
+		msleep(1);
+		retval = rt2800pci_enable_radio(rt2x00dev);
+		break;
+	case STATE_RADIO_OFF:
+		/*
+		 * After the radio has been disabled, the device should
+		 * be put to sleep for powersaving.
+		 */
+		rt2800pci_disable_radio(rt2x00dev);
+		rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
+		break;
+	case STATE_RADIO_RX_ON:
+	case STATE_RADIO_RX_ON_LINK:
+	case STATE_RADIO_RX_OFF:
+	case STATE_RADIO_RX_OFF_LINK:
+		rt2800pci_toggle_rx(rt2x00dev, state);
+		break;
+	case STATE_RADIO_IRQ_ON:
+	case STATE_RADIO_IRQ_OFF:
+		rt2800pci_toggle_irq(rt2x00dev, state);
+		break;
+	case STATE_DEEP_SLEEP:
+	case STATE_SLEEP:
+	case STATE_STANDBY:
+	case STATE_AWAKE:
+		retval = rt2800pci_set_state(rt2x00dev, state);
+		break;
+	default:
+		retval = -ENOTSUPP;
+		break;
+	}
+
+	if (unlikely(retval))
+		ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n",
+		      state, retval);
+
+	return retval;
+}
+
+/*
+ * TX descriptor initialization
+ */
+static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
+				    struct sk_buff *skb,
+				    struct txentry_desc *txdesc)
+{
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+	__le32 *txd = skbdesc->desc;
+	__le32 *txwi = (__le32 *)(skb->data - rt2x00dev->hw->extra_tx_headroom);
+	u32 word;
+
+	/*
+	 * Initialize TX Info descriptor
+	 */
+	rt2x00_desc_read(txwi, 0, &word);
+	rt2x00_set_field32(&word, TXWI_W0_FRAG,
+			   test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
+	rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
+	rt2x00_set_field32(&word, TXWI_W0_TS,
+			   test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W0_AMPDU,
+			   test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
+	rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
+	rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
+	rt2x00_set_field32(&word, TXWI_W0_BW,
+			   test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
+			   test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
+	rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
+	rt2x00_desc_write(txwi, 0, word);
+
+	rt2x00_desc_read(txwi, 1, &word);
+	rt2x00_set_field32(&word, TXWI_W1_ACK,
+			   test_bit(ENTRY_TXD_ACK, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W1_NSEQ,
+			   test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
+	rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
+	rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
+			   test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
+			       (skbdesc->entry->entry_idx + 1) : 0xff);
+	rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
+			   skb->len - txdesc->l2pad);
+	rt2x00_set_field32(&word, TXWI_W1_PACKETID,
+			   skbdesc->entry->queue->qid + 1);
+	rt2x00_desc_write(txwi, 1, word);
+
+	/*
+	 * Always write 0 to IV/EIV fields, hardware will insert the IV
+	 * from the IVEIV register when ENTRY_TXD_ENCRYPT_IV is set to 0.
+	 * When ENTRY_TXD_ENCRYPT_IV is set to 1 it will use the IV data
+	 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
+	 * crypto entry in the registers should be used to encrypt the frame.
+	 */
+	_rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
+	_rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
+
+	/*
+	 * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
+	 * must together contain the TXWI structure + 802.11 header + padding +
+	 * 802.11 data. We choose to let SD_PTR0/SD_LEN0 contain only the TXWI
+	 * and SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
+	 * data. This means that LAST_SEC0 is always 0.
+	 */
+
+	/*
+	 * Initialize TX descriptor
+	 */
+	rt2x00_desc_read(txd, 0, &word);
+	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
+	rt2x00_desc_write(txd, 0, word);
+
+	rt2x00_desc_read(txd, 1, &word);
+	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, skb->len);
+	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
+			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W1_BURST,
+			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W1_SD_LEN0,
+			   rt2x00dev->hw->extra_tx_headroom);
+	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
+	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
+	rt2x00_desc_write(txd, 1, word);
+
+	rt2x00_desc_read(txd, 2, &word);
+	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
+			   skbdesc->skb_dma + rt2x00dev->hw->extra_tx_headroom);
+	rt2x00_desc_write(txd, 2, word);
+
+	rt2x00_desc_read(txd, 3, &word);
+	rt2x00_set_field32(&word, TXD_W3_WIV,
+			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
+	rt2x00_desc_write(txd, 3, word);
+}
+
+/*
+ * TX data initialization
+ */
+static void rt2800pci_write_beacon(struct queue_entry *entry)
+{
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	unsigned int beacon_base;
+	u32 reg;
+
+	/*
+	 * Disable beaconing while we are reloading the beacon data,
+	 * otherwise we might be sending out invalid data.
+	 */
+	rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+	rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+	/*
+	 * Write entire beacon with descriptor to register.
+	 */
+	beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+	rt2x00pci_register_multiwrite(rt2x00dev,
+				      beacon_base,
+				      skbdesc->desc, skbdesc->desc_len);
+	rt2x00pci_register_multiwrite(rt2x00dev,
+				      beacon_base + skbdesc->desc_len,
+				      entry->skb->data, entry->skb->len);
+
+	/*
+	 * Clean up beacon skb.
+	 */
+	dev_kfree_skb_any(entry->skb);
+	entry->skb = NULL;
+}
+
+static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
+				    const enum data_queue_qid queue_idx)
+{
+	struct data_queue *queue;
+	unsigned int idx, qidx = 0;
+	u32 reg;
+
+	if (queue_idx == QID_BEACON) {
+		rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+		if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
+			rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+			rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+			rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+		}
+		return;
+	}
+
+	if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
+		return;
+
+	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+	idx = queue->index[Q_INDEX];
+
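+	/*
+	 * The MGMT queue maps to hardware TX ring 5, the data queues
+	 * map directly onto their queue index.
+	 */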
+	if (queue_idx == QID_MGMT)
+		qidx = 5;
+	else
+		qidx = queue_idx;
+
+	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX(qidx), idx);
+}
+
+static void rt2800pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
+				    const enum data_queue_qid qid)
+{
+	u32 reg;
+
+	if (qid == QID_BEACON) {
+		rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, 0);
+		return;
+	}
+
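+	/* Reset only the DMA index of the queue that is being killed. */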
+	rt2x00pci_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (qid == QID_AC_BE));
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (qid == QID_AC_BK));
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (qid == QID_AC_VI));
+	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (qid == QID_AC_VO));
+	rt2x00pci_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+}
+
+/*
+ * RX control handlers
+ */
+static void rt2800pci_fill_rxdone(struct queue_entry *entry,
+				  struct rxdone_entry_desc *rxdesc)
+{
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
+	__le32 *rxd = entry_priv->desc;
+	__le32 *rxwi = (__le32 *)entry->skb->data;
+	u32 rxd3;
+	u32 rxwi0;
+	u32 rxwi1;
+	u32 rxwi2;
+	u32 rxwi3;
+
+	rt2x00_desc_read(rxd, 3, &rxd3);
+	rt2x00_desc_read(rxwi, 0, &rxwi0);
+	rt2x00_desc_read(rxwi, 1, &rxwi1);
+	rt2x00_desc_read(rxwi, 2, &rxwi2);
+	rt2x00_desc_read(rxwi, 3, &rxwi3);
+
+	if (rt2x00_get_field32(rxd3, RXD_W3_CRC_ERROR))
+		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
+
+	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+		/*
+		 * Unfortunately we don't know the cipher type used during
+		 * decryption. This prevents us from providing correct
+		 * statistics through debugfs.
+		 */
+		rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
+		rxdesc->cipher_status =
+		    rt2x00_get_field32(rxd3, RXD_W3_CIPHER_ERROR);
+	}
+
+	if (rt2x00_get_field32(rxd3, RXD_W3_DECRYPTED)) {
+		/*
+		 * Hardware has stripped IV/EIV data from 802.11 frame during
+		 * decryption. Unfortunately the descriptor doesn't contain
+		 * any fields with the EIV/IV data either, so they can't
+		 * be restored by rt2x00lib.
+		 */
+		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+
+		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
+			rxdesc->flags |= RX_FLAG_DECRYPTED;
+		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
+			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
+	}
+
+	if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
+		rxdesc->dev_flags |= RXDONE_MY_BSS;
+
+	if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) {
+		rxdesc->dev_flags |= RXDONE_L2PAD;
+		skbdesc->flags |= SKBDESC_L2_PADDED;
+	}
+
+	if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
+		rxdesc->flags |= RX_FLAG_SHORT_GI;
+
+	if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
+		rxdesc->flags |= RX_FLAG_40MHZ;
+
+	/*
+	 * Detect RX rate, always use MCS as signal type.
+	 */
+	rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
+	rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
+	rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
+
+	/*
+	 * Mask off the 0x8 bit to remove the short preamble flag.
+	 */
+	if (rxdesc->rate_mode == RATE_MODE_CCK)
+		rxdesc->signal &= ~0x8;
+
+	rxdesc->rssi =
+	    (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
+	     rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
+
+	rxdesc->noise =
+	    (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
+	     rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
+
+	rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+
+	/*
+	 * Set RX IDX in register to inform hardware that we have handled
+	 * this entry and it is available for reuse again.
+	 */
+	rt2x00pci_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);
+
+	/*
+	 * Remove TXWI descriptor from start of buffer.
+	 */
+	skb_pull(entry->skb, RXWI_DESC_SIZE);
+	skb_trim(entry->skb, rxdesc->size);
+}
+
+/*
+ * Interrupt functions.
+ */
+static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
+{
+	struct data_queue *queue;
+	struct queue_entry *entry;
+	struct queue_entry *entry_done;
+	struct queue_entry_priv_pci *entry_priv;
+	struct txdone_entry_desc txdesc;
+	u32 word;
+	u32 reg;
+	u32 old_reg;
+	unsigned int type;
+	unsigned int index;
+	u16 mcs, real_mcs;
+
+	/*
+	 * During each iteration we compare the freshly read
+	 * TX_STA_FIFO register value with the value read in the
+	 * previous iteration. If the two values are equal we stop
+	 * processing, since it is then quite likely that the device
+	 * has been unplugged and we would risk an endless loop.
+	 */
+	old_reg = 0;
+
+	while (1) {
+		rt2x00pci_register_read(rt2x00dev, TX_STA_FIFO, &reg);
+		if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
+			break;
+
+		if (old_reg == reg)
+			break;
+		old_reg = reg;
+
+		/*
+		 * Skip this entry when it contains an invalid
+		 * queue identification number.
+		 */
+		type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
+		if (type >= QID_RX)
+			continue;
+
+		queue = rt2x00queue_get_queue(rt2x00dev, type);
+		if (unlikely(!queue))
+			continue;
+
+		/*
+		 * Skip this entry when it contains an invalid
+		 * index number.
+		 */
+		index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1;
+		if (unlikely(index >= queue->limit))
+			continue;
+
+		entry = &queue->entries[index];
+		entry_priv = entry->priv_data;
+		rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word);
+
+		entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+		while (entry != entry_done) {
+			/*
+			 * Catch up.
+			 * Just report any entries we missed as failed.
+			 */
+			WARNING(rt2x00dev,
+				"TX status report missed for entry %d\n",
+				entry_done->entry_idx);
+
+			txdesc.flags = 0;
+			__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
+			txdesc.retry = 0;
+
+			rt2x00lib_txdone(entry_done, &txdesc);
+			entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+		}
+
+		/*
+		 * Obtain the status about this packet.
+		 */
+		txdesc.flags = 0;
+		if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS))
+			__set_bit(TXDONE_SUCCESS, &txdesc.flags);
+		else
+			__set_bit(TXDONE_FAILURE, &txdesc.flags);
+
+		/*
+		 * Ralink has a retry mechanism using a global fallback
+		 * table. We set up this fallback table to try the
+		 * immediately lower rate for all rates. In the TX_STA_FIFO,
+		 * the MCS field contains the MCS used for the successful
+		 * transmission. If the first transmission succeeds,
+		 * we have mcs == tx_mcs. On the second transmission,
+		 * we have mcs == tx_mcs - 1. So the number of
+		 * retries is (tx_mcs - mcs).
+		 */
+		mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
+		real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
+		__set_bit(TXDONE_FALLBACK, &txdesc.flags);
+		txdesc.retry = mcs - min(mcs, real_mcs);
+
+		rt2x00lib_txdone(entry, &txdesc);
+	}
+}
+
+static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
+{
+	struct rt2x00_dev *rt2x00dev = dev_instance;
+	u32 reg;
+
+	/* Read status and ACK all interrupts */
+	rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+	rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+	if (!reg)
+		return IRQ_NONE;
+
+	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+		return IRQ_HANDLED;
+
+	/*
+	 * 1 - Rx ring done interrupt.
+	 */
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+		rt2x00pci_rxdone(rt2x00dev);
+
+	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
+		rt2800pci_txdone(rt2x00dev);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Device probe functions.
+ */
+static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+	u16 word;
+	u8 *mac;
+	u8 default_lna_gain;
+
+	/*
+	 * Read EEPROM into buffer
+	 */
+	switch (rt2x00dev->chip.rt) {
+	case RT2880:
+	case RT3052:
+		rt2800pci_read_eeprom_soc(rt2x00dev);
+		break;
+	case RT3090:
+		rt2800pci_read_eeprom_efuse(rt2x00dev);
+		break;
+	default:
+		rt2800pci_read_eeprom_pci(rt2x00dev);
+		break;
+	}
+
+	/*
+	 * Start validation of the data that has been read.
+	 */
+	mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
+	if (!is_valid_ether_addr(mac)) {
+		random_ether_addr(mac);
+		EEPROM(rt2x00dev, "MAC: %pM\n", mac);
+	}
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
+	if (word == 0xffff) {
+		rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
+		rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1);
+		rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
+		EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
+	} else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
+		/*
+		 * There is a max of 2 RX streams for RT2860 series
+		 */
+		if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2)
+			rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
+	}
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
+	if (word == 0xffff) {
+		rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
+		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
+		EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
+	}
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
+	if ((word & 0x00ff) == 0x00ff) {
+		rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
+		rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
+				   LED_MODE_TXRX_ACTIVITY);
+		rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
+		EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
+	}
+
+	/*
+	 * During the LNA validation we are going to use
+	 * lna0 as correct value. Note that EEPROM_LNA
+	 * lna0 as the correct value. Note that EEPROM_LNA
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
+	default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
+	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
+	if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
+	    rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
+		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
+				   default_lna_gain);
+	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
+	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
+	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
+		rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
+	if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
+	    rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
+		rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
+				   default_lna_gain);
+	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+
+	return 0;
+}
+
+static int rt2800pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	u16 value;
+	u16 eeprom;
+
+	/*
+	 * Read EEPROM word for configuration.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+
+	/*
+	 * Identify RF chipset.
+	 */
+	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
+	rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
+	rt2x00_set_chip_rf(rt2x00dev, value, reg);
+
+	if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF2020) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
+	    !rt2x00_rf(&rt2x00dev->chip, RF3022)) {
+		ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Identify default antenna configuration.
+	 */
+	rt2x00dev->default_ant.tx =
+	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH);
+	rt2x00dev->default_ant.rx =
+	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH);
+
+	/*
+	 * Read frequency offset and RF programming sequence.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+	rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
+
+	/*
+	 * Read external LNA information.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+
+	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
+		__set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
+	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
+		__set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
+
+	/*
+	 * Detect if this device has a hardware controlled radio.
+	 */
+	if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO))
+		__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
+
+	/*
+	 * Store LED settings, for correct LED behaviour.
+	 */
+#ifdef CONFIG_RT2X00_LIB_LEDS
+	rt2800pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
+	rt2800pci_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
+	rt2800pci_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
+#endif /* CONFIG_RT2X00_LIB_LEDS */
+
+	return 0;
+}
+
+/*
+ * RF value list for rt2860
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
+ */
+static const struct rf_channel rf_vals[] = {
+	{ 1,  0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b },
+	{ 2,  0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f },
+	{ 3,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b },
+	{ 4,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f },
+	{ 5,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b },
+	{ 6,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f },
+	{ 7,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b },
+	{ 8,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f },
+	{ 9,  0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b },
+	{ 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f },
+	{ 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b },
+	{ 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f },
+	{ 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b },
+	{ 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 },
+
+	/* 802.11 UNII / HiperLAN 2 */
+	{ 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 },
+	{ 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 },
+	{ 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 },
+	{ 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 },
+	{ 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b },
+	{ 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b },
+	{ 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 },
+	{ 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 },
+	{ 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b },
+	{ 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 },
+	{ 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 },
+	{ 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 },
+
+	/* 802.11 HiperLAN 2 */
+	{ 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 },
+	{ 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 },
+	{ 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 },
+	{ 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 },
+	{ 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 },
+	{ 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b },
+	{ 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 },
+	{ 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 },
+	{ 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 },
+	{ 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 },
+	{ 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b },
+	{ 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 },
+	{ 132, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b },
+	{ 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 },
+	{ 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b },
+	{ 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 },
+
+	/* 802.11 UNII */
+	{ 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 },
+	{ 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 },
+	{ 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f },
+	{ 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f },
+	{ 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 },
+	{ 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 },
+	{ 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 },
+
+	/* 802.11 Japan */
+	{ 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b },
+	{ 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 },
+	{ 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b },
+	{ 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 },
+	{ 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 },
+	{ 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b },
+	{ 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 },
+};
+
+static int rt2800pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+{
+	struct hw_mode_spec *spec = &rt2x00dev->spec;
+	struct channel_info *info;
+	char *tx_power1;
+	char *tx_power2;
+	unsigned int i;
+	u16 eeprom;
+
+	/*
+	 * Initialize all hw fields.
+	 */
+	rt2x00dev->hw->flags =
+	    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+	    IEEE80211_HW_SIGNAL_DBM |
+	    IEEE80211_HW_SUPPORTS_PS |
+	    IEEE80211_HW_PS_NULLFUNC_STACK;
+	rt2x00dev->hw->extra_tx_headroom = TXWI_DESC_SIZE;
+
+	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
+	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
+				rt2x00_eeprom_addr(rt2x00dev,
+						   EEPROM_MAC_ADDR_0));
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+
+	/*
+	 * Initialize hw_mode information.
+	 */
+	spec->supported_bands = SUPPORT_BAND_2GHZ;
+	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
+
+	if (rt2x00_rf(&rt2x00dev->chip, RF2820) ||
+	    rt2x00_rf(&rt2x00dev->chip, RF2720) ||
+	    rt2x00_rf(&rt2x00dev->chip, RF3020) ||
+	    rt2x00_rf(&rt2x00dev->chip, RF3021) ||
+	    rt2x00_rf(&rt2x00dev->chip, RF3022) ||
+	    rt2x00_rf(&rt2x00dev->chip, RF2020) ||
+	    rt2x00_rf(&rt2x00dev->chip, RF3052)) {
+		spec->num_channels = 14;
+		spec->channels = rf_vals;
+	} else if (rt2x00_rf(&rt2x00dev->chip, RF2850) ||
+		   rt2x00_rf(&rt2x00dev->chip, RF2750)) {
+		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+		spec->num_channels = ARRAY_SIZE(rf_vals);
+		spec->channels = rf_vals;
+	}
+
+	/*
+	 * Initialize HT information.
+	 */
+	spec->ht.ht_supported = true;
+	spec->ht.cap =
+	    IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+	    IEEE80211_HT_CAP_GRN_FLD |
+	    IEEE80211_HT_CAP_SGI_20 |
+	    IEEE80211_HT_CAP_SGI_40 |
+	    IEEE80211_HT_CAP_TX_STBC |
+	    IEEE80211_HT_CAP_RX_STBC |
+	    IEEE80211_HT_CAP_PSMP_SUPPORT;
+	spec->ht.ampdu_factor = 3;
+	spec->ht.ampdu_density = 4;
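+	/*
+	 * Advertise the number of TX streams from the EEPROM TXPATH field
+	 * (the MCS TX parameters store the stream count minus one).
+	 */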
+	spec->ht.mcs.tx_params =
+	    IEEE80211_HT_MCS_TX_DEFINED |
+	    IEEE80211_HT_MCS_TX_RX_DIFF |
+	    ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) <<
+		IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+
+	switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) {
+	case 3:
+		spec->ht.mcs.rx_mask[2] = 0xff;
+	case 2:
+		spec->ht.mcs.rx_mask[1] = 0xff;
+	case 1:
+		spec->ht.mcs.rx_mask[0] = 0xff;
+		spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
+		break;
+	}
+
+	/*
+	 * Create channel information array
+	 */
+	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	spec->channels_info = info;
+
+	tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
+	tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
+
+	for (i = 0; i < 14; i++) {
+		info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
+		info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
+	}
+
+	if (spec->num_channels > 14) {
+		tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
+		tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
+
+		for (i = 14; i < spec->num_channels; i++) {
+			info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
+			info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
+		}
+	}
+
+	return 0;
+}
+
+static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+{
+	int retval;
+
+	/*
+	 * Allocate eeprom data.
+	 */
+	retval = rt2800pci_validate_eeprom(rt2x00dev);
+	if (retval)
+		return retval;
+
+	retval = rt2800pci_init_eeprom(rt2x00dev);
+	if (retval)
+		return retval;
+
+	/*
+	 * Initialize hw specifications.
+	 */
+	retval = rt2800pci_probe_hw_mode(rt2x00dev);
+	if (retval)
+		return retval;
+
+	/*
+	 * This device has multiple filters for control frames
+	 * and has a separate filter for PS Poll frames.
+	 */
+	__set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags);
+	__set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags);
+
+	/*
+	 * This device requires firmware, except for the SoC
+	 * chipsets (RT2880 & RT3052).
+	 */
+	if (!rt2x00_rt(&rt2x00dev->chip, RT2880) &&
+	    !rt2x00_rt(&rt2x00dev->chip, RT3052))
+		__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
+	__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
+	__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
+	if (!modparam_nohwcrypt)
+		__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
+
+	/*
+	 * Set the rssi offset.
+	 */
+	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
+
+	return 0;
+}
+
+/*
+ * IEEE80211 stack callback functions.
+ */
+static void rt2800pci_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
+				   u32 *iv32, u16 *iv16)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	struct mac_iveiv_entry iveiv_entry;
+	u32 offset;
+
+	offset = MAC_IVEIV_ENTRY(hw_key_idx);
+	rt2x00pci_register_multiread(rt2x00dev, offset,
+				      &iveiv_entry, sizeof(iveiv_entry));
+
+	/* Return the IV/EIV values read back from the hardware entry. */
+	memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
+	memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
+}
+
+static int rt2800pci_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	u32 reg;
+	bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);
+
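+	/*
+	 * A threshold equal to IEEE80211_MAX_RTS_THRESHOLD disables RTS/CTS,
+	 * so protection is only enabled for smaller values.
+	 */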
+	rt2x00pci_register_read(rt2x00dev, TX_RTS_CFG, &reg);
+	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
+	rt2x00pci_register_write(rt2x00dev, TX_RTS_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
+	rt2x00pci_register_write(rt2x00dev, CCK_PROT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
+	rt2x00pci_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
+	rt2x00pci_register_write(rt2x00dev, MM20_PROT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
+	rt2x00pci_register_write(rt2x00dev, MM40_PROT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
+	rt2x00pci_register_write(rt2x00dev, GF20_PROT_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
+	rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
+	rt2x00pci_register_write(rt2x00dev, GF40_PROT_CFG, reg);
+
+	return 0;
+}
+
+static int rt2800pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
+			     const struct ieee80211_tx_queue_params *params)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	struct data_queue *queue;
+	struct rt2x00_field32 field;
+	int retval;
+	u32 reg;
+	u32 offset;
+
+	/*
+	 * First pass the configuration through rt2x00lib, which will
+	 * update the queue settings and validate the input. After that
+	 * we are free to update the registers based on the values
+	 * in the queue parameters.
+	 */
+	retval = rt2x00mac_conf_tx(hw, queue_idx, params);
+	if (retval)
+		return retval;
+
+	/*
+	 * We only need to perform additional register initialization
+	 * for WMM queues.
+	 */
+	if (queue_idx >= 4)
+		return 0;
+
+	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+
+	/* Update WMM TXOP register */
+	offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
+	field.bit_offset = (queue_idx & 1) * 16;
+	field.bit_mask = 0xffff << field.bit_offset;
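+	/*
+	 * Each WMM_TXOPx_CFG register packs the TXOP of two queues into
+	 * 16 bit halves; the offset and field computed above select the
+	 * half that belongs to queue_idx.
+	 */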
+
+	rt2x00pci_register_read(rt2x00dev, offset, &reg);
+	rt2x00_set_field32(&reg, field, queue->txop);
+	rt2x00pci_register_write(rt2x00dev, offset, reg);
+
+	/* Update WMM registers */
+	field.bit_offset = queue_idx * 4;
+	field.bit_mask = 0xf << field.bit_offset;
+
+	rt2x00pci_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
+	rt2x00_set_field32(&reg, field, queue->aifs);
+	rt2x00pci_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
+	rt2x00_set_field32(&reg, field, queue->cw_min);
+	rt2x00pci_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);
+
+	rt2x00pci_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
+	rt2x00_set_field32(&reg, field, queue->cw_max);
+	rt2x00pci_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);
+
+	/* Update EDCA registers */
+	offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);
+
+	rt2x00pci_register_read(rt2x00dev, offset, &reg);
+	rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
+	rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
+	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
+	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
+	rt2x00pci_register_write(rt2x00dev, offset, reg);
+
+	return 0;
+}
+
+static u64 rt2800pci_get_tsf(struct ieee80211_hw *hw)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	u64 tsf;
+	u32 reg;
+
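+	/*
+	 * The 64 bit TSF is assembled from two 32 bit timer registers:
+	 * TSF_TIMER_DW1 provides the high word, TSF_TIMER_DW0 the low word.
+	 */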
+	rt2x00pci_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
+	tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
+	rt2x00pci_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
+	tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);
+
+	return tsf;
+}
+
+static const struct ieee80211_ops rt2800pci_mac80211_ops = {
+	.tx			= rt2x00mac_tx,
+	.start			= rt2x00mac_start,
+	.stop			= rt2x00mac_stop,
+	.add_interface		= rt2x00mac_add_interface,
+	.remove_interface	= rt2x00mac_remove_interface,
+	.config			= rt2x00mac_config,
+	.configure_filter	= rt2x00mac_configure_filter,
+	.set_key		= rt2x00mac_set_key,
+	.get_stats		= rt2x00mac_get_stats,
+	.get_tkip_seq		= rt2800pci_get_tkip_seq,
+	.set_rts_threshold	= rt2800pci_set_rts_threshold,
+	.bss_info_changed	= rt2x00mac_bss_info_changed,
+	.conf_tx		= rt2800pci_conf_tx,
+	.get_tx_stats		= rt2x00mac_get_tx_stats,
+	.get_tsf		= rt2800pci_get_tsf,
+	.rfkill_poll		= rt2x00mac_rfkill_poll,
+};
+
+static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
+	.irq_handler		= rt2800pci_interrupt,
+	.probe_hw		= rt2800pci_probe_hw,
+	.get_firmware_name	= rt2800pci_get_firmware_name,
+	.check_firmware		= rt2800pci_check_firmware,
+	.load_firmware		= rt2800pci_load_firmware,
+	.initialize		= rt2x00pci_initialize,
+	.uninitialize		= rt2x00pci_uninitialize,
+	.get_entry_state	= rt2800pci_get_entry_state,
+	.clear_entry		= rt2800pci_clear_entry,
+	.set_device_state	= rt2800pci_set_device_state,
+	.rfkill_poll		= rt2800pci_rfkill_poll,
+	.link_stats		= rt2800pci_link_stats,
+	.reset_tuner		= rt2800pci_reset_tuner,
+	.link_tuner		= rt2800pci_link_tuner,
+	.write_tx_desc		= rt2800pci_write_tx_desc,
+	.write_tx_data		= rt2x00pci_write_tx_data,
+	.write_beacon		= rt2800pci_write_beacon,
+	.kick_tx_queue		= rt2800pci_kick_tx_queue,
+	.kill_tx_queue		= rt2800pci_kill_tx_queue,
+	.fill_rxdone		= rt2800pci_fill_rxdone,
+	.config_shared_key	= rt2800pci_config_shared_key,
+	.config_pairwise_key	= rt2800pci_config_pairwise_key,
+	.config_filter		= rt2800pci_config_filter,
+	.config_intf		= rt2800pci_config_intf,
+	.config_erp		= rt2800pci_config_erp,
+	.config_ant		= rt2800pci_config_ant,
+	.config			= rt2800pci_config,
+};
+
+static const struct data_queue_desc rt2800pci_queue_rx = {
+	.entry_num		= RX_ENTRIES,
+	.data_size		= AGGREGATION_SIZE,
+	.desc_size		= RXD_DESC_SIZE,
+	.priv_size		= sizeof(struct queue_entry_priv_pci),
+};
+
+static const struct data_queue_desc rt2800pci_queue_tx = {
+	.entry_num		= TX_ENTRIES,
+	.data_size		= AGGREGATION_SIZE,
+	.desc_size		= TXD_DESC_SIZE,
+	.priv_size		= sizeof(struct queue_entry_priv_pci),
+};
+
+static const struct data_queue_desc rt2800pci_queue_bcn = {
+	.entry_num		= 8 * BEACON_ENTRIES,
+	.data_size		= 0, /* No DMA required for beacons */
+	.desc_size		= TXWI_DESC_SIZE,
+	.priv_size		= sizeof(struct queue_entry_priv_pci),
+};
+
+static const struct rt2x00_ops rt2800pci_ops = {
+	.name		= KBUILD_MODNAME,
+	.max_sta_intf	= 1,
+	.max_ap_intf	= 8,
+	.eeprom_size	= EEPROM_SIZE,
+	.rf_size	= RF_SIZE,
+	.tx_queues	= NUM_TX_QUEUES,
+	.rx		= &rt2800pci_queue_rx,
+	.tx		= &rt2800pci_queue_tx,
+	.bcn		= &rt2800pci_queue_bcn,
+	.lib		= &rt2800pci_rt2x00_ops,
+	.hw		= &rt2800pci_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+	.debugfs	= &rt2800pci_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
+/*
+ * RT2800pci module information.
+ */
+static struct pci_device_id rt2800pci_device_table[] = {
+	{ PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7738), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
+	{ 0, }
+};
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
+MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
+#ifdef CONFIG_RT2800PCI_PCI
+MODULE_FIRMWARE(FIRMWARE_RT2860);
+MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
+#endif /* CONFIG_RT2800PCI_PCI */
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_RT2800PCI_WISOC
+#if defined(CONFIG_RALINK_RT288X)
+__rt2x00soc_probe(RT2880, &rt2800pci_ops);
+#elif defined(CONFIG_RALINK_RT305X)
+__rt2x00soc_probe(RT3052, &rt2800pci_ops);
+#endif
+
+static struct platform_driver rt2800soc_driver = {
+	.driver		= {
+		.name		= "rt2800_wmac",
+		.owner		= THIS_MODULE,
+		.mod_name	= KBUILD_MODNAME,
+	},
+	.probe		= __rt2x00soc_probe,
+	.remove		= __devexit_p(rt2x00soc_remove),
+	.suspend	= rt2x00soc_suspend,
+	.resume		= rt2x00soc_resume,
+};
+#endif /* CONFIG_RT2800PCI_WISOC */
+
+#ifdef CONFIG_RT2800PCI_PCI
+static struct pci_driver rt2800pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= rt2800pci_device_table,
+	.probe		= rt2x00pci_probe,
+	.remove		= __devexit_p(rt2x00pci_remove),
+	.suspend	= rt2x00pci_suspend,
+	.resume		= rt2x00pci_resume,
+};
+#endif /* CONFIG_RT2800PCI_PCI */
+
+static int __init rt2800pci_init(void)
+{
+	int ret = 0;
+
+#ifdef CONFIG_RT2800PCI_WISOC
+	ret = platform_driver_register(&rt2800soc_driver);
+	if (ret)
+		return ret;
+#endif
+#ifdef CONFIG_RT2800PCI_PCI
+	ret = pci_register_driver(&rt2800pci_driver);
+	if (ret) {
+#ifdef CONFIG_RT2800PCI_WISOC
+		platform_driver_unregister(&rt2800soc_driver);
+#endif
+		return ret;
+	}
+#endif
+
+	return ret;
+}
+
+static void __exit rt2800pci_exit(void)
+{
+#ifdef CONFIG_RT2800PCI_PCI
+	pci_unregister_driver(&rt2800pci_driver);
+#endif
+#ifdef CONFIG_RT2800PCI_WISOC
+	platform_driver_unregister(&rt2800soc_driver);
+#endif
+}
+
+module_init(rt2800pci_init);
+module_exit(rt2800pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
new file mode 100644
index 0000000..8569088
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -0,0 +1,1960 @@
+/*
+	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2800pci
+	Abstract: Data structures and registers for the rt2800pci module.
+	Supported chipsets: RT2800E & RT2800ED.
+ */
+
+#ifndef RT2800PCI_H
+#define RT2800PCI_H
+
+/*
+ * RF chip defines.
+ *
+ * RF2820 2.4G 2T3R
+ * RF2850 2.4G/5G 2T3R
+ * RF2720 2.4G 1T2R
+ * RF2750 2.4G/5G 1T2R
+ * RF3020 2.4G 1T1R
+ * RF2020 2.4G B/G
+ * RF3021 2.4G 1T2R
+ * RF3022 2.4G 2T2R
+ * RF3052 2.4G 2T2R
+ */
+#define RF2820				0x0001
+#define RF2850				0x0002
+#define RF2720				0x0003
+#define RF2750				0x0004
+#define RF3020				0x0005
+#define RF2020				0x0006
+#define RF3021				0x0007
+#define RF3022				0x0008
+#define RF3052				0x0009
+
+/*
+ * RT2860 version
+ */
+#define RT2860C_VERSION			0x28600100
+#define RT2860D_VERSION			0x28600101
+#define RT2880E_VERSION			0x28720200
+#define RT2883_VERSION			0x28830300
+#define RT3070_VERSION			0x30700200
+
+/*
+ * Signal information.
+ * Default offset is required for RSSI <-> dBm conversion.
+ */
+#define DEFAULT_RSSI_OFFSET		120 /* FIXME */
+
+/*
+ * Register layout information.
+ */
+#define CSR_REG_BASE			0x1000
+#define CSR_REG_SIZE			0x0800
+#define EEPROM_BASE			0x0000
+#define EEPROM_SIZE			0x0110
+#define BBP_BASE			0x0000
+#define BBP_SIZE			0x0080
+#define RF_BASE				0x0004
+#define RF_SIZE				0x0010
+
+/*
+ * Number of TX queues.
+ */
+#define NUM_TX_QUEUES			4
+
+/*
+ * PCI registers.
+ */
+
+/*
+ * E2PROM_CSR: EEPROM control register.
+ * RELOAD: Write 1 to reload eeprom content.
+ * TYPE: 0: 93c46, 1:93c66.
+ * LOAD_STATUS: 1:loading, 0:done.
+ */
+#define E2PROM_CSR			0x0004
+#define E2PROM_CSR_DATA_CLOCK		FIELD32(0x00000001)
+#define E2PROM_CSR_CHIP_SELECT		FIELD32(0x00000002)
+#define E2PROM_CSR_DATA_IN		FIELD32(0x00000004)
+#define E2PROM_CSR_DATA_OUT		FIELD32(0x00000008)
+#define E2PROM_CSR_TYPE			FIELD32(0x00000030)
+#define E2PROM_CSR_LOAD_STATUS		FIELD32(0x00000040)
+#define E2PROM_CSR_RELOAD		FIELD32(0x00000080)
+
+/*
+ * INT_SOURCE_CSR: Interrupt source register.
+ * Write one to clear the corresponding bit.
+ * TX_FIFO_STATUS: the TX statistics FIFO is full, software should read 0x171c
+ */
+#define INT_SOURCE_CSR			0x0200
+#define INT_SOURCE_CSR_RXDELAYINT	FIELD32(0x00000001)
+#define INT_SOURCE_CSR_TXDELAYINT	FIELD32(0x00000002)
+#define INT_SOURCE_CSR_RX_DONE		FIELD32(0x00000004)
+#define INT_SOURCE_CSR_AC0_DMA_DONE	FIELD32(0x00000008)
+#define INT_SOURCE_CSR_AC1_DMA_DONE	FIELD32(0x00000010)
+#define INT_SOURCE_CSR_AC2_DMA_DONE	FIELD32(0x00000020)
+#define INT_SOURCE_CSR_AC3_DMA_DONE	FIELD32(0x00000040)
+#define INT_SOURCE_CSR_HCCA_DMA_DONE	FIELD32(0x00000080)
+#define INT_SOURCE_CSR_MGMT_DMA_DONE	FIELD32(0x00000100)
+#define INT_SOURCE_CSR_MCU_COMMAND	FIELD32(0x00000200)
+#define INT_SOURCE_CSR_RXTX_COHERENT	FIELD32(0x00000400)
+#define INT_SOURCE_CSR_TBTT		FIELD32(0x00000800)
+#define INT_SOURCE_CSR_PRE_TBTT		FIELD32(0x00001000)
+#define INT_SOURCE_CSR_TX_FIFO_STATUS	FIELD32(0x00002000)
+#define INT_SOURCE_CSR_AUTO_WAKEUP	FIELD32(0x00004000)
+#define INT_SOURCE_CSR_GPTIMER		FIELD32(0x00008000)
+#define INT_SOURCE_CSR_RX_COHERENT	FIELD32(0x00010000)
+#define INT_SOURCE_CSR_TX_COHERENT	FIELD32(0x00020000)
+
+/*
+ * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is masked OFF.
+ */
+#define INT_MASK_CSR			0x0204
+#define INT_MASK_CSR_RXDELAYINT		FIELD32(0x00000001)
+#define INT_MASK_CSR_TXDELAYINT		FIELD32(0x00000002)
+#define INT_MASK_CSR_RX_DONE		FIELD32(0x00000004)
+#define INT_MASK_CSR_AC0_DMA_DONE	FIELD32(0x00000008)
+#define INT_MASK_CSR_AC1_DMA_DONE	FIELD32(0x00000010)
+#define INT_MASK_CSR_AC2_DMA_DONE	FIELD32(0x00000020)
+#define INT_MASK_CSR_AC3_DMA_DONE	FIELD32(0x00000040)
+#define INT_MASK_CSR_HCCA_DMA_DONE	FIELD32(0x00000080)
+#define INT_MASK_CSR_MGMT_DMA_DONE	FIELD32(0x00000100)
+#define INT_MASK_CSR_MCU_COMMAND	FIELD32(0x00000200)
+#define INT_MASK_CSR_RXTX_COHERENT	FIELD32(0x00000400)
+#define INT_MASK_CSR_TBTT		FIELD32(0x00000800)
+#define INT_MASK_CSR_PRE_TBTT		FIELD32(0x00001000)
+#define INT_MASK_CSR_TX_FIFO_STATUS	FIELD32(0x00002000)
+#define INT_MASK_CSR_AUTO_WAKEUP	FIELD32(0x00004000)
+#define INT_MASK_CSR_GPTIMER		FIELD32(0x00008000)
+#define INT_MASK_CSR_RX_COHERENT	FIELD32(0x00010000)
+#define INT_MASK_CSR_TX_COHERENT	FIELD32(0x00020000)
+
+/*
+ * WPDMA_GLO_CFG
+ */
+#define WPDMA_GLO_CFG 			0x0208
+#define WPDMA_GLO_CFG_ENABLE_TX_DMA	FIELD32(0x00000001)
+#define WPDMA_GLO_CFG_TX_DMA_BUSY    	FIELD32(0x00000002)
+#define WPDMA_GLO_CFG_ENABLE_RX_DMA	FIELD32(0x00000004)
+#define WPDMA_GLO_CFG_RX_DMA_BUSY	FIELD32(0x00000008)
+#define WPDMA_GLO_CFG_WP_DMA_BURST_SIZE	FIELD32(0x00000030)
+#define WPDMA_GLO_CFG_TX_WRITEBACK_DONE	FIELD32(0x00000040)
+#define WPDMA_GLO_CFG_BIG_ENDIAN	FIELD32(0x00000080)
+#define WPDMA_GLO_CFG_RX_HDR_SCATTER	FIELD32(0x0000ff00)
+#define WPDMA_GLO_CFG_HDR_SEG_LEN	FIELD32(0xffff0000)
+
+/*
+ * WPDMA_RST_IDX
+ */
+#define WPDMA_RST_IDX 			0x020c
+#define WPDMA_RST_IDX_DTX_IDX0		FIELD32(0x00000001)
+#define WPDMA_RST_IDX_DTX_IDX1		FIELD32(0x00000002)
+#define WPDMA_RST_IDX_DTX_IDX2		FIELD32(0x00000004)
+#define WPDMA_RST_IDX_DTX_IDX3		FIELD32(0x00000008)
+#define WPDMA_RST_IDX_DTX_IDX4		FIELD32(0x00000010)
+#define WPDMA_RST_IDX_DTX_IDX5		FIELD32(0x00000020)
+#define WPDMA_RST_IDX_DRX_IDX0		FIELD32(0x00010000)
+
+/*
+ * DELAY_INT_CFG
+ */
+#define DELAY_INT_CFG			0x0210
+#define DELAY_INT_CFG_RXMAX_PTIME	FIELD32(0x000000ff)
+#define DELAY_INT_CFG_RXMAX_PINT	FIELD32(0x00007f00)
+#define DELAY_INT_CFG_RXDLY_INT_EN	FIELD32(0x00008000)
+#define DELAY_INT_CFG_TXMAX_PTIME	FIELD32(0x00ff0000)
+#define DELAY_INT_CFG_TXMAX_PINT	FIELD32(0x7f000000)
+#define DELAY_INT_CFG_TXDLY_INT_EN	FIELD32(0x80000000)
+
+/*
+ * WMM_AIFSN_CFG: Aifsn for each EDCA AC
+ * AIFSN0: AC_BE
+ * AIFSN1: AC_BK
+ * AIFSN2: AC_VI
+ * AIFSN3: AC_VO
+ */
+#define WMM_AIFSN_CFG			0x0214
+#define WMM_AIFSN_CFG_AIFSN0		FIELD32(0x0000000f)
+#define WMM_AIFSN_CFG_AIFSN1		FIELD32(0x000000f0)
+#define WMM_AIFSN_CFG_AIFSN2		FIELD32(0x00000f00)
+#define WMM_AIFSN_CFG_AIFSN3		FIELD32(0x0000f000)
+
+/*
+ * WMM_CWMIN_CFG: CWmin for each EDCA AC
+ * CWMIN0: AC_BE
+ * CWMIN1: AC_BK
+ * CWMIN2: AC_VI
+ * CWMIN3: AC_VO
+ */
+#define WMM_CWMIN_CFG			0x0218
+#define WMM_CWMIN_CFG_CWMIN0		FIELD32(0x0000000f)
+#define WMM_CWMIN_CFG_CWMIN1		FIELD32(0x000000f0)
+#define WMM_CWMIN_CFG_CWMIN2		FIELD32(0x00000f00)
+#define WMM_CWMIN_CFG_CWMIN3		FIELD32(0x0000f000)
+
+/*
+ * WMM_CWMAX_CFG: CWmax for each EDCA AC
+ * CWMAX0: AC_BE
+ * CWMAX1: AC_BK
+ * CWMAX2: AC_VI
+ * CWMAX3: AC_VO
+ */
+#define WMM_CWMAX_CFG			0x021c
+#define WMM_CWMAX_CFG_CWMAX0		FIELD32(0x0000000f)
+#define WMM_CWMAX_CFG_CWMAX1		FIELD32(0x000000f0)
+#define WMM_CWMAX_CFG_CWMAX2		FIELD32(0x00000f00)
+#define WMM_CWMAX_CFG_CWMAX3		FIELD32(0x0000f000)
+
+/*
+ * WMM_TXOP0_CFG: AC_BK/AC_BE TXOP register
+ * AC0TXOP: AC_BK in unit of 32us
+ * AC1TXOP: AC_BE in unit of 32us
+ */
+#define WMM_TXOP0_CFG			0x0220
+#define WMM_TXOP0_CFG_AC0TXOP		FIELD32(0x0000ffff)
+#define WMM_TXOP0_CFG_AC1TXOP		FIELD32(0xffff0000)
+
+/*
+ * WMM_TXOP1_CFG: AC_VI/AC_VO TXOP register
+ * AC2TXOP: AC_VI in unit of 32us
+ * AC3TXOP: AC_VO in unit of 32us
+ */
+#define WMM_TXOP1_CFG			0x0224
+#define WMM_TXOP1_CFG_AC2TXOP		FIELD32(0x0000ffff)
+#define WMM_TXOP1_CFG_AC3TXOP		FIELD32(0xffff0000)
+
+/*
+ * GPIO_CTRL_CFG:
+ */
+#define GPIO_CTRL_CFG			0x0228
+#define GPIO_CTRL_CFG_BIT0		FIELD32(0x00000001)
+#define GPIO_CTRL_CFG_BIT1		FIELD32(0x00000002)
+#define GPIO_CTRL_CFG_BIT2		FIELD32(0x00000004)
+#define GPIO_CTRL_CFG_BIT3		FIELD32(0x00000008)
+#define GPIO_CTRL_CFG_BIT4		FIELD32(0x00000010)
+#define GPIO_CTRL_CFG_BIT5		FIELD32(0x00000020)
+#define GPIO_CTRL_CFG_BIT6		FIELD32(0x00000040)
+#define GPIO_CTRL_CFG_BIT7		FIELD32(0x00000080)
+#define GPIO_CTRL_CFG_BIT8		FIELD32(0x00000100)
+
+/*
+ * MCU_CMD_CFG
+ */
+#define MCU_CMD_CFG			0x022c
+
+/*
+ * AC_BK register offsets
+ */
+#define TX_BASE_PTR0			0x0230
+#define TX_MAX_CNT0			0x0234
+#define TX_CTX_IDX0			0x0238
+#define TX_DTX_IDX0			0x023c
+
+/*
+ * AC_BE register offsets
+ */
+#define TX_BASE_PTR1			0x0240
+#define TX_MAX_CNT1			0x0244
+#define TX_CTX_IDX1			0x0248
+#define TX_DTX_IDX1			0x024c
+
+/*
+ * AC_VI register offsets
+ */
+#define TX_BASE_PTR2			0x0250
+#define TX_MAX_CNT2			0x0254
+#define TX_CTX_IDX2			0x0258
+#define TX_DTX_IDX2			0x025c
+
+/*
+ * AC_VO register offsets
+ */
+#define TX_BASE_PTR3			0x0260
+#define TX_MAX_CNT3			0x0264
+#define TX_CTX_IDX3			0x0268
+#define TX_DTX_IDX3			0x026c
+
+/*
+ * HCCA register offsets
+ */
+#define TX_BASE_PTR4			0x0270
+#define TX_MAX_CNT4			0x0274
+#define TX_CTX_IDX4			0x0278
+#define TX_DTX_IDX4			0x027c
+
+/*
+ * MGMT register offsets
+ */
+#define TX_BASE_PTR5			0x0280
+#define TX_MAX_CNT5			0x0284
+#define TX_CTX_IDX5			0x0288
+#define TX_DTX_IDX5			0x028c
+
+/*
+ * Queue register offset macros
+ */
+#define TX_QUEUE_REG_OFFSET		0x10
+#define TX_BASE_PTR(__x)		(TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_MAX_CNT(__x)			(TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_CTX_IDX(__x)			(TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_DTX_IDX(__x)			(TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
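+
+/*
+ * Usage sketch (illustrative only): each TX queue occupies a 0x10 byte
+ * register block, so the per-queue registers are reached by adding the
+ * queue index times TX_QUEUE_REG_OFFSET to the queue 0 address, e.g.
+ * TX_BASE_PTR(2) == 0x0250 and TX_CTX_IDX(5) == 0x0288.
+ */
+#if 0
+static void example_set_tx_base(void __iomem *base, unsigned int qid,
+				u32 dma_addr)
+{
+	/* Program the descriptor ring base address of TX queue @qid. */
+	writel(dma_addr, base + TX_BASE_PTR(qid));
+}
+#endif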
+
+/*
+ * RX register offsets
+ */
+#define RX_BASE_PTR			0x0290
+#define RX_MAX_CNT			0x0294
+#define RX_CRX_IDX			0x0298
+#define RX_DRX_IDX			0x029c
+
+/*
+ * PBF_SYS_CTRL
+ * HOST_RAM_WRITE: enable Host program ram write selection
+ */
+#define PBF_SYS_CTRL			0x0400
+#define PBF_SYS_CTRL_READY		FIELD32(0x00000080)
+#define PBF_SYS_CTRL_HOST_RAM_WRITE	FIELD32(0x00010000)
+
+/*
+ * HOST-MCU shared memory
+ */
+#define HOST_CMD_CSR			0x0404
+#define HOST_CMD_CSR_HOST_COMMAND	FIELD32(0x000000ff)
+
+/*
+ * PBF registers
+ * Most are for debug. Driver doesn't touch PBF register.
+ */
+#define PBF_CFG				0x0408
+#define PBF_MAX_PCNT			0x040c
+#define PBF_CTRL			0x0410
+#define PBF_INT_STA			0x0414
+#define PBF_INT_ENA			0x0418
+
+/*
+ * BCN_OFFSET0:
+ */
+#define BCN_OFFSET0			0x042c
+#define BCN_OFFSET0_BCN0		FIELD32(0x000000ff)
+#define BCN_OFFSET0_BCN1		FIELD32(0x0000ff00)
+#define BCN_OFFSET0_BCN2		FIELD32(0x00ff0000)
+#define BCN_OFFSET0_BCN3		FIELD32(0xff000000)
+
+/*
+ * BCN_OFFSET1:
+ */
+#define BCN_OFFSET1			0x0430
+#define BCN_OFFSET1_BCN4		FIELD32(0x000000ff)
+#define BCN_OFFSET1_BCN5		FIELD32(0x0000ff00)
+#define BCN_OFFSET1_BCN6		FIELD32(0x00ff0000)
+#define BCN_OFFSET1_BCN7		FIELD32(0xff000000)
+
+/*
+ * PBF registers
+ * Most are for debug. Driver doesn't touch PBF register.
+ */
+#define TXRXQ_PCNT			0x0438
+#define PBF_DBG				0x043c
+
+/*
+ * RF registers
+ */
+#define	RF_CSR_CFG			0x0500
+#define RF_CSR_CFG_DATA			FIELD32(0x000000ff)
+#define RF_CSR_CFG_REGNUM		FIELD32(0x00001f00)
+#define RF_CSR_CFG_WRITE		FIELD32(0x00010000)
+#define RF_CSR_CFG_BUSY			FIELD32(0x00020000)
+
+/*
+ * EFUSE_CTRL: RT3090 eFuse EEPROM control
+ */
+#define EFUSE_CTRL			0x0580
+#define EFUSE_CTRL_ADDRESS_IN		FIELD32(0x03fe0000)
+#define EFUSE_CTRL_MODE			FIELD32(0x000000c0)
+#define EFUSE_CTRL_KICK			FIELD32(0x40000000)
+
+/*
+ * EFUSE_DATA0
+ */
+#define EFUSE_DATA0			0x0590
+
+/*
+ * EFUSE_DATA1
+ */
+#define EFUSE_DATA1			0x0594
+
+/*
+ * EFUSE_DATA2
+ */
+#define EFUSE_DATA2			0x0598
+
+/*
+ * EFUSE_DATA3
+ */
+#define EFUSE_DATA3			0x059c
+
+/*
+ * MAC Control/Status Registers(CSR).
+ * Some values are set in TU, where 1 TU == 1024 us.
+ */
+
+/*
+ * MAC_CSR0: ASIC revision number.
+ * ASIC_REV: 0
+ * ASIC_VER: 2860
+ */
+#define MAC_CSR0			0x1000
+#define MAC_CSR0_ASIC_REV		FIELD32(0x0000ffff)
+#define MAC_CSR0_ASIC_VER		FIELD32(0xffff0000)
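+
+/*
+ * Illustrative sketch: the chipset is identified by splitting MAC_CSR0
+ * into its two halves.  rt2x00_get_field32() is the rt2x00 field
+ * helper; readl() stands in for the driver's register accessor.
+ */
+#if 0
+static u16 example_read_asic_version(void __iomem *base)
+{
+	u32 reg = readl(base + MAC_CSR0);
+
+	/* e.g. 0x2860, with the revision in MAC_CSR0_ASIC_REV */
+	return rt2x00_get_field32(reg, MAC_CSR0_ASIC_VER);
+}
+#endif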
+
+/*
+ * MAC_SYS_CTRL:
+ */
+#define MAC_SYS_CTRL			0x1004
+#define MAC_SYS_CTRL_RESET_CSR		FIELD32(0x00000001)
+#define MAC_SYS_CTRL_RESET_BBP		FIELD32(0x00000002)
+#define MAC_SYS_CTRL_ENABLE_TX		FIELD32(0x00000004)
+#define MAC_SYS_CTRL_ENABLE_RX		FIELD32(0x00000008)
+#define MAC_SYS_CTRL_CONTINUOUS_TX	FIELD32(0x00000010)
+#define MAC_SYS_CTRL_LOOPBACK		FIELD32(0x00000020)
+#define MAC_SYS_CTRL_WLAN_HALT		FIELD32(0x00000040)
+#define MAC_SYS_CTRL_RX_TIMESTAMP	FIELD32(0x00000080)
+
+/*
+ * MAC_ADDR_DW0: STA MAC register 0
+ */
+#define MAC_ADDR_DW0			0x1008
+#define MAC_ADDR_DW0_BYTE0		FIELD32(0x000000ff)
+#define MAC_ADDR_DW0_BYTE1		FIELD32(0x0000ff00)
+#define MAC_ADDR_DW0_BYTE2		FIELD32(0x00ff0000)
+#define MAC_ADDR_DW0_BYTE3		FIELD32(0xff000000)
+
+/*
+ * MAC_ADDR_DW1: STA MAC register 1
+ * UNICAST_TO_ME_MASK:
+ * Used to mask off bits from byte 5 of the MAC address
+ * to determine the UNICAST_TO_ME bit for RX frames.
+ * The full mask is complemented by BSS_ID_MASK:
+ *    MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
+ */
+#define MAC_ADDR_DW1			0x100c
+#define MAC_ADDR_DW1_BYTE4		FIELD32(0x000000ff)
+#define MAC_ADDR_DW1_BYTE5		FIELD32(0x0000ff00)
+#define MAC_ADDR_DW1_UNICAST_TO_ME_MASK	FIELD32(0x00ff0000)
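+
+/*
+ * Illustrative sketch: the 6-byte station address is spread over the
+ * two MAC_ADDR_DW* registers, one byte per field.  rt2x00_set_field32()
+ * is the rt2x00 field helper; writel() stands in for the driver's
+ * register accessor.
+ */
+#if 0
+static void example_set_mac_addr(void __iomem *base, const u8 *addr)
+{
+	u32 reg = 0;
+
+	rt2x00_set_field32(&reg, MAC_ADDR_DW0_BYTE0, addr[0]);
+	rt2x00_set_field32(&reg, MAC_ADDR_DW0_BYTE1, addr[1]);
+	rt2x00_set_field32(&reg, MAC_ADDR_DW0_BYTE2, addr[2]);
+	rt2x00_set_field32(&reg, MAC_ADDR_DW0_BYTE3, addr[3]);
+	writel(reg, base + MAC_ADDR_DW0);
+
+	reg = 0;
+	rt2x00_set_field32(&reg, MAC_ADDR_DW1_BYTE4, addr[4]);
+	rt2x00_set_field32(&reg, MAC_ADDR_DW1_BYTE5, addr[5]);
+	/* Match all bits of byte 5 when checking UNICAST_TO_ME. */
+	rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
+	writel(reg, base + MAC_ADDR_DW1);
+}
+#endif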
+
+/*
+ * MAC_BSSID_DW0: BSSID register 0
+ */
+#define MAC_BSSID_DW0			0x1010
+#define MAC_BSSID_DW0_BYTE0		FIELD32(0x000000ff)
+#define MAC_BSSID_DW0_BYTE1		FIELD32(0x0000ff00)
+#define MAC_BSSID_DW0_BYTE2		FIELD32(0x00ff0000)
+#define MAC_BSSID_DW0_BYTE3		FIELD32(0xff000000)
+
+/*
+ * MAC_BSSID_DW1: BSSID register 1
+ * BSS_ID_MASK:
+ *     0: 1-BSSID mode (BSS index = 0)
+ *     1: 2-BSSID mode (BSS index: Byte5, bit 0)
+ *     2: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
+ *     3: 8-BSSID mode (BSS index: byte5, bit 0 - 2)
+ * This mask is used to mask off bits 0, 1 and 2 of byte 5 of the
+ * BSSID. This will make sure that those bits will be ignored
+ * when determining the MY_BSS of RX frames.
+ */
+#define MAC_BSSID_DW1			0x1014
+#define MAC_BSSID_DW1_BYTE4		FIELD32(0x000000ff)
+#define MAC_BSSID_DW1_BYTE5		FIELD32(0x0000ff00)
+#define MAC_BSSID_DW1_BSS_ID_MASK	FIELD32(0x00030000)
+#define MAC_BSSID_DW1_BSS_BCN_NUM	FIELD32(0x001c0000)
+
+/*
+ * MAX_LEN_CFG: Maximum frame length register.
+ * MAX_MPDU: rt2860b max 16k bytes
+ * MAX_PSDU: Maximum PSDU length
+ *	(power factor) 0:2^13, 1:2^14, 2:2^15, 3:2^16
+ */
+#define MAX_LEN_CFG			0x1018
+#define MAX_LEN_CFG_MAX_MPDU		FIELD32(0x00000fff)
+#define MAX_LEN_CFG_MAX_PSDU		FIELD32(0x00003000)
+#define MAX_LEN_CFG_MIN_PSDU		FIELD32(0x0000c000)
+#define MAX_LEN_CFG_MIN_MPDU		FIELD32(0x000f0000)
+
+/*
+ * BBP_CSR_CFG: BBP serial control register
+ * VALUE: Register value to program into BBP
+ * REG_NUM: Selected BBP register
+ * READ_CONTROL: 0 write BBP, 1 read BBP
+ * BUSY: ASIC is busy executing BBP commands
+ * BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
+ * BBP_RW_MODE: 0 serial, 1 parallel
+ */
+#define BBP_CSR_CFG			0x101c
+#define BBP_CSR_CFG_VALUE		FIELD32(0x000000ff)
+#define BBP_CSR_CFG_REGNUM		FIELD32(0x0000ff00)
+#define BBP_CSR_CFG_READ_CONTROL	FIELD32(0x00010000)
+#define BBP_CSR_CFG_BUSY		FIELD32(0x00020000)
+#define BBP_CSR_CFG_BBP_PAR_DUR		FIELD32(0x00040000)
+#define BBP_CSR_CFG_BBP_RW_MODE		FIELD32(0x00080000)
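+
+/*
+ * Illustrative sketch of a BBP write through BBP_CSR_CFG: wait until
+ * the BUSY bit clears, then program value, register number and the
+ * write direction in a single access.  Timeout handling is omitted and
+ * the accessors are placeholders for the driver's own wrappers.
+ */
+#if 0
+static void example_bbp_write(void __iomem *base, u8 regnum, u8 value)
+{
+	u32 reg;
+
+	do {
+		reg = readl(base + BBP_CSR_CFG);
+	} while (rt2x00_get_field32(reg, BBP_CSR_CFG_BUSY));
+
+	reg = 0;
+	rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
+	rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, regnum);
+	rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0); /* 0 = write */
+	rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
+	writel(reg, base + BBP_CSR_CFG);
+}
+#endif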
+
+/*
+ * RF_CSR_CFG0: RF control register
+ * REGID_AND_VALUE: Register value to program into RF
+ * BITWIDTH: Selected RF register
+ * STANDBYMODE: 0 high when standby, 1 low when standby
+ * SEL: 0 RF_LE0 activate, 1 RF_LE1 activate
+ * BUSY: ASIC is busy executing RF commands
+ */
+#define RF_CSR_CFG0			0x1020
+#define RF_CSR_CFG0_REGID_AND_VALUE	FIELD32(0x00ffffff)
+#define RF_CSR_CFG0_BITWIDTH		FIELD32(0x1f000000)
+#define RF_CSR_CFG0_REG_VALUE_BW	FIELD32(0x1fffffff)
+#define RF_CSR_CFG0_STANDBYMODE		FIELD32(0x20000000)
+#define RF_CSR_CFG0_SEL			FIELD32(0x40000000)
+#define RF_CSR_CFG0_BUSY		FIELD32(0x80000000)
+
+/*
+ * RF_CSR_CFG1: RF control register
+ * REGID_AND_VALUE: Register value to program into RF
+ * RFGAP: Gap between BB_CONTROL_RF and RF_LE
+ *        0: 3 system clock cycles (37.5 usec)
+ *        1: 5 system clock cycles (62.5 usec)
+ */
+#define RF_CSR_CFG1			0x1024
+#define RF_CSR_CFG1_REGID_AND_VALUE	FIELD32(0x00ffffff)
+#define RF_CSR_CFG1_RFGAP		FIELD32(0x1f000000)
+
+/*
+ * RF_CSR_CFG2: RF control register
+ * VALUE: Register value to program into RF
+ * RFGAP: Gap between BB_CONTROL_RF and RF_LE
+ *        0: 3 system clock cycles (37.5 usec)
+ *        1: 5 system clock cycles (62.5 usec)
+ */
+#define RF_CSR_CFG2			0x1028
+#define RF_CSR_CFG2_VALUE		FIELD32(0x00ffffff)
+
+/*
+ * LED_CFG: LED control
+ * color LED's:
+ *   0: off
+ *   1: blinking upon TX2
+ *   2: periodic slow blinking
+ *   3: always on
+ * LED polarity:
+ *   0: active low
+ *   1: active high
+ */
+#define LED_CFG				0x102c
+#define LED_CFG_ON_PERIOD		FIELD32(0x000000ff)
+#define LED_CFG_OFF_PERIOD		FIELD32(0x0000ff00)
+#define LED_CFG_SLOW_BLINK_PERIOD	FIELD32(0x003f0000)
+#define LED_CFG_R_LED_MODE		FIELD32(0x03000000)
+#define LED_CFG_G_LED_MODE		FIELD32(0x0c000000)
+#define LED_CFG_Y_LED_MODE		FIELD32(0x30000000)
+#define LED_CFG_LED_POLAR		FIELD32(0x40000000)
+
+/*
+ * XIFS_TIME_CFG: MAC timing
+ * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
+ * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
+ * OFDM_XIFS_TIME: unit 1us. Applied after OFDM RX
+ *	when MAC doesn't reference BBP signal BBRXEND
+ * EIFS: unit 1us
+ * BB_RXEND_ENABLE: reference RXEND signal to begin XIFS defer
+ *
+ */
+#define XIFS_TIME_CFG			0x1100
+#define XIFS_TIME_CFG_CCKM_SIFS_TIME	FIELD32(0x000000ff)
+#define XIFS_TIME_CFG_OFDM_SIFS_TIME	FIELD32(0x0000ff00)
+#define XIFS_TIME_CFG_OFDM_XIFS_TIME	FIELD32(0x000f0000)
+#define XIFS_TIME_CFG_EIFS		FIELD32(0x1ff00000)
+#define XIFS_TIME_CFG_BB_RXEND_ENABLE	FIELD32(0x20000000)
+
+/*
+ * BKOFF_SLOT_CFG:
+ */
+#define BKOFF_SLOT_CFG			0x1104
+#define BKOFF_SLOT_CFG_SLOT_TIME	FIELD32(0x000000ff)
+#define BKOFF_SLOT_CFG_CC_DELAY_TIME	FIELD32(0x0000ff00)
+
+/*
+ * NAV_TIME_CFG:
+ */
+#define NAV_TIME_CFG			0x1108
+#define NAV_TIME_CFG_SIFS		FIELD32(0x000000ff)
+#define NAV_TIME_CFG_SLOT_TIME		FIELD32(0x0000ff00)
+#define NAV_TIME_CFG_EIFS		FIELD32(0x01ff0000)
+#define NAV_TIME_ZERO_SIFS		FIELD32(0x02000000)
+
+/*
+ * CH_TIME_CFG: count as channel busy
+ */
+#define CH_TIME_CFG     	        0x110c
+
+/*
+ * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
+ */
+#define PBF_LIFE_TIMER     	        0x1110
+
+/*
+ * BCN_TIME_CFG:
+ * BEACON_INTERVAL: in unit of 1/16 TU
+ * TSF_TICKING: Enable TSF auto counting
+ * TSF_SYNC: Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode
+ * BEACON_GEN: Enable beacon generator
+ */
+#define BCN_TIME_CFG			0x1114
+#define BCN_TIME_CFG_BEACON_INTERVAL	FIELD32(0x0000ffff)
+#define BCN_TIME_CFG_TSF_TICKING	FIELD32(0x00010000)
+#define BCN_TIME_CFG_TSF_SYNC		FIELD32(0x00060000)
+#define BCN_TIME_CFG_TBTT_ENABLE	FIELD32(0x00080000)
+#define BCN_TIME_CFG_BEACON_GEN		FIELD32(0x00100000)
+#define BCN_TIME_CFG_TX_TIME_COMPENSATE	FIELD32(0xf0000000)
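+
+/*
+ * Illustrative sketch: BEACON_INTERVAL is programmed in units of
+ * 1/16 TU, so an interval given in TU is multiplied by 16 before it is
+ * written.  Accessors are placeholders for the driver's own wrappers.
+ */
+#if 0
+static void example_set_beacon_interval(void __iomem *base, u16 interval_tu)
+{
+	u32 reg = readl(base + BCN_TIME_CFG);
+
+	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+			   interval_tu * 16);
+	writel(reg, base + BCN_TIME_CFG);
+}
+#endif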
+
+/*
+ * TBTT_SYNC_CFG:
+ */
+#define TBTT_SYNC_CFG			0x1118
+
+/*
+ * TSF_TIMER_DW0: Local lsb TSF timer, read-only
+ */
+#define TSF_TIMER_DW0			0x111c
+#define TSF_TIMER_DW0_LOW_WORD		FIELD32(0xffffffff)
+
+/*
+ * TSF_TIMER_DW1: Local msb TSF timer, read-only
+ */
+#define TSF_TIMER_DW1			0x1120
+#define TSF_TIMER_DW1_HIGH_WORD		FIELD32(0xffffffff)
+
+/*
+ * TBTT_TIMER: Timer remaining until the next TBTT, read-only
+ */
+#define TBTT_TIMER			0x1124
+
+/*
+ * INT_TIMER_CFG:
+ */
+#define INT_TIMER_CFG			0x1128
+
+/*
+ * INT_TIMER_EN: GP-timer and pre-tbtt Int enable
+ */
+#define INT_TIMER_EN			0x112c
+
+/*
+ * CH_IDLE_STA: channel idle time
+ */
+#define CH_IDLE_STA			0x1130
+
+/*
+ * CH_BUSY_STA: channel busy time
+ */
+#define CH_BUSY_STA			0x1134
+
+/*
+ * MAC_STATUS_CFG:
+ * BBP_RF_BUSY: When set to 0, BBP and RF are stable.
+ *	If 1 or higher, at least one of the two is still busy.
+ */
+#define MAC_STATUS_CFG			0x1200
+#define MAC_STATUS_CFG_BBP_RF_BUSY	FIELD32(0x00000003)
+
+/*
+ * PWR_PIN_CFG:
+ */
+#define PWR_PIN_CFG			0x1204
+
+/*
+ * AUTOWAKEUP_CFG: Manual power control / status register
+ * TBCN_BEFORE_WAKE: ForceWake has higher priority than PutToSleep when both are set
+ * AUTOWAKE: 0:sleep, 1:awake
+ */
+#define AUTOWAKEUP_CFG			0x1208
+#define AUTOWAKEUP_CFG_AUTO_LEAD_TIME	FIELD32(0x000000ff)
+#define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE	FIELD32(0x00007f00)
+#define AUTOWAKEUP_CFG_AUTOWAKE		FIELD32(0x00008000)
+
+/*
+ * EDCA_AC0_CFG:
+ */
+#define EDCA_AC0_CFG			0x1300
+#define EDCA_AC0_CFG_TX_OP		FIELD32(0x000000ff)
+#define EDCA_AC0_CFG_AIFSN		FIELD32(0x00000f00)
+#define EDCA_AC0_CFG_CWMIN		FIELD32(0x0000f000)
+#define EDCA_AC0_CFG_CWMAX		FIELD32(0x000f0000)
+
+/*
+ * EDCA_AC1_CFG:
+ */
+#define EDCA_AC1_CFG			0x1304
+#define EDCA_AC1_CFG_TX_OP		FIELD32(0x000000ff)
+#define EDCA_AC1_CFG_AIFSN		FIELD32(0x00000f00)
+#define EDCA_AC1_CFG_CWMIN		FIELD32(0x0000f000)
+#define EDCA_AC1_CFG_CWMAX		FIELD32(0x000f0000)
+
+/*
+ * EDCA_AC2_CFG:
+ */
+#define EDCA_AC2_CFG			0x1308
+#define EDCA_AC2_CFG_TX_OP		FIELD32(0x000000ff)
+#define EDCA_AC2_CFG_AIFSN		FIELD32(0x00000f00)
+#define EDCA_AC2_CFG_CWMIN		FIELD32(0x0000f000)
+#define EDCA_AC2_CFG_CWMAX		FIELD32(0x000f0000)
+
+/*
+ * EDCA_AC3_CFG:
+ */
+#define EDCA_AC3_CFG			0x130c
+#define EDCA_AC3_CFG_TX_OP		FIELD32(0x000000ff)
+#define EDCA_AC3_CFG_AIFSN		FIELD32(0x00000f00)
+#define EDCA_AC3_CFG_CWMIN		FIELD32(0x0000f000)
+#define EDCA_AC3_CFG_CWMAX		FIELD32(0x000f0000)
+
+/*
+ * EDCA_TID_AC_MAP:
+ */
+#define EDCA_TID_AC_MAP			0x1310
+
+/*
+ * TX_PWR_CFG_0:
+ */
+#define TX_PWR_CFG_0			0x1314
+#define TX_PWR_CFG_0_1MBS		FIELD32(0x0000000f)
+#define TX_PWR_CFG_0_2MBS		FIELD32(0x000000f0)
+#define TX_PWR_CFG_0_55MBS		FIELD32(0x00000f00)
+#define TX_PWR_CFG_0_11MBS		FIELD32(0x0000f000)
+#define TX_PWR_CFG_0_6MBS		FIELD32(0x000f0000)
+#define TX_PWR_CFG_0_9MBS		FIELD32(0x00f00000)
+#define TX_PWR_CFG_0_12MBS		FIELD32(0x0f000000)
+#define TX_PWR_CFG_0_18MBS		FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_1:
+ */
+#define TX_PWR_CFG_1			0x1318
+#define TX_PWR_CFG_1_24MBS		FIELD32(0x0000000f)
+#define TX_PWR_CFG_1_36MBS		FIELD32(0x000000f0)
+#define TX_PWR_CFG_1_48MBS		FIELD32(0x00000f00)
+#define TX_PWR_CFG_1_54MBS		FIELD32(0x0000f000)
+#define TX_PWR_CFG_1_MCS0		FIELD32(0x000f0000)
+#define TX_PWR_CFG_1_MCS1		FIELD32(0x00f00000)
+#define TX_PWR_CFG_1_MCS2		FIELD32(0x0f000000)
+#define TX_PWR_CFG_1_MCS3		FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_2:
+ */
+#define TX_PWR_CFG_2			0x131c
+#define TX_PWR_CFG_2_MCS4		FIELD32(0x0000000f)
+#define TX_PWR_CFG_2_MCS5		FIELD32(0x000000f0)
+#define TX_PWR_CFG_2_MCS6		FIELD32(0x00000f00)
+#define TX_PWR_CFG_2_MCS7		FIELD32(0x0000f000)
+#define TX_PWR_CFG_2_MCS8		FIELD32(0x000f0000)
+#define TX_PWR_CFG_2_MCS9		FIELD32(0x00f00000)
+#define TX_PWR_CFG_2_MCS10		FIELD32(0x0f000000)
+#define TX_PWR_CFG_2_MCS11		FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_3:
+ */
+#define TX_PWR_CFG_3			0x1320
+#define TX_PWR_CFG_3_MCS12		FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_MCS13		FIELD32(0x000000f0)
+#define TX_PWR_CFG_3_MCS14		FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_MCS15		FIELD32(0x0000f000)
+#define TX_PWR_CFG_3_UKNOWN1		FIELD32(0x000f0000)
+#define TX_PWR_CFG_3_UKNOWN2		FIELD32(0x00f00000)
+#define TX_PWR_CFG_3_UKNOWN3		FIELD32(0x0f000000)
+#define TX_PWR_CFG_3_UKNOWN4		FIELD32(0xf0000000)
+
+/*
+ * TX_PWR_CFG_4:
+ */
+#define TX_PWR_CFG_4			0x1324
+#define TX_PWR_CFG_4_UKNOWN5		FIELD32(0x0000000f)
+#define TX_PWR_CFG_4_UKNOWN6		FIELD32(0x000000f0)
+#define TX_PWR_CFG_4_UKNOWN7		FIELD32(0x00000f00)
+#define TX_PWR_CFG_4_UKNOWN8		FIELD32(0x0000f000)
+
+/*
+ * TX_PIN_CFG:
+ */
+#define TX_PIN_CFG			0x1328
+#define TX_PIN_CFG_PA_PE_A0_EN		FIELD32(0x00000001)
+#define TX_PIN_CFG_PA_PE_G0_EN		FIELD32(0x00000002)
+#define TX_PIN_CFG_PA_PE_A1_EN		FIELD32(0x00000004)
+#define TX_PIN_CFG_PA_PE_G1_EN		FIELD32(0x00000008)
+#define TX_PIN_CFG_PA_PE_A0_POL		FIELD32(0x00000010)
+#define TX_PIN_CFG_PA_PE_G0_POL		FIELD32(0x00000020)
+#define TX_PIN_CFG_PA_PE_A1_POL		FIELD32(0x00000040)
+#define TX_PIN_CFG_PA_PE_G1_POL		FIELD32(0x00000080)
+#define TX_PIN_CFG_LNA_PE_A0_EN		FIELD32(0x00000100)
+#define TX_PIN_CFG_LNA_PE_G0_EN		FIELD32(0x00000200)
+#define TX_PIN_CFG_LNA_PE_A1_EN		FIELD32(0x00000400)
+#define TX_PIN_CFG_LNA_PE_G1_EN		FIELD32(0x00000800)
+#define TX_PIN_CFG_LNA_PE_A0_POL	FIELD32(0x00001000)
+#define TX_PIN_CFG_LNA_PE_G0_POL	FIELD32(0x00002000)
+#define TX_PIN_CFG_LNA_PE_A1_POL	FIELD32(0x00004000)
+#define TX_PIN_CFG_LNA_PE_G1_POL	FIELD32(0x00008000)
+#define TX_PIN_CFG_RFTR_EN		FIELD32(0x00010000)
+#define TX_PIN_CFG_RFTR_POL		FIELD32(0x00020000)
+#define TX_PIN_CFG_TRSW_EN		FIELD32(0x00040000)
+#define TX_PIN_CFG_TRSW_POL		FIELD32(0x00080000)
+
+/*
+ * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
+ */
+#define TX_BAND_CFG			0x132c
+#define TX_BAND_CFG_HT40_PLUS		FIELD32(0x00000001)
+#define TX_BAND_CFG_A			FIELD32(0x00000002)
+#define TX_BAND_CFG_BG			FIELD32(0x00000004)
+
+/*
+ * TX_SW_CFG0:
+ */
+#define TX_SW_CFG0			0x1330
+
+/*
+ * TX_SW_CFG1:
+ */
+#define TX_SW_CFG1			0x1334
+
+/*
+ * TX_SW_CFG2:
+ */
+#define TX_SW_CFG2			0x1338
+
+/*
+ * TXOP_THRES_CFG:
+ */
+#define TXOP_THRES_CFG			0x133c
+
+/*
+ * TXOP_CTRL_CFG:
+ */
+#define TXOP_CTRL_CFG			0x1340
+
+/*
+ * TX_RTS_CFG:
+ * RTS_THRES: unit:byte
+ * RTS_FBK_EN: enable rts rate fallback
+ */
+#define TX_RTS_CFG			0x1344
+#define TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT	FIELD32(0x000000ff)
+#define TX_RTS_CFG_RTS_THRES		FIELD32(0x00ffff00)
+#define TX_RTS_CFG_RTS_FBK_EN		FIELD32(0x01000000)
+
+/*
+ * TX_TIMEOUT_CFG:
+ * MPDU_LIFETIME: expiration time = 2^(9+MPDU LIFE TIME) us
+ * RX_ACK_TIMEOUT: unit:slot. Used for TX procedure
+ * TX_OP_TIMEOUT: TXOP timeout value for TXOP truncation.
+ *                it is recommended that:
+ *                (SLOT_TIME) > (TX_OP_TIMEOUT) > (RX_ACK_TIMEOUT)
+ */
+#define TX_TIMEOUT_CFG			0x1348
+#define TX_TIMEOUT_CFG_MPDU_LIFETIME	FIELD32(0x000000f0)
+#define TX_TIMEOUT_CFG_RX_ACK_TIMEOUT	FIELD32(0x0000ff00)
+#define TX_TIMEOUT_CFG_TX_OP_TIMEOUT	FIELD32(0x00ff0000)
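+
+/*
+ * Illustrative helper: MPDU_LIFETIME encodes an expiration time of
+ * 2^(9 + value) microseconds, so e.g. a field value of 6 corresponds
+ * to 2^15 us (roughly 32 ms).
+ */
+#if 0
+static inline u32 example_mpdu_lifetime_us(u8 field_value)
+{
+	return 1U << (9 + field_value);
+}
+#endif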
+
+/*
+ * TX_RTY_CFG:
+ * SHORT_RTY_LIMIT: short retry limit
+ * LONG_RTY_LIMIT: long retry limit
+ * LONG_RTY_THRE: Long retry threshold
+ * NON_AGG_RTY_MODE: Non-Aggregate MPDU retry mode
+ *                   0:expired by retry limit, 1: expired by mpdu life timer
+ * AGG_RTY_MODE: Aggregate MPDU retry mode
+ *               0:expired by retry limit, 1: expired by mpdu life timer
+ * TX_AUTO_FB_ENABLE: Tx retry PHY rate auto fallback enable
+ */
+#define TX_RTY_CFG			0x134c
+#define TX_RTY_CFG_SHORT_RTY_LIMIT	FIELD32(0x000000ff)
+#define TX_RTY_CFG_LONG_RTY_LIMIT	FIELD32(0x0000ff00)
+#define TX_RTY_CFG_LONG_RTY_THRE	FIELD32(0x0fff0000)
+#define TX_RTY_CFG_NON_AGG_RTY_MODE	FIELD32(0x10000000)
+#define TX_RTY_CFG_AGG_RTY_MODE		FIELD32(0x20000000)
+#define TX_RTY_CFG_TX_AUTO_FB_ENABLE	FIELD32(0x40000000)
+
+/*
+ * TX_LINK_CFG:
+ * REMOTE_MFB_LIFETIME: remote MFB life time. unit: 32us
+ * MFB_ENABLE: TX apply remote MFB 1:enable
+ * REMOTE_UMFS_ENABLE: remote unsolicited MFB enable
+ *                     0: do not apply remote unsolicited MFB (MFS=7)
+ * TX_MRQ_EN: MCS request TX enable
+ * TX_RDG_EN: RDG TX enable
+ * TX_CF_ACK_EN: Piggyback CF-ACK enable
+ * REMOTE_MFB: remote MCS feedback
+ * REMOTE_MFS: remote MCS feedback sequence number
+ */
+#define TX_LINK_CFG			0x1350
+#define TX_LINK_CFG_REMOTE_MFB_LIFETIME	FIELD32(0x000000ff)
+#define TX_LINK_CFG_MFB_ENABLE		FIELD32(0x00000100)
+#define TX_LINK_CFG_REMOTE_UMFS_ENABLE	FIELD32(0x00000200)
+#define TX_LINK_CFG_TX_MRQ_EN		FIELD32(0x00000400)
+#define TX_LINK_CFG_TX_RDG_EN		FIELD32(0x00000800)
+#define TX_LINK_CFG_TX_CF_ACK_EN	FIELD32(0x00001000)
+#define TX_LINK_CFG_REMOTE_MFB		FIELD32(0x00ff0000)
+#define TX_LINK_CFG_REMOTE_MFS		FIELD32(0xff000000)
+
+/*
+ * HT_FBK_CFG0:
+ */
+#define HT_FBK_CFG0			0x1354
+#define HT_FBK_CFG0_HTMCS0FBK		FIELD32(0x0000000f)
+#define HT_FBK_CFG0_HTMCS1FBK		FIELD32(0x000000f0)
+#define HT_FBK_CFG0_HTMCS2FBK		FIELD32(0x00000f00)
+#define HT_FBK_CFG0_HTMCS3FBK		FIELD32(0x0000f000)
+#define HT_FBK_CFG0_HTMCS4FBK		FIELD32(0x000f0000)
+#define HT_FBK_CFG0_HTMCS5FBK		FIELD32(0x00f00000)
+#define HT_FBK_CFG0_HTMCS6FBK		FIELD32(0x0f000000)
+#define HT_FBK_CFG0_HTMCS7FBK		FIELD32(0xf0000000)
+
+/*
+ * HT_FBK_CFG1:
+ */
+#define HT_FBK_CFG1			0x1358
+#define HT_FBK_CFG1_HTMCS8FBK		FIELD32(0x0000000f)
+#define HT_FBK_CFG1_HTMCS9FBK		FIELD32(0x000000f0)
+#define HT_FBK_CFG1_HTMCS10FBK		FIELD32(0x00000f00)
+#define HT_FBK_CFG1_HTMCS11FBK		FIELD32(0x0000f000)
+#define HT_FBK_CFG1_HTMCS12FBK		FIELD32(0x000f0000)
+#define HT_FBK_CFG1_HTMCS13FBK		FIELD32(0x00f00000)
+#define HT_FBK_CFG1_HTMCS14FBK		FIELD32(0x0f000000)
+#define HT_FBK_CFG1_HTMCS15FBK		FIELD32(0xf0000000)
+
+/*
+ * LG_FBK_CFG0:
+ */
+#define LG_FBK_CFG0			0x135c
+#define LG_FBK_CFG0_OFDMMCS0FBK		FIELD32(0x0000000f)
+#define LG_FBK_CFG0_OFDMMCS1FBK		FIELD32(0x000000f0)
+#define LG_FBK_CFG0_OFDMMCS2FBK		FIELD32(0x00000f00)
+#define LG_FBK_CFG0_OFDMMCS3FBK		FIELD32(0x0000f000)
+#define LG_FBK_CFG0_OFDMMCS4FBK		FIELD32(0x000f0000)
+#define LG_FBK_CFG0_OFDMMCS5FBK		FIELD32(0x00f00000)
+#define LG_FBK_CFG0_OFDMMCS6FBK		FIELD32(0x0f000000)
+#define LG_FBK_CFG0_OFDMMCS7FBK		FIELD32(0xf0000000)
+
+/*
+ * LG_FBK_CFG1:
+ */
+#define LG_FBK_CFG1			0x1360
+#define LG_FBK_CFG0_CCKMCS0FBK		FIELD32(0x0000000f)
+#define LG_FBK_CFG0_CCKMCS1FBK		FIELD32(0x000000f0)
+#define LG_FBK_CFG0_CCKMCS2FBK		FIELD32(0x00000f00)
+#define LG_FBK_CFG0_CCKMCS3FBK		FIELD32(0x0000f000)
+
+/*
+ * CCK_PROT_CFG: CCK Protection
+ * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
+ * PROTECT_CTRL: Protection control frame type for CCK TX
+ *               0:none, 1:RTS/CTS, 2:CTS-to-self
+ * PROTECT_NAV: TXOP protection type for CCK TX
+ *              0:none, 1:ShortNAVprotect, 2:LongNAVProtect
+ * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_OFDM: OFDM TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_MM20: MM20 TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_MM40: MM40 TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_GF20: GF20 TXOP allowance, 0:disallow
+ * TX_OP_ALLOW_GF40: GF40 TXOP allowance, 0:disallow
+ * RTS_TH_EN: RTS threshold enable on CCK TX
+ */
+#define CCK_PROT_CFG			0x1364
+#define CCK_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define CCK_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define CCK_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define CCK_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define CCK_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * OFDM_PROT_CFG: OFDM Protection
+ */
+#define OFDM_PROT_CFG			0x1368
+#define OFDM_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define OFDM_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define OFDM_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define OFDM_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define OFDM_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * MM20_PROT_CFG: MM20 Protection
+ */
+#define MM20_PROT_CFG			0x136c
+#define MM20_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define MM20_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define MM20_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define MM20_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define MM20_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * MM40_PROT_CFG: MM40 Protection
+ */
+#define MM40_PROT_CFG			0x1370
+#define MM40_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define MM40_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define MM40_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define MM40_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define MM40_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * GF20_PROT_CFG: GF20 Protection
+ */
+#define GF20_PROT_CFG			0x1374
+#define GF20_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define GF20_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define GF20_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define GF20_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define GF20_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * GF40_PROT_CFG: GF40 Protection
+ */
+#define GF40_PROT_CFG			0x1378
+#define GF40_PROT_CFG_PROTECT_RATE	FIELD32(0x0000ffff)
+#define GF40_PROT_CFG_PROTECT_CTRL	FIELD32(0x00030000)
+#define GF40_PROT_CFG_PROTECT_NAV	FIELD32(0x000c0000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_CCK	FIELD32(0x00100000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM	FIELD32(0x00200000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_MM20	FIELD32(0x00400000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_MM40	FIELD32(0x00800000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_GF20	FIELD32(0x01000000)
+#define GF40_PROT_CFG_TX_OP_ALLOW_GF40	FIELD32(0x02000000)
+#define GF40_PROT_CFG_RTS_TH_EN		FIELD32(0x04000000)
+
+/*
+ * EXP_CTS_TIME:
+ */
+#define EXP_CTS_TIME			0x137c
+
+/*
+ * EXP_ACK_TIME:
+ */
+#define EXP_ACK_TIME			0x1380
+
+/*
+ * RX_FILTER_CFG: RX configuration register.
+ */
+#define RX_FILTER_CFG			0x1400
+#define RX_FILTER_CFG_DROP_CRC_ERROR	FIELD32(0x00000001)
+#define RX_FILTER_CFG_DROP_PHY_ERROR	FIELD32(0x00000002)
+#define RX_FILTER_CFG_DROP_NOT_TO_ME	FIELD32(0x00000004)
+#define RX_FILTER_CFG_DROP_NOT_MY_BSSD	FIELD32(0x00000008)
+#define RX_FILTER_CFG_DROP_VER_ERROR	FIELD32(0x00000010)
+#define RX_FILTER_CFG_DROP_MULTICAST	FIELD32(0x00000020)
+#define RX_FILTER_CFG_DROP_BROADCAST	FIELD32(0x00000040)
+#define RX_FILTER_CFG_DROP_DUPLICATE	FIELD32(0x00000080)
+#define RX_FILTER_CFG_DROP_CF_END_ACK	FIELD32(0x00000100)
+#define RX_FILTER_CFG_DROP_CF_END	FIELD32(0x00000200)
+#define RX_FILTER_CFG_DROP_ACK		FIELD32(0x00000400)
+#define RX_FILTER_CFG_DROP_CTS		FIELD32(0x00000800)
+#define RX_FILTER_CFG_DROP_RTS		FIELD32(0x00001000)
+#define RX_FILTER_CFG_DROP_PSPOLL	FIELD32(0x00002000)
+#define RX_FILTER_CFG_DROP_BA		FIELD32(0x00004000)
+#define RX_FILTER_CFG_DROP_BAR		FIELD32(0x00008000)
+#define RX_FILTER_CFG_DROP_CNTL		FIELD32(0x00010000)
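+
+/*
+ * Illustrative sketch: a typical managed-mode filter drops erroneous
+ * frames and frames addressed to other stations or BSSes, and passes
+ * everything else up.  The exact policy is a driver decision; the
+ * accessors are placeholders.
+ */
+#if 0
+static void example_set_rx_filter(void __iomem *base)
+{
+	u32 reg = 0;
+
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR, 1);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR, 1);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, 1);
+	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 1);
+	writel(reg, base + RX_FILTER_CFG);
+}
+#endif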
+
+/*
+ * AUTO_RSP_CFG:
+ * AUTORESPONDER: 0: disable, 1: enable
+ * BAC_ACK_POLICY: 0:long, 1:short preamble
+ * CTS_40_MMODE: Response CTS 40MHz duplicate mode
+ * CTS_40_MREF: Response CTS 40MHz duplicate mode
+ * AR_PREAMBLE: Auto responder preamble 0:long, 1:short preamble
+ * DUAL_CTS_EN: Power bit value in control frame
+ * ACK_CTS_PSM_BIT: Power bit value in control frame
+ */
+#define AUTO_RSP_CFG			0x1404
+#define AUTO_RSP_CFG_AUTORESPONDER	FIELD32(0x00000001)
+#define AUTO_RSP_CFG_BAC_ACK_POLICY	FIELD32(0x00000002)
+#define AUTO_RSP_CFG_CTS_40_MMODE	FIELD32(0x00000004)
+#define AUTO_RSP_CFG_CTS_40_MREF	FIELD32(0x00000008)
+#define AUTO_RSP_CFG_AR_PREAMBLE	FIELD32(0x00000010)
+#define AUTO_RSP_CFG_DUAL_CTS_EN	FIELD32(0x00000040)
+#define AUTO_RSP_CFG_ACK_CTS_PSM_BIT	FIELD32(0x00000080)
+
+/*
+ * LEGACY_BASIC_RATE:
+ */
+#define LEGACY_BASIC_RATE		0x1408
+
+/*
+ * HT_BASIC_RATE:
+ */
+#define HT_BASIC_RATE			0x140c
+
+/*
+ * HT_CTRL_CFG:
+ */
+#define HT_CTRL_CFG			0x1410
+
+/*
+ * SIFS_COST_CFG:
+ */
+#define SIFS_COST_CFG			0x1414
+
+/*
+ * RX_PARSER_CFG:
+ * Set NAV for all received frames
+ */
+#define RX_PARSER_CFG			0x1418
+
+/*
+ * TX_SEC_CNT0:
+ */
+#define TX_SEC_CNT0			0x1500
+
+/*
+ * RX_SEC_CNT0:
+ */
+#define RX_SEC_CNT0			0x1504
+
+/*
+ * CCMP_FC_MUTE:
+ */
+#define CCMP_FC_MUTE			0x1508
+
+/*
+ * TXOP_HLDR_ADDR0:
+ */
+#define TXOP_HLDR_ADDR0			0x1600
+
+/*
+ * TXOP_HLDR_ADDR1:
+ */
+#define TXOP_HLDR_ADDR1			0x1604
+
+/*
+ * TXOP_HLDR_ET:
+ */
+#define TXOP_HLDR_ET			0x1608
+
+/*
+ * QOS_CFPOLL_RA_DW0:
+ */
+#define QOS_CFPOLL_RA_DW0		0x160c
+
+/*
+ * QOS_CFPOLL_RA_DW1:
+ */
+#define QOS_CFPOLL_RA_DW1		0x1610
+
+/*
+ * QOS_CFPOLL_QC:
+ */
+#define QOS_CFPOLL_QC			0x1614
+
+/*
+ * RX_STA_CNT0: RX CRC error count & RX PHY error count
+ */
+#define RX_STA_CNT0			0x1700
+#define RX_STA_CNT0_CRC_ERR		FIELD32(0x0000ffff)
+#define RX_STA_CNT0_PHY_ERR		FIELD32(0xffff0000)
+
+/*
+ * RX_STA_CNT1: RX false CCA count & RX PLCP error count
+ */
+#define RX_STA_CNT1			0x1704
+#define RX_STA_CNT1_FALSE_CCA		FIELD32(0x0000ffff)
+#define RX_STA_CNT1_PLCP_ERR		FIELD32(0xffff0000)
+
+/*
+ * RX_STA_CNT2:
+ */
+#define RX_STA_CNT2			0x1708
+#define RX_STA_CNT2_RX_DUPLI_COUNT	FIELD32(0x0000ffff)
+#define RX_STA_CNT2_RX_FIFO_OVERFLOW	FIELD32(0xffff0000)
+
+/*
+ * TX_STA_CNT0: TX fail count & TX beacon count
+ */
+#define TX_STA_CNT0			0x170c
+#define TX_STA_CNT0_TX_FAIL_COUNT	FIELD32(0x0000ffff)
+#define TX_STA_CNT0_TX_BEACON_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_STA_CNT1: TX success count & TX retransmit count
+ */
+#define TX_STA_CNT1			0x1710
+#define TX_STA_CNT1_TX_SUCCESS		FIELD32(0x0000ffff)
+#define TX_STA_CNT1_TX_RETRANSMIT	FIELD32(0xffff0000)
+
+/*
+ * TX_STA_CNT2: TX zero-length count & TX underflow count
+ */
+#define TX_STA_CNT2			0x1714
+#define TX_STA_CNT2_TX_ZERO_LEN_COUNT	FIELD32(0x0000ffff)
+#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_STA_FIFO: TX Result for specific PID status fifo register
+ */
+#define TX_STA_FIFO			0x1718
+#define TX_STA_FIFO_VALID		FIELD32(0x00000001)
+#define TX_STA_FIFO_PID_TYPE		FIELD32(0x0000001e)
+#define TX_STA_FIFO_TX_SUCCESS		FIELD32(0x00000020)
+#define TX_STA_FIFO_TX_AGGRE		FIELD32(0x00000040)
+#define TX_STA_FIFO_TX_ACK_REQUIRED	FIELD32(0x00000080)
+#define TX_STA_FIFO_WCID		FIELD32(0x0000ff00)
+#define TX_STA_FIFO_MCS			FIELD32(0x007f0000)
+#define TX_STA_FIFO_PHYMODE		FIELD32(0xc0000000)
+
+/*
+ * TX_AGG_CNT: Debug counter
+ */
+#define TX_AGG_CNT			0x171c
+#define TX_AGG_CNT_NON_AGG_TX_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT_AGG_TX_COUNT		FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT0:
+ */
+#define TX_AGG_CNT0			0x1720
+#define TX_AGG_CNT0_AGG_SIZE_1_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT0_AGG_SIZE_2_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT1:
+ */
+#define TX_AGG_CNT1			0x1724
+#define TX_AGG_CNT1_AGG_SIZE_3_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT1_AGG_SIZE_4_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT2:
+ */
+#define TX_AGG_CNT2			0x1728
+#define TX_AGG_CNT2_AGG_SIZE_5_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT2_AGG_SIZE_6_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT3:
+ */
+#define TX_AGG_CNT3			0x172c
+#define TX_AGG_CNT3_AGG_SIZE_7_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT3_AGG_SIZE_8_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT4:
+ */
+#define TX_AGG_CNT4			0x1730
+#define TX_AGG_CNT4_AGG_SIZE_9_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT4_AGG_SIZE_10_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT5:
+ */
+#define TX_AGG_CNT5			0x1734
+#define TX_AGG_CNT5_AGG_SIZE_11_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT5_AGG_SIZE_12_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT6:
+ */
+#define TX_AGG_CNT6			0x1738
+#define TX_AGG_CNT6_AGG_SIZE_13_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT6_AGG_SIZE_14_COUNT	FIELD32(0xffff0000)
+
+/*
+ * TX_AGG_CNT7:
+ */
+#define TX_AGG_CNT7			0x173c
+#define TX_AGG_CNT7_AGG_SIZE_15_COUNT	FIELD32(0x0000ffff)
+#define TX_AGG_CNT7_AGG_SIZE_16_COUNT	FIELD32(0xffff0000)
+
+/*
+ * MPDU_DENSITY_CNT:
+ * TX_ZERO_DEL: TX zero length delimiter count
+ * RX_ZERO_DEL: RX zero length delimiter count
+ */
+#define MPDU_DENSITY_CNT		0x1740
+#define MPDU_DENSITY_CNT_TX_ZERO_DEL	FIELD32(0x0000ffff)
+#define MPDU_DENSITY_CNT_RX_ZERO_DEL	FIELD32(0xffff0000)
+
+/*
+ * Security key table memory.
+ * MAC_WCID_BASE: 8 bytes (only 6 used) * 256 entries
+ * PAIRWISE_KEY_TABLE_BASE: 32 bytes * 256 entries
+ * MAC_IVEIV_TABLE_BASE: 8 bytes * 256 entries
+ * MAC_WCID_ATTRIBUTE_BASE: 4 bytes * 256 entries
+ * SHARED_KEY_TABLE_BASE: 32 bytes * 32 entries
+ * SHARED_KEY_MODE_BASE: 4 bits * 32 entries
+ */
+#define MAC_WCID_BASE			0x1800
+#define PAIRWISE_KEY_TABLE_BASE		0x4000
+#define MAC_IVEIV_TABLE_BASE		0x6000
+#define MAC_WCID_ATTRIBUTE_BASE		0x6800
+#define SHARED_KEY_TABLE_BASE		0x6c00
+#define SHARED_KEY_MODE_BASE		0x7000
+
+#define MAC_WCID_ENTRY(__idx) \
+	( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
+#define PAIRWISE_KEY_ENTRY(__idx) \
+	( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
+#define MAC_IVEIV_ENTRY(__idx) \
+	( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
+#define MAC_WCID_ATTR_ENTRY(__idx) \
+	( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
+#define SHARED_KEY_ENTRY(__idx) \
+	( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
+#define SHARED_KEY_MODE_ENTRY(__idx) \
+	( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
+
+struct mac_wcid_entry {
+	u8 mac[6];
+	u8 reserved[2];
+} __attribute__ ((packed));
+
+struct hw_key_entry {
+	u8 key[16];
+	u8 tx_mic[8];
+	u8 rx_mic[8];
+} __attribute__ ((packed));
+
+struct mac_iveiv_entry {
+	u8 iv[8];
+} __attribute__ ((packed));
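+
+/*
+ * Illustrative example: the per-WCID tables are arrays of fixed-size
+ * entries, so the *_ENTRY() macros simply add index * entry size to
+ * the table base, e.g. MAC_WCID_ENTRY(1) == 0x1808 and
+ * PAIRWISE_KEY_ENTRY(1) == 0x4020.
+ */
+#if 0
+static inline u32 example_wcid_entry_offset(unsigned int wcid)
+{
+	return MAC_WCID_ENTRY(wcid);	/* 0x1800 + wcid * 8 */
+}
+#endif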
+
+/*
+ * MAC_WCID_ATTRIBUTE:
+ */
+#define MAC_WCID_ATTRIBUTE_KEYTAB	FIELD32(0x00000001)
+#define MAC_WCID_ATTRIBUTE_CIPHER	FIELD32(0x0000000e)
+#define MAC_WCID_ATTRIBUTE_BSS_IDX	FIELD32(0x00000070)
+#define MAC_WCID_ATTRIBUTE_RX_WIUDF	FIELD32(0x00000380)
+
+/*
+ * SHARED_KEY_MODE:
+ */
+#define SHARED_KEY_MODE_BSS0_KEY0	FIELD32(0x00000007)
+#define SHARED_KEY_MODE_BSS0_KEY1	FIELD32(0x00000070)
+#define SHARED_KEY_MODE_BSS0_KEY2	FIELD32(0x00000700)
+#define SHARED_KEY_MODE_BSS0_KEY3	FIELD32(0x00007000)
+#define SHARED_KEY_MODE_BSS1_KEY0	FIELD32(0x00070000)
+#define SHARED_KEY_MODE_BSS1_KEY1	FIELD32(0x00700000)
+#define SHARED_KEY_MODE_BSS1_KEY2	FIELD32(0x07000000)
+#define SHARED_KEY_MODE_BSS1_KEY3	FIELD32(0x70000000)
+
+/*
+ * HOST-MCU communication
+ */
+
+/*
+ * H2M_MAILBOX_CSR: Host-to-MCU Mailbox.
+ */
+#define H2M_MAILBOX_CSR			0x7010
+#define H2M_MAILBOX_CSR_ARG0		FIELD32(0x000000ff)
+#define H2M_MAILBOX_CSR_ARG1		FIELD32(0x0000ff00)
+#define H2M_MAILBOX_CSR_CMD_TOKEN	FIELD32(0x00ff0000)
+#define H2M_MAILBOX_CSR_OWNER		FIELD32(0xff000000)
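+
+/*
+ * Illustrative sketch of posting an MCU command: the arguments and a
+ * token are written to the mailbox, then the command byte is kicked
+ * through HOST_CMD_CSR.  Ownership polling and timeouts are omitted;
+ * the real driver wraps this in its own helper.
+ */
+#if 0
+static void example_mcu_request(void __iomem *base, u8 command, u8 token,
+				u8 arg0, u8 arg1)
+{
+	u32 reg = 0;
+
+	rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
+	rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
+	rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
+	rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
+	writel(reg, base + H2M_MAILBOX_CSR);
+
+	reg = 0;
+	rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
+	writel(reg, base + HOST_CMD_CSR);
+}
+#endif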
+
+/*
+ * H2M_MAILBOX_CID:
+ */
+#define H2M_MAILBOX_CID			0x7014
+#define H2M_MAILBOX_CID_CMD0		FIELD32(0x000000ff)
+#define H2M_MAILBOX_CID_CMD1		FIELD32(0x0000ff00)
+#define H2M_MAILBOX_CID_CMD2		FIELD32(0x00ff0000)
+#define H2M_MAILBOX_CID_CMD3		FIELD32(0xff000000)
+
+/*
+ * H2M_MAILBOX_STATUS:
+ */
+#define H2M_MAILBOX_STATUS		0x701c
+
+/*
+ * H2M_INT_SRC:
+ */
+#define H2M_INT_SRC			0x7024
+
+/*
+ * H2M_BBP_AGENT:
+ */
+#define H2M_BBP_AGENT			0x7028
+
+/*
+ * MCU_LEDCS: LED control for MCU Mailbox.
+ */
+#define MCU_LEDCS_LED_MODE		FIELD8(0x1f)
+#define MCU_LEDCS_POLARITY		FIELD8(0x01)
+
+/*
+ * HW_CS_CTS_BASE:
+ * Carrier-sense CTS frame base address.
+ * This is where the MAC stores the CTS frame used for the carrier-sense function.
+ */
+#define HW_CS_CTS_BASE			0x7700
+
+/*
+ * HW_DFS_CTS_BASE:
+ * DFS CTS frame base address. This is where the MAC stores the CTS frame used for DFS.
+ */
+#define HW_DFS_CTS_BASE			0x7780
+
+/*
+ * TXRX control registers - base address 0x3000
+ */
+
+/*
+ * TXRX_CSR1:
+ * rt2860b  UNKNOWN reg use R/O Reg Addr 0x77d0 first..
+ */
+#define TXRX_CSR1			0x77d0
+
+/*
+ * HW_DEBUG_SETTING_BASE:
+ * Since a NULL frame will never be that long (256 bytes),
+ * we steal the 16 tail bytes to store debugging settings.
+ */
+#define HW_DEBUG_SETTING_BASE		0x77f0
+#define HW_DEBUG_SETTING_BASE2		0x7770
+
+/*
+ * HW_BEACON_BASE
+ * To support a maximum of 8 MBSS, each with a beacon of up to
+ * 512 bytes, three discontiguous memory segments are used:
+ * 1. The original region for BCN 0~3
+ * 2. Memory taken from the FCE table for BCN 4~5
+ * 3. Memory taken from the pairwise key table for BCN 6~7
+ *    (occupying the entries of wcid 238~253 for BCN 6
+ *    and wcid 222~237 for BCN 7)
+ *
+ * IMPORTANT NOTE: Not sure why legacy driver does this,
+ * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6.
+ */
+#define HW_BEACON_BASE0			0x7800
+#define HW_BEACON_BASE1			0x7a00
+#define HW_BEACON_BASE2			0x7c00
+#define HW_BEACON_BASE3			0x7e00
+#define HW_BEACON_BASE4			0x7200
+#define HW_BEACON_BASE5			0x7400
+#define HW_BEACON_BASE6			0x5dc0
+#define HW_BEACON_BASE7			0x5bc0
+
+#define HW_BEACON_OFFSET(__index) \
+	( ((__index) < 4) ? ( HW_BEACON_BASE0 + ((__index) * 0x0200) ) : \
+	  (((__index) < 6) ? ( HW_BEACON_BASE4 + (((__index) - 4) * 0x0200) ) : \
+	  (HW_BEACON_BASE6 - (((__index) - 6) * 0x0200))) )
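+
+/*
+ * Illustrative values (computed from the macro above): indices 0-3 map
+ * to 0x7800/0x7a00/0x7c00/0x7e00, 4-5 to 0x7200/0x7400 and 6-7 to
+ * 0x5dc0/0x5bc0, the last pair growing downwards as noted above.
+ */
+#if 0
+static inline unsigned int example_beacon_offset(unsigned int index)
+{
+	return HW_BEACON_OFFSET(index);
+}
+#endif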
+
+/*
+ * 8051 firmware image.
+ */
+#define FIRMWARE_RT2860			"rt2860.bin"
+#define FIRMWARE_IMAGE_BASE		0x2000
+
+/*
+ * BBP registers.
+ * The wordsize of the BBP is 8 bits.
+ */
+
+/*
+ * BBP 1: TX Antenna
+ */
+#define BBP1_TX_POWER			FIELD8(0x07)
+#define BBP1_TX_ANTENNA			FIELD8(0x18)
+
+/*
+ * BBP 3: RX Antenna
+ */
+#define BBP3_RX_ANTENNA			FIELD8(0x18)
+#define BBP3_HT40_PLUS			FIELD8(0x20)
+
+/*
+ * BBP 4: Bandwidth
+ */
+#define BBP4_TX_BF			FIELD8(0x01)
+#define BBP4_BANDWIDTH			FIELD8(0x18)
+
+/*
+ * RFCSR registers
+ * The wordsize of the RFCSR is 8 bits.
+ */
+
+/*
+ * RFCSR 6:
+ */
+#define RFCSR6_R			FIELD8(0x03)
+
+/*
+ * RFCSR 7:
+ */
+#define RFCSR7_RF_TUNING		FIELD8(0x01)
+
+/*
+ * RFCSR 12:
+ */
+#define RFCSR12_TX_POWER		FIELD8(0x1f)
+
+/*
+ * RFCSR 22:
+ */
+#define RFCSR22_BASEBAND_LOOPBACK	FIELD8(0x01)
+
+/*
+ * RFCSR 23:
+ */
+#define RFCSR23_FREQ_OFFSET		FIELD8(0x7f)
+
+/*
+ * RFCSR 30:
+ */
+#define RFCSR30_RF_CALIBRATION		FIELD8(0x80)
+
+/*
+ * RF registers
+ */
+
+/*
+ * RF 2
+ */
+#define RF2_ANTENNA_RX2			FIELD32(0x00000040)
+#define RF2_ANTENNA_TX1			FIELD32(0x00004000)
+#define RF2_ANTENNA_RX1			FIELD32(0x00020000)
+
+/*
+ * RF 3
+ */
+#define RF3_TXPOWER_G			FIELD32(0x00003e00)
+#define RF3_TXPOWER_A_7DBM_BOOST	FIELD32(0x00000200)
+#define RF3_TXPOWER_A			FIELD32(0x00003c00)
+
+/*
+ * RF 4
+ */
+#define RF4_TXPOWER_G			FIELD32(0x000007c0)
+#define RF4_TXPOWER_A_7DBM_BOOST	FIELD32(0x00000040)
+#define RF4_TXPOWER_A			FIELD32(0x00000780)
+#define RF4_FREQ_OFFSET			FIELD32(0x001f8000)
+#define RF4_HT40			FIELD32(0x00200000)
+
+/*
+ * EEPROM content.
+ * The wordsize of the EEPROM is 16 bits.
+ */
+
+/*
+ * EEPROM Version
+ */
+#define EEPROM_VERSION			0x0001
+#define EEPROM_VERSION_FAE		FIELD16(0x00ff)
+#define EEPROM_VERSION_VERSION		FIELD16(0xff00)
+
+/*
+ * HW MAC address.
+ */
+#define EEPROM_MAC_ADDR_0		0x0002
+#define EEPROM_MAC_ADDR_BYTE0		FIELD16(0x00ff)
+#define EEPROM_MAC_ADDR_BYTE1		FIELD16(0xff00)
+#define EEPROM_MAC_ADDR_1		0x0003
+#define EEPROM_MAC_ADDR_BYTE2		FIELD16(0x00ff)
+#define EEPROM_MAC_ADDR_BYTE3		FIELD16(0xff00)
+#define EEPROM_MAC_ADDR_2		0x0004
+#define EEPROM_MAC_ADDR_BYTE4		FIELD16(0x00ff)
+#define EEPROM_MAC_ADDR_BYTE5		FIELD16(0xff00)
+
+/*
+ * EEPROM ANTENNA config
+ * RXPATH: 1: 1R, 2: 2R, 3: 3R
+ * TXPATH: 1: 1T, 2: 2T
+ */
+#define	EEPROM_ANTENNA			0x001a
+#define EEPROM_ANTENNA_RXPATH		FIELD16(0x000f)
+#define EEPROM_ANTENNA_TXPATH		FIELD16(0x00f0)
+#define EEPROM_ANTENNA_RF_TYPE		FIELD16(0x0f00)
+
+/*
+ * EEPROM NIC config
+ * CARDBUS_ACCEL: 0 - enable, 1 - disable
+ */
+#define	EEPROM_NIC			0x001b
+#define EEPROM_NIC_HW_RADIO		FIELD16(0x0001)
+#define EEPROM_NIC_DYNAMIC_TX_AGC	FIELD16(0x0002)
+#define EEPROM_NIC_EXTERNAL_LNA_BG	FIELD16(0x0004)
+#define EEPROM_NIC_EXTERNAL_LNA_A	FIELD16(0x0008)
+#define EEPROM_NIC_CARDBUS_ACCEL	FIELD16(0x0010)
+#define EEPROM_NIC_BW40M_SB_BG		FIELD16(0x0020)
+#define EEPROM_NIC_BW40M_SB_A		FIELD16(0x0040)
+#define EEPROM_NIC_WPS_PBC		FIELD16(0x0080)
+#define EEPROM_NIC_BW40M_BG		FIELD16(0x0100)
+#define EEPROM_NIC_BW40M_A		FIELD16(0x0200)
+
+/*
+ * EEPROM frequency
+ */
+#define	EEPROM_FREQ			0x001d
+#define EEPROM_FREQ_OFFSET		FIELD16(0x00ff)
+#define EEPROM_FREQ_LED_MODE		FIELD16(0x7f00)
+#define EEPROM_FREQ_LED_POLARITY	FIELD16(0x1000)
+
+/*
+ * EEPROM LED
+ * POLARITY_RDY_G: Polarity RDY_G setting.
+ * POLARITY_RDY_A: Polarity RDY_A setting.
+ * POLARITY_ACT: Polarity ACT setting.
+ * POLARITY_GPIO_0: Polarity GPIO0 setting.
+ * POLARITY_GPIO_1: Polarity GPIO1 setting.
+ * POLARITY_GPIO_2: Polarity GPIO2 setting.
+ * POLARITY_GPIO_3: Polarity GPIO3 setting.
+ * POLARITY_GPIO_4: Polarity GPIO4 setting.
+ * LED_MODE: Led mode.
+ */
+#define EEPROM_LED1			0x001e
+#define EEPROM_LED2			0x001f
+#define EEPROM_LED3			0x0020
+#define EEPROM_LED_POLARITY_RDY_BG	FIELD16(0x0001)
+#define EEPROM_LED_POLARITY_RDY_A	FIELD16(0x0002)
+#define EEPROM_LED_POLARITY_ACT		FIELD16(0x0004)
+#define EEPROM_LED_POLARITY_GPIO_0	FIELD16(0x0008)
+#define EEPROM_LED_POLARITY_GPIO_1	FIELD16(0x0010)
+#define EEPROM_LED_POLARITY_GPIO_2	FIELD16(0x0020)
+#define EEPROM_LED_POLARITY_GPIO_3	FIELD16(0x0040)
+#define EEPROM_LED_POLARITY_GPIO_4	FIELD16(0x0080)
+#define EEPROM_LED_LED_MODE		FIELD16(0x1f00)
+
+/*
+ * EEPROM LNA
+ */
+#define EEPROM_LNA			0x0022
+#define EEPROM_LNA_BG			FIELD16(0x00ff)
+#define EEPROM_LNA_A0			FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI BG offset
+ */
+#define EEPROM_RSSI_BG			0x0023
+#define EEPROM_RSSI_BG_OFFSET0		FIELD16(0x00ff)
+#define EEPROM_RSSI_BG_OFFSET1		FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI BG2 offset
+ */
+#define EEPROM_RSSI_BG2			0x0024
+#define EEPROM_RSSI_BG2_OFFSET2		FIELD16(0x00ff)
+#define EEPROM_RSSI_BG2_LNA_A1		FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI A offset
+ */
+#define EEPROM_RSSI_A			0x0025
+#define EEPROM_RSSI_A_OFFSET0		FIELD16(0x00ff)
+#define EEPROM_RSSI_A_OFFSET1		FIELD16(0xff00)
+
+/*
+ * EEPROM RSSI A2 offset
+ */
+#define EEPROM_RSSI_A2			0x0026
+#define EEPROM_RSSI_A2_OFFSET2		FIELD16(0x00ff)
+#define EEPROM_RSSI_A2_LNA_A2		FIELD16(0xff00)
+
+/*
+ * EEPROM TX power delta: 20MHz and 40MHz use different power.
+ *	This is the delta for 40MHz.
+ * VALUE: TX power delta value (MAX=4)
+ * TYPE: 1: plus the delta value, 0: minus the delta value
+ * TXPOWER: Enable
+ */
+#define EEPROM_TXPOWER_DELTA		0x0028
+#define EEPROM_TXPOWER_DELTA_VALUE	FIELD16(0x003f)
+#define EEPROM_TXPOWER_DELTA_TYPE	FIELD16(0x0040)
+#define EEPROM_TXPOWER_DELTA_TXPOWER	FIELD16(0x0080)
+
+/*
+ * EEPROM TXPOWER 802.11BG
+ */
+#define	EEPROM_TXPOWER_BG1		0x0029
+#define	EEPROM_TXPOWER_BG2		0x0030
+#define EEPROM_TXPOWER_BG_SIZE		7
+#define EEPROM_TXPOWER_BG_1		FIELD16(0x00ff)
+#define EEPROM_TXPOWER_BG_2		FIELD16(0xff00)
+
+/*
+ * EEPROM TXPOWER 802.11A
+ */
+#define EEPROM_TXPOWER_A1		0x003c
+#define EEPROM_TXPOWER_A2		0x0053
+#define EEPROM_TXPOWER_A_SIZE		6
+#define EEPROM_TXPOWER_A_1		FIELD16(0x00ff)
+#define EEPROM_TXPOWER_A_2		FIELD16(0xff00)
+
+/*
+ * EEPROM TX power by rate: 20MHz power
+ */
+#define EEPROM_TXPOWER_BYRATE		0x006f
+
+/*
+ * EEPROM BBP.
+ */
+#define	EEPROM_BBP_START		0x0078
+#define EEPROM_BBP_SIZE			16
+#define EEPROM_BBP_VALUE		FIELD16(0x00ff)
+#define EEPROM_BBP_REG_ID		FIELD16(0xff00)
+
+/*
+ * MCU mailbox commands.
+ */
+#define MCU_SLEEP			0x30
+#define MCU_WAKEUP			0x31
+#define MCU_RADIO_OFF			0x35
+#define MCU_CURRENT			0x36
+#define MCU_LED				0x50
+#define MCU_LED_STRENGTH		0x51
+#define MCU_LED_1			0x52
+#define MCU_LED_2			0x53
+#define MCU_LED_3			0x54
+#define MCU_RADAR			0x60
+#define MCU_BOOT_SIGNAL			0x72
+#define MCU_BBP_SIGNAL			0x80
+#define MCU_POWER_SAVE			0x83
+
+/*
+ * MCU mailbox tokens
+ */
+#define TOKEN_WAKUP			3
+
+/*
+ * DMA descriptor defines.
+ */
+#define TXD_DESC_SIZE			( 4 * sizeof(__le32) )
+#define TXWI_DESC_SIZE			( 4 * sizeof(__le32) )
+#define RXD_DESC_SIZE			( 4 * sizeof(__le32) )
+#define RXWI_DESC_SIZE			( 4 * sizeof(__le32) )
+
+/*
+ * TX descriptor format for TX, PRIO and Beacon Ring.
+ */
+
+/*
+ * Word0
+ */
+#define TXD_W0_SD_PTR0			FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define TXD_W1_SD_LEN1			FIELD32(0x00003fff)
+#define TXD_W1_LAST_SEC1		FIELD32(0x00004000)
+#define TXD_W1_BURST			FIELD32(0x00008000)
+#define TXD_W1_SD_LEN0			FIELD32(0x3fff0000)
+#define TXD_W1_LAST_SEC0		FIELD32(0x40000000)
+#define TXD_W1_DMA_DONE			FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define TXD_W2_SD_PTR1			FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
+ * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
+ *       0:MGMT, 1:HCCA, 2:EDCA
+ */
+#define TXD_W3_WIV			FIELD32(0x01000000)
+#define TXD_W3_QSEL			FIELD32(0x06000000)
+#define TXD_W3_TCO			FIELD32(0x20000000)
+#define TXD_W3_UCO			FIELD32(0x40000000)
+#define TXD_W3_ICO			FIELD32(0x80000000)
+
+/*
+ * TX WI structure
+ */
+
+/*
+ * Word0
+ * FRAG: 1 to inform the TKIP engine this is a fragment.
+ * MIMO_PS: The remote peer is in dynamic MIMO-PS mode
+ * TX_OP: 0:HT TXOP rule, 1:PIFS TX, 2:Backoff, 3:SIFS
+ * BW: Channel bandwidth 20MHz or 40MHz
+ * STBC: 1: STBC support MCS=0-7, 2,3: reserved
+ */
+#define TXWI_W0_FRAG			FIELD32(0x00000001)
+#define TXWI_W0_MIMO_PS			FIELD32(0x00000002)
+#define TXWI_W0_CF_ACK			FIELD32(0x00000004)
+#define TXWI_W0_TS			FIELD32(0x00000008)
+#define TXWI_W0_AMPDU			FIELD32(0x00000010)
+#define TXWI_W0_MPDU_DENSITY		FIELD32(0x000000e0)
+#define TXWI_W0_TX_OP			FIELD32(0x00000300)
+#define TXWI_W0_MCS			FIELD32(0x007f0000)
+#define TXWI_W0_BW			FIELD32(0x00800000)
+#define TXWI_W0_SHORT_GI		FIELD32(0x01000000)
+#define TXWI_W0_STBC			FIELD32(0x06000000)
+#define TXWI_W0_IFS			FIELD32(0x08000000)
+#define TXWI_W0_PHYMODE			FIELD32(0xc0000000)
+
+/*
+ * Word1
+ */
+#define TXWI_W1_ACK			FIELD32(0x00000001)
+#define TXWI_W1_NSEQ			FIELD32(0x00000002)
+#define TXWI_W1_BW_WIN_SIZE		FIELD32(0x000000fc)
+#define TXWI_W1_WIRELESS_CLI_ID		FIELD32(0x0000ff00)
+#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT	FIELD32(0x0fff0000)
+#define TXWI_W1_PACKETID		FIELD32(0xf0000000)
+
+/*
+ * Word2
+ */
+#define TXWI_W2_IV			FIELD32(0xffffffff)
+
+/*
+ * Word3
+ */
+#define TXWI_W3_EIV			FIELD32(0xffffffff)
+
+/*
+ * RX descriptor format for RX Ring.
+ */
+
+/*
+ * Word0
+ */
+#define RXD_W0_SDP0			FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define RXD_W1_SDL1			FIELD32(0x00003fff)
+#define RXD_W1_SDL0			FIELD32(0x3fff0000)
+#define RXD_W1_LS0			FIELD32(0x40000000)
+#define RXD_W1_DMA_DONE			FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define RXD_W2_SDP1			FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * AMSDU: RX with 802.3 header, not 802.11 header.
+ * DECRYPTED: This frame is being decrypted.
+ */
+#define RXD_W3_BA			FIELD32(0x00000001)
+#define RXD_W3_DATA			FIELD32(0x00000002)
+#define RXD_W3_NULLDATA			FIELD32(0x00000004)
+#define RXD_W3_FRAG			FIELD32(0x00000008)
+#define RXD_W3_UNICAST_TO_ME		FIELD32(0x00000010)
+#define RXD_W3_MULTICAST		FIELD32(0x00000020)
+#define RXD_W3_BROADCAST		FIELD32(0x00000040)
+#define RXD_W3_MY_BSS			FIELD32(0x00000080)
+#define RXD_W3_CRC_ERROR		FIELD32(0x00000100)
+#define RXD_W3_CIPHER_ERROR		FIELD32(0x00000600)
+#define RXD_W3_AMSDU			FIELD32(0x00000800)
+#define RXD_W3_HTC			FIELD32(0x00001000)
+#define RXD_W3_RSSI			FIELD32(0x00002000)
+#define RXD_W3_L2PAD			FIELD32(0x00004000)
+#define RXD_W3_AMPDU			FIELD32(0x00008000)
+#define RXD_W3_DECRYPTED		FIELD32(0x00010000)
+#define RXD_W3_PLCP_SIGNAL		FIELD32(0x00020000)
+#define RXD_W3_PLCP_RSSI		FIELD32(0x00040000)
+
+/*
+ * RX WI structure
+ */
+
+/*
+ * Word0
+ */
+#define RXWI_W0_WIRELESS_CLI_ID		FIELD32(0x000000ff)
+#define RXWI_W0_KEY_INDEX		FIELD32(0x00000300)
+#define RXWI_W0_BSSID			FIELD32(0x00001c00)
+#define RXWI_W0_UDF			FIELD32(0x0000e000)
+#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT	FIELD32(0x0fff0000)
+#define RXWI_W0_TID			FIELD32(0xf0000000)
+
+/*
+ * Word1
+ */
+#define RXWI_W1_FRAG			FIELD32(0x0000000f)
+#define RXWI_W1_SEQUENCE		FIELD32(0x0000fff0)
+#define RXWI_W1_MCS			FIELD32(0x007f0000)
+#define RXWI_W1_BW			FIELD32(0x00800000)
+#define RXWI_W1_SHORT_GI		FIELD32(0x01000000)
+#define RXWI_W1_STBC			FIELD32(0x06000000)
+#define RXWI_W1_PHYMODE			FIELD32(0xc0000000)
+
+/*
+ * Word2
+ */
+#define RXWI_W2_RSSI0			FIELD32(0x000000ff)
+#define RXWI_W2_RSSI1			FIELD32(0x0000ff00)
+#define RXWI_W2_RSSI2			FIELD32(0x00ff0000)
+
+/*
+ * Word3
+ */
+#define RXWI_W3_SNR0			FIELD32(0x000000ff)
+#define RXWI_W3_SNR1			FIELD32(0x0000ff00)
+
+/*
+ * Macros for converting txpower from EEPROM to mac80211 value
+ * and from mac80211 value to register value.
+ */
+#define MIN_G_TXPOWER	0
+#define MIN_A_TXPOWER	-7
+#define MAX_G_TXPOWER	31
+#define MAX_A_TXPOWER	15
+#define DEFAULT_TXPOWER	5
+
+#define TXPOWER_G_FROM_DEV(__txpower) \
+	(((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower))
+
+#define TXPOWER_G_TO_DEV(__txpower) \
+	clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)
+
+#define TXPOWER_A_FROM_DEV(__txpower) \
+	(((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower))
+
+#define TXPOWER_A_TO_DEV(__txpower) \
+	clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
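+
+/*
+ * Usage sketch (illustrative only): EEPROM values above the per-band
+ * maximum fall back to DEFAULT_TXPOWER, and values handed to the
+ * device are clamped to the valid range, e.g. TXPOWER_G_FROM_DEV(40)
+ * == DEFAULT_TXPOWER and TXPOWER_A_TO_DEV(20) == MAX_A_TXPOWER.
+ */
+#if 0
+static inline char example_txpower_a(char eeprom_value)
+{
+	return TXPOWER_A_TO_DEV(TXPOWER_A_FROM_DEV(eeprom_value));
+}
+#endif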
+
+#endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index a084077..9fe770f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1994,7 +1994,7 @@
 	rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
 	rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
 			   test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
-			       (skbdesc->entry->entry_idx + 1) : 0xff);
+			   txdesc->key_idx : 0xff);
 	rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
 			   skb->len - txdesc->l2pad);
 	rt2x00_set_field32(&word, TXWI_W1_PACKETID,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 27bc6b7..196de8a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -158,6 +158,13 @@
 #define RT2561		0x0302
 #define RT2661		0x0401
 #define RT2571		0x1300
+#define RT2860		0x0601	/* 2.4GHz PCI/CB */
+#define RT2860D		0x0681	/* 2.4GHz, 5GHz PCI/CB */
+#define RT2890		0x0701	/* 2.4GHz PCIe */
+#define RT2890D		0x0781	/* 2.4GHz, 5GHz PCIe */
+#define RT2880		0x2880	/* WSOC */
+#define RT3052		0x3052	/* WSOC */
+#define RT3090		0x3090	/* 2.4GHz PCIe */
 #define RT2870		0x1600
 
 	u16 rf;
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
new file mode 100644
index 0000000..539568c
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -0,0 +1,159 @@
+/*
+	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2x00soc
+	Abstract: rt2x00 generic soc device routines.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "rt2x00.h"
+#include "rt2x00soc.h"
+
+static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev)
+{
+	kfree(rt2x00dev->rf);
+	rt2x00dev->rf = NULL;
+
+	kfree(rt2x00dev->eeprom);
+	rt2x00dev->eeprom = NULL;
+}
+
+static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
+{
+	struct platform_device *pdev = to_platform_device(rt2x00dev->dev);
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	rt2x00dev->csr.base = (void __iomem *)KSEG1ADDR(res->start);
+	if (!rt2x00dev->csr.base)
+		goto exit;
+
+	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
+	if (!rt2x00dev->eeprom)
+		goto exit;
+
+	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
+	if (!rt2x00dev->rf)
+		goto exit;
+
+	return 0;
+
+exit:
+	ERROR_PROBE("Failed to allocate registers.\n");
+	rt2x00soc_free_reg(rt2x00dev);
+
+	return -ENOMEM;
+}
+
+int rt2x00soc_probe(struct platform_device *pdev,
+		    const unsigned short chipset,
+		    const struct rt2x00_ops *ops)
+{
+	struct ieee80211_hw *hw;
+	struct rt2x00_dev *rt2x00dev;
+	int retval;
+
+	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
+	if (!hw) {
+		ERROR_PROBE("Failed to allocate hardware.\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, hw);
+
+	rt2x00dev = hw->priv;
+	rt2x00dev->dev = &pdev->dev;
+	rt2x00dev->ops = ops;
+	rt2x00dev->hw = hw;
+	rt2x00dev->irq = platform_get_irq(pdev, 0);
+	rt2x00dev->name = pdev->dev.driver->name;
+
+	rt2x00_set_chip_rt(rt2x00dev, chipset);
+
+	retval = rt2x00soc_alloc_reg(rt2x00dev);
+	if (retval)
+		goto exit_free_device;
+
+	retval = rt2x00lib_probe_dev(rt2x00dev);
+	if (retval)
+		goto exit_free_reg;
+
+	return 0;
+
+exit_free_reg:
+	rt2x00soc_free_reg(rt2x00dev);
+
+exit_free_device:
+	ieee80211_free_hw(hw);
+
+	return retval;
+}
+
+int rt2x00soc_remove(struct platform_device *pdev)
+{
+	struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+
+	/*
+	 * Free all allocated data.
+	 */
+	rt2x00lib_remove_dev(rt2x00dev);
+	rt2x00soc_free_reg(rt2x00dev);
+	ieee80211_free_hw(hw);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt2x00soc_remove);
+
+#ifdef CONFIG_PM
+int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+
+	return rt2x00lib_suspend(rt2x00dev, state);
+}
+EXPORT_SYMBOL_GPL(rt2x00soc_suspend);
+
+int rt2x00soc_resume(struct platform_device *pdev)
+{
+	struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+
+	return rt2x00lib_resume(rt2x00dev);
+}
+EXPORT_SYMBOL_GPL(rt2x00soc_resume);
+#endif /* CONFIG_PM */
+
+/*
+ * rt2x00soc module information.
+ */
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("rt2x00 soc library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
new file mode 100644
index 0000000..5cf114a
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -0,0 +1,52 @@
+/*
+	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt2x00soc
+	Abstract: Data structures for the rt2x00soc module.
+ */
+
+#ifndef RT2X00SOC_H
+#define RT2X00SOC_H
+
+#define KSEG1ADDR(__ptr) __ptr
+
+#define __rt2x00soc_probe(__chipset, __ops) \
+static int __rt2x00soc_probe(struct platform_device *pdev) \
+{ \
+	return rt2x00soc_probe(pdev, (__chipset), (__ops)); \
+}
+
+/*
+ * SoC driver handlers.
+ */
+int rt2x00soc_probe(struct platform_device *pdev,
+		    const unsigned short chipset,
+		    const struct rt2x00_ops *ops);
+int rt2x00soc_remove(struct platform_device *pdev);
+#ifdef CONFIG_PM
+int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state);
+int rt2x00soc_resume(struct platform_device *pdev);
+#else
+#define rt2x00soc_suspend	NULL
+#define rt2x00soc_resume	NULL
+#endif /* CONFIG_PM */
+
+#endif /* RT2X00SOC_H */
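Editor's note: for context, a chipset driver built on these helpers would wrap rt2x00soc_probe() (either by hand or via the __rt2x00soc_probe() macro above) and wire the exported handlers into a platform_driver. A minimal sketch follows; the driver name "rt2800_wmac", the RT2880 chipset constant and the rt2800soc_ops symbol are placeholders, not part of this patch:

	/* hypothetical SoC glue; assumes rt2x00.h and rt2x00soc.h are included */
	static int rt2800soc_probe(struct platform_device *pdev)
	{
		return rt2x00soc_probe(pdev, RT2880, &rt2800soc_ops);
	}

	static struct platform_driver rt2800soc_driver = {
		.driver		= {
			.name	= "rt2800_wmac",
			.owner	= THIS_MODULE,
		},
		.probe		= rt2800soc_probe,
		.remove		= rt2x00soc_remove,
		.suspend	= rt2x00soc_suspend,
		.resume		= rt2x00soc_resume,
	};

The driver would then be registered from module_init() with platform_driver_register(&rt2800soc_driver).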
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index b8f5ee3..14e7bb21 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2389,10 +2389,13 @@
 	{ USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* MSI */
+	{ USB_DEVICE(0x0db0, 0x4600), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* Ovislink */
+	{ USB_DEVICE(0x1b75, 0x7318), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Ralink */
 	{ USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
@@ -2420,6 +2423,8 @@
 	/* Planex */
 	{ USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* WideTell */
+	{ USB_DEVICE(0x7167, 0x3840), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Zcom */
 	{ USB_DEVICE(0x0cde, 0x001c), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* ZyXEL */
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 1103256..48b0bfd 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -183,8 +183,11 @@
 		wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)",
 			     wl->chip_id);
 		break;
-	case CHIP_ID_1251_PG10:
 	case CHIP_ID_1251_PG11:
+		wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG11)",
+			     wl->chip_id);
+		break;
+	case CHIP_ID_1251_PG10:
 	default:
 		wl1251_error("unsupported chip id: 0x%x", wl->chip_id);
 		ret = -ENODEV;
@@ -1426,4 +1429,4 @@
 MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
-MODULE_ALIAS("spi:wl12xx");
+MODULE_ALIAS("spi:wl1251");
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 17c54b5..601fe0d 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -153,7 +153,7 @@
 		     beacon ? "beacon" : "");
 
 	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
-	ieee80211_rx(wl->hw, skb);
+	ieee80211_rx_ni(wl->hw, skb);
 }
 
 static void wl1251_rx_ack(struct wl1251 *wl)
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 14eff2b..2cf8a21 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -307,7 +307,7 @@
 
 static struct spi_driver wl1251_spi_driver = {
 	.driver = {
-		.name		= "wl12xx",
+		.name		= "wl1251",
 		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 	},
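Editor's note: SPI devices bind by name, so the rename also affects board code: any spi_board_info entry that registers the chip must now use the "wl1251" modalias, since the old "wl12xx" string no longer matches either the driver name or the module alias. A hedged board-file sketch, with bus, chip-select, clock and mode values chosen purely for illustration:

	static struct spi_board_info wl1251_spi_board_info __initdata = {
		.modalias	= "wl1251",	/* must match the renamed spi_driver */
		.bus_num	= 1,		/* example values only */
		.chip_select	= 0,
		.max_speed_hz	= 48000000,
		.mode		= SPI_MODE_3,
		/* .irq and .platform_data are board specific */
	};

The entry would be registered at init time with spi_register_board_info(&wl1251_spi_board_info, 1).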
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 55818f9..566f152 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -32,6 +32,8 @@
 #include <linux/bitops.h>
 #include <net/mac80211.h>
 
+#include "wl1271_conf.h"
+
 #define DRIVER_NAME "wl1271"
 #define DRIVER_PREFIX DRIVER_NAME ": "
 
@@ -97,21 +99,42 @@
 	} while (0)
 
 #define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN |	\
-				  CFG_BSSID_FILTER_EN)
+				  CFG_BSSID_FILTER_EN | \
+				  CFG_MC_FILTER_EN)
 
 #define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN |  \
 				  CFG_RX_MGMT_EN | CFG_RX_DATA_EN |   \
 				  CFG_RX_CTL_EN | CFG_RX_BCN_EN |     \
 				  CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
 
+#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
+
 #define WL1271_FW_NAME "wl1271-fw.bin"
 #define WL1271_NVS_NAME "wl1271-nvs.bin"
 
-#define WL1271_BUSY_WORD_LEN 8
+/*
+ * Enable/disable 802.11a support for WL1273
+ */
+#undef WL1271_80211A_ENABLED
+
+/*
+ * FIXME: for the wl1271, a busy word count of 1 here will result in a more
+ * optimal SPI interface. There is some SPI bug however, causing RXS timeouts
+ * with this mode occasionally on boot, so let's have three for now. A value of
+ * three should make sure that the chipset will always be ready, though this
+ * will impact throughput and latencies slightly.
+ */
+#define WL1271_BUSY_WORD_CNT 3
+#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
 
 #define WL1271_ELP_HW_STATE_ASLEEP 0
 #define WL1271_ELP_HW_STATE_IRQ    1
 
+#define WL1271_DEFAULT_BEACON_INT  100
+#define WL1271_DEFAULT_DTIM_PERIOD 1
+
+#define ACX_TX_DESCRIPTORS         32
+
 enum wl1271_state {
 	WL1271_STATE_OFF,
 	WL1271_STATE_ON,
@@ -134,6 +157,8 @@
 struct wl1271_partition_set {
 	struct wl1271_partition mem;
 	struct wl1271_partition reg;
+	struct wl1271_partition mem2;
+	struct wl1271_partition mem3;
 };
 
 struct wl1271;
@@ -258,15 +283,15 @@
 
 /* FW status registers */
 struct wl1271_fw_status {
-	u32 intr;
+	__le32 intr;
 	u8  fw_rx_counter;
 	u8  drv_rx_counter;
 	u8  reserved;
 	u8  tx_results_counter;
-	u32 rx_pkt_descs[NUM_RX_PKT_DESC];
-	u32 tx_released_blks[NUM_TX_QUEUES];
-	u32 fw_localtime;
-	u32 padding[2];
+	__le32 rx_pkt_descs[NUM_RX_PKT_DESC];
+	__le32 tx_released_blks[NUM_TX_QUEUES];
+	__le32 fw_localtime;
+	__le32 padding[2];
 } __attribute__ ((packed));
 
 struct wl1271_rx_mem_pool_addr {
@@ -274,6 +299,15 @@
 	u32 addr_extra;
 };
 
+struct wl1271_scan {
+	u8 state;
+	u8 ssid[IW_ESSID_MAX_SIZE+1];
+	size_t ssid_len;
+	u8 active;
+	u8 high_prio;
+	u8 probe_requests;
+};
+
 struct wl1271 {
 	struct ieee80211_hw *hw;
 	bool mac80211_registered;
@@ -288,10 +322,7 @@
 	enum wl1271_state state;
 	struct mutex mutex;
 
-	int physical_mem_addr;
-	int physical_reg_addr;
-	int virtual_mem_addr;
-	int virtual_reg_addr;
+	struct wl1271_partition_set part;
 
 	struct wl1271_chip chip;
 
@@ -308,7 +339,6 @@
 	u8 bss_type;
 	u8 ssid[IW_ESSID_MAX_SIZE + 1];
 	u8 ssid_len;
-	u8 listen_int;
 	int channel;
 
 	struct wl1271_acx_mem_map *target_mem_map;
@@ -332,10 +362,14 @@
 	bool tx_queue_stopped;
 
 	struct work_struct tx_work;
-	struct work_struct filter_work;
 
 	/* Pending TX frames */
-	struct sk_buff *tx_frames[16];
+	struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
+
+	/* Security sequence number counters */
+	u8 tx_security_last_seq;
+	u16 tx_security_seq_16;
+	u32 tx_security_seq_32;
 
 	/* FW Rx counter */
 	u32 rx_counter;
@@ -354,10 +388,17 @@
 
 	/* Are we currently scanning */
 	bool scanning;
+	struct wl1271_scan scan;
 
 	/* Our association ID */
 	u16 aid;
 
+	/* currently configured rate set */
+	u32 basic_rate_set;
+
+	/* The current band */
+	enum ieee80211_band band;
+
 	/* Default key (for WEP) */
 	u32 default_key;
 
@@ -368,6 +409,7 @@
 	bool elp;
 
 	struct completion *elp_compl;
+	struct delayed_work elp_work;
 
 	/* we can be in psm, but not in elp, we have to differentiate */
 	bool psm;
@@ -383,11 +425,20 @@
 
 	u32 buffer_32;
 	u32 buffer_cmd;
-	u8 buffer_busyword[WL1271_BUSY_WORD_LEN];
-	struct wl1271_rx_descriptor *rx_descriptor;
+	u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
 
 	struct wl1271_fw_status *fw_status;
 	struct wl1271_tx_hw_res_if *tx_res_if;
+
+	struct ieee80211_vif *vif;
+
+	/* Used for a workaround to send disconnect before rejoining */
+	bool joined;
+
+	/* Current chipset configuration */
+	struct conf_drv_settings conf;
+
+	struct list_head list;
 };
 
 int wl1271_plt_start(struct wl1271 *wl);
@@ -404,4 +455,13 @@
 /* WL1271 needs a 200ms sleep after power on */
 #define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */
 
+static inline bool wl1271_11a_enabled(void)
+{
+#ifdef WL1271_80211A_ENABLED
+	return true;
+#else
+	return false;
+#endif
+}
+
 #endif
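Editor's note: as a rough illustration (not part of this patch), wl1271_11a_enabled() lets band setup stay unconditional in the code while the 5 GHz band is only advertised when WL1271_80211A_ENABLED is defined. The band structure names below are placeholders:

	/* sketch of hw registration; wl1271_band_2ghz/5ghz are hypothetical names */
	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
	if (wl1271_11a_enabled())
		wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;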
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index f622a40..bf5a868 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -34,8 +34,7 @@
 #include "wl1271_spi.h"
 #include "wl1271_ps.h"
 
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event,
-				  u8 listen_interval)
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
 {
 	struct acx_wake_up_condition *wake_up;
 	int ret;
@@ -48,8 +47,8 @@
 		goto out;
 	}
 
-	wake_up->wake_up_event = wake_up_event;
-	wake_up->listen_interval = listen_interval;
+	wake_up->wake_up_event = wl->conf.conn.wake_up_event;
+	wake_up->listen_interval = wl->conf.conn.listen_interval;
 
 	ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS,
 				   wake_up, sizeof(*wake_up));
@@ -137,7 +136,12 @@
 		goto out;
 	}
 
-	acx->current_tx_power = power * 10;
+	/*
+	 * FIXME: This is a workaround needed while we don't have the correct
+	 * calibration, to avoid distortions
+	 */
+	/* acx->current_tx_power = power * 10; */
+	acx->current_tx_power = 70;
 
 	ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
 	if (ret < 0) {
@@ -193,7 +197,7 @@
 	return 0;
 }
 
-int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time)
+int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl)
 {
 	struct acx_rx_msdu_lifetime *acx;
 	int ret;
@@ -206,7 +210,7 @@
 		goto out;
 	}
 
-	acx->lifetime = life_time;
+	acx->lifetime = cpu_to_le32(wl->conf.rx.rx_msdu_life_time);
 	ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME,
 				   acx, sizeof(*acx));
 	if (ret < 0) {
@@ -232,8 +236,8 @@
 		goto out;
 	}
 
-	rx_config->config_options = config;
-	rx_config->filter_options = filter;
+	rx_config->config_options = cpu_to_le32(config);
+	rx_config->filter_options = cpu_to_le32(filter);
 
 	ret = wl1271_cmd_configure(wl, ACX_RX_CFG,
 				   rx_config, sizeof(*rx_config));
@@ -260,7 +264,7 @@
 		goto out;
 	}
 
-	/* FIXME: threshold value not set */
+	pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
 
 	ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
 	if (ret < 0) {
@@ -300,7 +304,8 @@
 	return ret;
 }
 
-int wl1271_acx_group_address_tbl(struct wl1271 *wl)
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
+				 void *mc_list, u32 mc_list_len)
 {
 	struct acx_dot11_grp_addr_tbl *acx;
 	int ret;
@@ -314,9 +319,9 @@
 	}
 
 	/* MAC filtering */
-	acx->enabled = 0;
-	acx->num_groups = 0;
-	memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN);
+	acx->enabled = enable;
+	acx->num_groups = mc_list_len;
+	memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
 
 	ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
 				   acx, sizeof(*acx));
@@ -343,8 +348,8 @@
 
 	wl1271_debug(DEBUG_ACX, "acx service period timeout");
 
-	rx_timeout->ps_poll_timeout = RX_TIMEOUT_PS_POLL_DEF;
-	rx_timeout->upsd_timeout = RX_TIMEOUT_UPSD_DEF;
+	rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
+	rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
 
 	ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT,
 				   rx_timeout, sizeof(*rx_timeout));
@@ -372,7 +377,7 @@
 		goto out;
 	}
 
-	rts->threshold = rts_threshold;
+	rts->threshold = cpu_to_le16(rts_threshold);
 
 	ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
 	if (ret < 0) {
@@ -385,20 +390,29 @@
 	return ret;
 }
 
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl)
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
 {
-	struct acx_beacon_filter_option *beacon_filter;
-	int ret;
+	struct acx_beacon_filter_option *beacon_filter = NULL;
+	int ret = 0;
 
 	wl1271_debug(DEBUG_ACX, "acx beacon filter opt");
 
+	if (enable_filter &&
+	    wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
+		goto out;
+
 	beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL);
 	if (!beacon_filter) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	beacon_filter->enable = 0;
+	beacon_filter->enable = enable_filter;
+
+	/*
+	 * When set to zero, and the filter is enabled, beacons
+	 * without the unicast TIM bit set are dropped.
+	 */
 	beacon_filter->max_num_beacons = 0;
 
 	ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT,
@@ -416,7 +430,9 @@
 int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
 {
 	struct acx_beacon_filter_ie_table *ie_table;
+	int i, idx = 0;
 	int ret;
+	bool vendor_spec = false;
 
 	wl1271_debug(DEBUG_ACX, "acx beacon filter table");
 
@@ -426,8 +442,32 @@
 		goto out;
 	}
 
+	/* configure default beacon pass-through rules */
 	ie_table->num_ie = 0;
-	memset(ie_table->table, 0, BEACON_FILTER_TABLE_MAX_SIZE);
+	for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
+		struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
+		ie_table->table[idx++] = r->ie;
+		ie_table->table[idx++] = r->rule;
+
+		if (r->ie == WLAN_EID_VENDOR_SPECIFIC) {
+			/* only one vendor specific ie allowed */
+			if (vendor_spec)
+				continue;
+
+			/* for vendor specific rules configure the
+			   additional fields */
+			memcpy(&(ie_table->table[idx]), r->oui,
+			       CONF_BCN_IE_OUI_LEN);
+			idx += CONF_BCN_IE_OUI_LEN;
+			ie_table->table[idx++] = r->type;
+			memcpy(&(ie_table->table[idx]), r->version,
+			       CONF_BCN_IE_VER_LEN);
+			idx += CONF_BCN_IE_VER_LEN;
+			vendor_spec = true;
+		}
+
+		ie_table->num_ie++;
+	}
 
 	ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE,
 				   ie_table, sizeof(*ie_table));
@@ -441,6 +481,36 @@
 	return ret;
 }
 
+int wl1271_acx_conn_monit_params(struct wl1271 *wl)
+{
+	struct acx_conn_monit_params *acx;
+	int ret;
+
+	wl1271_debug(DEBUG_ACX, "acx connection monitor parameters");
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold);
+	acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout);
+
+	ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
+				   acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1271_warning("failed to set connection monitor "
+			       "parameters: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
+
+
 int wl1271_acx_sg_enable(struct wl1271 *wl)
 {
 	struct acx_bt_wlan_coex *pta;
@@ -470,6 +540,7 @@
 int wl1271_acx_sg_cfg(struct wl1271 *wl)
 {
 	struct acx_bt_wlan_coex_param *param;
+	struct conf_sg_settings *c = &wl->conf.sg;
 	int ret;
 
 	wl1271_debug(DEBUG_ACX, "acx sg cfg");
@@ -481,34 +552,19 @@
 	}
 
 	/* BT-WLAN coext parameters */
-	param->min_rate = RATE_INDEX_24MBPS;
-	param->bt_hp_max_time = PTA_BT_HP_MAXTIME_DEF;
-	param->wlan_hp_max_time = PTA_WLAN_HP_MAX_TIME_DEF;
-	param->sense_disable_timer = PTA_SENSE_DISABLE_TIMER_DEF;
-	param->rx_time_bt_hp = PTA_PROTECTIVE_RX_TIME_DEF;
-	param->tx_time_bt_hp = PTA_PROTECTIVE_TX_TIME_DEF;
-	param->rx_time_bt_hp_fast = PTA_PROTECTIVE_RX_TIME_FAST_DEF;
-	param->tx_time_bt_hp_fast = PTA_PROTECTIVE_TX_TIME_FAST_DEF;
-	param->wlan_cycle_fast = PTA_CYCLE_TIME_FAST_DEF;
-	param->bt_anti_starvation_period = PTA_ANTI_STARVE_PERIOD_DEF;
-	param->next_bt_lp_packet = PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF;
-	param->wake_up_beacon = PTA_TIME_BEFORE_BEACON_DEF;
-	param->hp_dm_max_guard_time = PTA_HPDM_MAX_TIME_DEF;
-	param->next_wlan_packet = PTA_TIME_OUT_NEXT_WLAN_DEF;
-	param->antenna_type = PTA_ANTENNA_TYPE_DEF;
-	param->signal_type = PTA_SIGNALING_TYPE_DEF;
-	param->afh_leverage_on = PTA_AFH_LEVERAGE_ON_DEF;
-	param->quiet_cycle_num = PTA_NUMBER_QUIET_CYCLE_DEF;
-	param->max_cts = PTA_MAX_NUM_CTS_DEF;
-	param->wlan_packets_num = PTA_NUMBER_OF_WLAN_PACKETS_DEF;
-	param->bt_packets_num = PTA_NUMBER_OF_BT_PACKETS_DEF;
-	param->missed_rx_avalanche = PTA_RX_FOR_AVALANCHE_DEF;
-	param->wlan_elp_hp = PTA_ELP_HP_DEF;
-	param->bt_anti_starvation_cycles = PTA_ANTI_STARVE_NUM_CYCLE_DEF;
-	param->ack_mode_dual_ant = PTA_ACK_MODE_DEF;
-	param->pa_sd_enable = PTA_ALLOW_PA_SD_DEF;
-	param->pta_auto_mode_enable = PTA_AUTO_MODE_NO_CTS_DEF;
-	param->bt_hp_respected_num = PTA_BT_HP_RESPECTED_DEF;
+	param->per_threshold = cpu_to_le32(c->per_threshold);
+	param->max_scan_compensation_time =
+		cpu_to_le32(c->max_scan_compensation_time);
+	param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval);
+	param->load_ratio = c->load_ratio;
+	param->auto_ps_mode = c->auto_ps_mode;
+	param->probe_req_compensation = c->probe_req_compensation;
+	param->scan_window_compensation = c->scan_window_compensation;
+	param->antenna_config = c->antenna_config;
+	param->beacon_miss_threshold = c->beacon_miss_threshold;
+	param->rate_adaptation_threshold =
+		cpu_to_le32(c->rate_adaptation_threshold);
+	param->rate_adaptation_snr = c->rate_adaptation_snr;
 
 	ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
 	if (ret < 0) {
@@ -534,8 +590,8 @@
 		goto out;
 	}
 
-	detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D;
-	detection->tx_energy_detection = 0;
+	detection->rx_cca_threshold = cpu_to_le16(wl->conf.rx.rx_cca_threshold);
+	detection->tx_energy_detection = wl->conf.tx.tx_energy_detection;
 
 	ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
 				   detection, sizeof(*detection));
@@ -562,10 +618,10 @@
 		goto out;
 	}
 
-	bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE;
-	bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE;
-	bb->rx_broadcast_in_ps = RX_BROADCAST_IN_PS_DEF_VALUE;
-	bb->ps_poll_threshold = CONSECUTIVE_PS_POLL_FAILURE_DEF;
+	bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
+	bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
+	bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
+	bb->ps_poll_threshold = wl->conf.conn.ps_poll_threshold;
 
 	ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb));
 	if (ret < 0) {
@@ -591,7 +647,7 @@
 		goto out;
 	}
 
-	acx_aid->aid = aid;
+	acx_aid->aid = cpu_to_le16(aid);
 
 	ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
 	if (ret < 0) {
@@ -618,9 +674,8 @@
 	}
 
 	/* high event mask is unused */
-	mask->high_event_mask = 0xffffffff;
-
-	mask->event_mask = event_mask;
+	mask->high_event_mask = cpu_to_le32(0xffffffff);
+	mask->event_mask = cpu_to_le32(event_mask);
 
 	ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK,
 				   mask, sizeof(*mask));
@@ -703,9 +758,10 @@
 	return 0;
 }
 
-int wl1271_acx_rate_policies(struct wl1271 *wl)
+int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
 {
 	struct acx_rate_policy *acx;
+	struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
 	int ret = 0;
 
 	wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -718,11 +774,11 @@
 	}
 
 	/* configure one default (one-size-fits-all) rate class */
-	acx->rate_class_cnt = 1;
-	acx->rate_class[0].enabled_rates = ACX_RATE_MASK_ALL;
-	acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT;
-	acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT;
-	acx->rate_class[0].aflags = 0;
+	acx->rate_class_cnt = cpu_to_le32(1);
+	acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates);
+	acx->rate_class[0].short_retry_limit = c->short_retry_limit;
+	acx->rate_class[0].long_retry_limit = c->long_retry_limit;
+	acx->rate_class[0].aflags = c->aflags;
 
 	ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
 	if (ret < 0) {
@@ -749,22 +805,14 @@
 		goto out;
 	}
 
-	/*
-	 * FIXME: Configure each AC with appropriate values (most suitable
-	 * values will probably be different for each AC.
-	 */
-	for (i = 0; i < WL1271_ACX_AC_COUNT; i++) {
-		acx->ac = i;
-
-		/*
-		 * FIXME: The following default values originate from
-		 * the TI reference driver. What do they mean?
-		 */
-		acx->cw_min = 15;
-		acx->cw_max = 63;
-		acx->aifsn = 3;
+	for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+		struct conf_tx_ac_category *c = &(wl->conf.tx.ac_conf[i]);
+		acx->ac = c->ac;
+		acx->cw_min = c->cw_min;
+		acx->cw_max = cpu_to_le16(c->cw_max);
+		acx->aifsn = c->aifsn;
 		acx->reserved = 0;
-		acx->tx_op_limit = 0;
+		acx->tx_op_limit = cpu_to_le16(c->tx_op_limit);
 
 		ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
 		if (ret < 0) {
@@ -793,12 +841,15 @@
 		goto out;
 	}
 
-	/* FIXME: configure each TID with a different AC reference */
-	for (i = 0; i < WL1271_ACX_TID_COUNT; i++) {
-		acx->queue_id = i;
-		acx->tsid = WL1271_ACX_AC_BE;
-		acx->ps_scheme = WL1271_ACX_PS_SCHEME_LEGACY;
-		acx->ack_policy = WL1271_ACX_ACK_POLICY_LEGACY;
+	for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+		struct conf_tx_tid *c = &(wl->conf.tx.tid_conf[i]);
+		acx->queue_id = c->queue_id;
+		acx->channel_type = c->channel_type;
+		acx->tsid = c->tsid;
+		acx->ps_scheme = c->ps_scheme;
+		acx->ack_policy = c->ack_policy;
+		acx->apsd_conf[0] = cpu_to_le32(c->apsd_conf[0]);
+		acx->apsd_conf[1] = cpu_to_le32(c->apsd_conf[1]);
 
 		ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
 		if (ret < 0) {
@@ -826,7 +877,7 @@
 		goto out;
 	}
 
-	acx->frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
+	acx->frag_threshold = cpu_to_le16(wl->conf.tx.frag_threshold);
 	ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx));
 	if (ret < 0) {
 		wl1271_warning("Setting of frag threshold failed: %d", ret);
@@ -852,8 +903,8 @@
 		goto out;
 	}
 
-	acx->tx_compl_timeout = WL1271_ACX_TX_COMPL_TIMEOUT;
-	acx->tx_compl_threshold = WL1271_ACX_TX_COMPL_THRESHOLD;
+	acx->tx_compl_timeout = cpu_to_le16(wl->conf.tx.tx_compl_timeout);
+	acx->tx_compl_threshold = cpu_to_le16(wl->conf.tx.tx_compl_threshold);
 	ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx));
 	if (ret < 0) {
 		wl1271_warning("Setting of tx options failed: %d", ret);
@@ -879,11 +930,11 @@
 	}
 
 	/* memory config */
-	mem_conf->num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS);
+	mem_conf->num_stations = DEFAULT_NUM_STATIONS;
 	mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS;
 	mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS;
 	mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES;
-	mem_conf->total_tx_descriptors = ACX_TX_DESCRIPTORS;
+	mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
 
 	ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
 				   sizeof(*mem_conf));
@@ -906,7 +957,7 @@
 		return ret;
 
 	wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
-					  GFP_KERNEL);
+				     GFP_KERNEL);
 	if (!wl->target_mem_map) {
 		wl1271_error("couldn't allocate target memory map");
 		return -ENOMEM;
@@ -923,7 +974,8 @@
 	}
 
 	/* initialize TX block book keeping */
-	wl->tx_blocks_available = wl->target_mem_map->num_tx_mem_blocks;
+	wl->tx_blocks_available =
+		le32_to_cpu(wl->target_mem_map->num_tx_mem_blocks);
 	wl1271_debug(DEBUG_TX, "available tx blocks: %d",
 		     wl->tx_blocks_available);
 
@@ -943,10 +995,10 @@
 		goto out;
 	}
 
-	rx_conf->threshold = WL1271_RX_INTR_THRESHOLD_DEF;
-	rx_conf->timeout = WL1271_RX_INTR_TIMEOUT_DEF;
-	rx_conf->mblk_threshold = USHORT_MAX; /* Disabled */
-	rx_conf->queue_type = RX_QUEUE_TYPE_RX_LOW_PRIORITY;
+	rx_conf->threshold = cpu_to_le16(wl->conf.rx.irq_pkt_threshold);
+	rx_conf->timeout = cpu_to_le16(wl->conf.rx.irq_timeout);
+	rx_conf->mblk_threshold = cpu_to_le16(wl->conf.rx.irq_blk_threshold);
+	rx_conf->queue_type = wl->conf.rx.queue_type;
 
 	ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf,
 				   sizeof(*rx_conf));
@@ -959,3 +1011,124 @@
 	kfree(rx_conf);
 	return ret;
 }
+
+int wl1271_acx_smart_reflex(struct wl1271 *wl)
+{
+	struct acx_smart_reflex_state *sr_state = NULL;
+	struct acx_smart_reflex_config_params *sr_param = NULL;
+	int i, ret;
+
+	wl1271_debug(DEBUG_ACX, "acx smart reflex");
+
+	sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
+	if (!sr_param) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
+		struct conf_mart_reflex_err_table *e =
+			&(wl->conf.init.sr_err_tbl[i]);
+
+		sr_param->error_table[i].len = e->len;
+		sr_param->error_table[i].upper_limit = e->upper_limit;
+		memcpy(sr_param->error_table[i].values, e->values, e->len);
+	}
+
+	ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
+				   sr_param, sizeof(*sr_param));
+	if (ret < 0) {
+		wl1271_warning("failed to set smart reflex params: %d", ret);
+		goto out;
+	}
+
+	sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
+	if (!sr_state) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* enable smart reflex */
+	sr_state->enable = wl->conf.init.sr_enable;
+
+	ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
+				   sr_state, sizeof(*sr_state));
+	if (ret < 0) {
+		wl1271_warning("failed to set smart reflex state: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(sr_state);
+	kfree(sr_param);
+	return ret;
+
+}
+
+int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
+{
+	struct wl1271_acx_bet_enable *acx = NULL;
+	int ret = 0;
+
+	wl1271_debug(DEBUG_ACX, "acx bet enable");
+
+	if (enable && wl->conf.conn.bet_enable == CONF_BET_MODE_DISABLE)
+		goto out;
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
+	acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
+
+	ret = wl1271_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1271_warning("acx bet enable failed: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
+
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
+			     u8 version)
+{
+	struct wl1271_acx_arp_filter *acx;
+	int ret;
+
+	wl1271_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->version = version;
+	acx->enable = enable;
+
+	if (enable) {
+		if (version == ACX_IPV4_VERSION)
+			memcpy(acx->address, address, ACX_IPV4_ADDR_SIZE);
+		else if (version == ACX_IPV6_VERSION)
+			memcpy(acx->address, address, sizeof(acx->address));
+		else
+			wl1271_error("Invalid IP version");
+	}
+
+	ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER,
+				   acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1271_warning("failed to set arp ip filter: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
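Editor's note: a possible call site for the new ARP filter helper, shown only to illustrate the parameters (the address and the surrounding context are hypothetical): once the interface knows its IPv4 address, the filter is switched on so that only ARP requests for that address reach the host:

	/* hypothetical call site, e.g. after the interface learns its address */
	u8 addr[ACX_IPV4_ADDR_SIZE] = { 192, 168, 0, 10 };	/* example only */
	int ret;

	ret = wl1271_acx_arp_ip_filter(wl, true, addr, ACX_IPV4_VERSION);
	if (ret < 0)
		wl1271_warning("failed to enable ARP IP filter: %d", ret);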
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 9068daa..2ce0a81 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -61,8 +61,9 @@
 					    WL1271_ACX_INTR_HW_AVAILABLE  | \
 					    WL1271_ACX_INTR_DATA)
 
-#define WL1271_INTR_MASK                   (WL1271_ACX_INTR_EVENT_A |	\
-					    WL1271_ACX_INTR_EVENT_B | \
+#define WL1271_INTR_MASK                   (WL1271_ACX_INTR_EVENT_A      | \
+					    WL1271_ACX_INTR_EVENT_B      | \
+					    WL1271_ACX_INTR_HW_AVAILABLE | \
 					    WL1271_ACX_INTR_DATA)
 
 /* Target's information element */
@@ -70,11 +71,11 @@
 	struct wl1271_cmd_header cmd;
 
 	/* acx (or information element) header */
-	u16 id;
+	__le16 id;
 
 	/* payload length (not including headers */
-	u16 len;
-};
+	__le16 len;
+} __attribute__ ((packed));
 
 struct acx_error_counter {
 	struct acx_header header;
@@ -82,21 +83,21 @@
 	/* The number of PLCP errors since the last time this */
 	/* information element was interrogated. This field is */
 	/* automatically cleared when it is interrogated.*/
-	u32 PLCP_error;
+	__le32 PLCP_error;
 
 	/* The number of FCS errors since the last time this */
 	/* information element was interrogated. This field is */
 	/* automatically cleared when it is interrogated.*/
-	u32 FCS_error;
+	__le32 FCS_error;
 
 	/* The number of MPDUs without PLCP header errors received*/
 	/* since the last time this information element was interrogated. */
 	/* This field is automatically cleared when it is interrogated.*/
-	u32 valid_frame;
+	__le32 valid_frame;
 
 	/* the number of missed sequence numbers in the sequentially */
 	/* values of frames seq numbers */
-	u32 seq_num_miss;
+	__le32 seq_num_miss;
 } __attribute__ ((packed));
 
 struct acx_revision {
@@ -125,7 +126,7 @@
 	 *              (1 = first spin, 2 = second spin, and so on).
 	 * bits 24 - 31: Chip ID - The WiLink chip ID.
 	 */
-	u32 hw_version;
+	__le32 hw_version;
 } __attribute__ ((packed));
 
 enum wl1271_psm_mode {
@@ -170,7 +171,6 @@
 #define  DP_RX_PACKET_RING_CHUNK_NUM 2
 #define  DP_TX_PACKET_RING_CHUNK_NUM 2
 #define  DP_TX_COMPLETE_TIME_OUT 20
-#define  FW_TX_CMPLT_BLOCK_SIZE 16
 
 #define TX_MSDU_LIFETIME_MIN       0
 #define TX_MSDU_LIFETIME_MAX       3000
@@ -186,7 +186,7 @@
 	 * The maximum amount of time, in TU, before the
 	 * firmware discards the MSDU.
 	 */
-	u32 lifetime;
+	__le32 lifetime;
 } __attribute__ ((packed));
 
 /*
@@ -273,14 +273,14 @@
 struct acx_rx_config {
 	struct acx_header header;
 
-	u32 config_options;
-	u32 filter_options;
+	__le32 config_options;
+	__le32 filter_options;
 } __attribute__ ((packed));
 
 struct acx_packet_detection {
 	struct acx_header header;
 
-	u32 threshold;
+	__le32 threshold;
 } __attribute__ ((packed));
 
 
@@ -302,8 +302,8 @@
 } __attribute__ ((packed));
 
 
-#define ADDRESS_GROUP_MAX	(8)
-#define ADDRESS_GROUP_MAX_LEN	(ETH_ALEN * ADDRESS_GROUP_MAX)
+#define ACX_MC_ADDRESS_GROUP_MAX	(8)
+#define ADDRESS_GROUP_MAX_LEN	        (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX)
 
 struct acx_dot11_grp_addr_tbl {
 	struct acx_header header;
@@ -314,40 +314,17 @@
 	u8 mac_table[ADDRESS_GROUP_MAX_LEN];
 } __attribute__ ((packed));
 
-
-#define  RX_TIMEOUT_PS_POLL_MIN    0
-#define  RX_TIMEOUT_PS_POLL_MAX    (200000)
-#define  RX_TIMEOUT_PS_POLL_DEF    (15)
-#define  RX_TIMEOUT_UPSD_MIN       0
-#define  RX_TIMEOUT_UPSD_MAX       (200000)
-#define  RX_TIMEOUT_UPSD_DEF       (15)
-
 struct acx_rx_timeout {
 	struct acx_header header;
 
-	/*
-	 * The longest time the STA will wait to receive
-	 * traffic from the AP after a PS-poll has been
-	 * transmitted.
-	 */
-	u16 ps_poll_timeout;
-
-	/*
-	 * The longest time the STA will wait to receive
-	 * traffic from the AP after a frame has been sent
-	 * from an UPSD enabled queue.
-	 */
-	u16 upsd_timeout;
+	__le16 ps_poll_timeout;
+	__le16 upsd_timeout;
 } __attribute__ ((packed));
 
-#define RTS_THRESHOLD_MIN              0
-#define RTS_THRESHOLD_MAX              4096
-#define RTS_THRESHOLD_DEF              2347
-
 struct acx_rts_threshold {
 	struct acx_header header;
 
-	u16 threshold;
+	__le16 threshold;
 	u8 pad[2];
 } __attribute__ ((packed));
 
@@ -408,6 +385,13 @@
 	u8 pad[3];
 } __attribute__ ((packed));
 
+struct acx_conn_monit_params {
+	struct acx_header header;
+
+	__le32 synch_fail_thold; /* number of beacons missed */
+	__le32 bss_lose_timeout; /* number of TUs from synch fail */
+} __attribute__ ((packed));
+
 enum {
 	SG_ENABLE = 0,
 	SG_DISABLE,
@@ -431,6 +415,25 @@
 	u8 pad[3];
 } __attribute__ ((packed));
 
+struct acx_smart_reflex_state {
+	struct acx_header header;
+
+	u8 enable;
+	u8 padding[3];
+} __attribute__ ((packed));
+
+struct smart_reflex_err_table {
+	u8 len;
+	s8 upper_limit;
+	s8 values[14];
+} __attribute__ ((packed));
+
+struct acx_smart_reflex_config_params {
+	struct acx_header header;
+
+	struct smart_reflex_err_table error_table[3];
+} __attribute__ ((packed));
+
 #define PTA_ANTENNA_TYPE_DEF		  (0)
 #define PTA_BT_HP_MAXTIME_DEF		  (2000)
 #define PTA_WLAN_HP_MAX_TIME_DEF	  (5000)
@@ -463,150 +466,34 @@
 struct acx_bt_wlan_coex_param {
 	struct acx_header header;
 
-	/*
-	 * The minimum rate of a received WLAN packet in the STA,
-	 * during protective mode, of which a new BT-HP request
-	 * during this Rx will always be respected and gain the antenna.
-	 */
-	u32 min_rate;
-
-	/* Max time the BT HP will be respected. */
-	u16 bt_hp_max_time;
-
-	/* Max time the WLAN HP will be respected. */
-	u16 wlan_hp_max_time;
-
-	/*
-	 * The time between the last BT activity
-	 * and the moment when the sense mode returns
-	 * to SENSE_INACTIVE.
-	 */
-	u16 sense_disable_timer;
-
-	/* Time before the next BT HP instance */
-	u16 rx_time_bt_hp;
-	u16 tx_time_bt_hp;
-
-	/* range: 10-20000    default: 1500 */
-	u16 rx_time_bt_hp_fast;
-	u16 tx_time_bt_hp_fast;
-
-	/* range: 2000-65535  default: 8700 */
-	u16 wlan_cycle_fast;
-
-	/* range: 0 - 15000 (Msec) default: 1000 */
-	u16 bt_anti_starvation_period;
-
-	/* range 400-10000(Usec) default: 3000 */
-	u16 next_bt_lp_packet;
-
-	/* Deafult: worst case for BT DH5 traffic */
-	u16 wake_up_beacon;
-
-	/* range: 0-50000(Usec) default: 1050 */
-	u16 hp_dm_max_guard_time;
-
-	/*
-	 * This is to prevent both BT & WLAN antenna
-	 * starvation.
-	 * Range: 100-50000(Usec) default:2550
-	 */
-	u16 next_wlan_packet;
-
-	/* 0 -> shared antenna */
-	u8 antenna_type;
-
-	/*
-	 * 0 -> TI legacy
-	 * 1 -> Palau
-	 */
-	u8 signal_type;
-
-	/*
-	 * BT AFH status
-	 * 0 -> no AFH
-	 * 1 -> from dedicated GPIO
-	 * 2 -> AFH on (from host)
-	 */
-	u8 afh_leverage_on;
-
-	/*
-	 * The number of cycles during which no
-	 * TX will be sent after 1 cycle of RX
-	 * transaction in protective mode
-	 */
-	u8 quiet_cycle_num;
-
-	/*
-	 * The maximum number of CTSs that will
-	 * be sent for receiving RX packet in
-	 * protective mode
-	 */
-	u8 max_cts;
-
-	/*
-	 * The number of WLAN packets
-	 * transferred in common mode before
-	 * switching to BT.
-	 */
-	u8 wlan_packets_num;
-
-	/*
-	 * The number of BT packets
-	 * transferred in common mode before
-	 * switching to WLAN.
-	 */
-	u8 bt_packets_num;
-
-	/* range: 1-255  default: 5 */
-	u8 missed_rx_avalanche;
-
-	/* range: 0-1    default: 1 */
-	u8 wlan_elp_hp;
-
-	/* range: 0 - 15  default: 4 */
-	u8 bt_anti_starvation_cycles;
-
-	u8 ack_mode_dual_ant;
-
-	/*
-	 * Allow PA_SD assertion/de-assertion
-	 * during enabled BT activity.
-	 */
-	u8 pa_sd_enable;
-
-	/*
-	 * Enable/Disable PTA in auto mode:
-	 * Support Both Active & P.S modes
-	 */
-	u8 pta_auto_mode_enable;
-
-	/* range: 0 - 20  default: 1 */
-	u8 bt_hp_respected_num;
+	__le32 per_threshold;
+	__le32 max_scan_compensation_time;
+	__le16 nfs_sample_interval;
+	u8 load_ratio;
+	u8 auto_ps_mode;
+	u8 probe_req_compensation;
+	u8 scan_window_compensation;
+	u8 antenna_config;
+	u8 beacon_miss_threshold;
+	__le32 rate_adaptation_threshold;
+	s8 rate_adaptation_snr;
+	u8 padding[3];
 } __attribute__ ((packed));
 
-#define CCA_THRSH_ENABLE_ENERGY_D       0x140A
-#define CCA_THRSH_DISABLE_ENERGY_D      0xFFEF
-
 struct acx_energy_detection {
 	struct acx_header header;
 
 	/* The RX Clear Channel Assessment threshold in the PHY */
-	u16 rx_cca_threshold;
+	__le16 rx_cca_threshold;
 	u8 tx_energy_detection;
 	u8 pad;
 } __attribute__ ((packed));
 
-#define BCN_RX_TIMEOUT_DEF_VALUE        10000
-#define BROADCAST_RX_TIMEOUT_DEF_VALUE  20000
-#define RX_BROADCAST_IN_PS_DEF_VALUE    1
-#define CONSECUTIVE_PS_POLL_FAILURE_DEF 4
-
 struct acx_beacon_broadcast {
 	struct acx_header header;
 
-	u16 beacon_rx_timeout;
-	u16 broadcast_timeout;
+	__le16 beacon_rx_timeout;
+	__le16 broadcast_timeout;
 
 	/* Enables receiving of broadcast packets in PS mode */
 	u8 rx_broadcast_in_ps;
@@ -619,8 +506,8 @@
 struct acx_event_mask {
 	struct acx_header header;
 
-	u32 event_mask;
-	u32 high_event_mask; /* Unused */
+	__le32 event_mask;
+	__le32 high_event_mask; /* Unused */
 } __attribute__ ((packed));
 
 #define CFG_RX_FCS		BIT(2)
@@ -657,11 +544,15 @@
 #define SCAN_TRIGGERED		BIT(2)
 #define SCAN_PRIORITY_HIGH	BIT(3)
 
+/* When set, disable HW encryption */
+#define DF_ENCRYPTION_DISABLE      0x01
+#define DF_SNIFF_MODE_ENABLE       0x80
+
 struct acx_feature_config {
 	struct acx_header header;
 
-	u32 options;
-	u32 data_flow_options;
+	__le32 options;
+	__le32 data_flow_options;
 } __attribute__ ((packed));
 
 struct acx_current_tx_power {
@@ -671,14 +562,6 @@
 	u8  padding[3];
 } __attribute__ ((packed));
 
-enum acx_wake_up_event {
-	WAKE_UP_EVENT_BEACON_BITMAP	= 0x01, /* Wake on every Beacon*/
-	WAKE_UP_EVENT_DTIM_BITMAP	= 0x02,	/* Wake on every DTIM*/
-	WAKE_UP_EVENT_N_DTIM_BITMAP	= 0x04, /* Wake on every Nth DTIM */
-	WAKE_UP_EVENT_N_BEACONS_BITMAP	= 0x08, /* Wake on every Nth Beacon */
-	WAKE_UP_EVENT_BITS_MASK		= 0x0F
-};
-
 struct acx_wake_up_condition {
 	struct acx_header header;
 
@@ -693,7 +576,7 @@
 	/*
 	 * To be set when associated with an AP.
 	 */
-	u16 aid;
+	__le16 aid;
 	u8 pad[2];
 } __attribute__ ((packed));
 
@@ -725,152 +608,152 @@
 } __attribute__ ((packed));
 
 struct acx_tx_statistics {
-	u32 internal_desc_overflow;
+	__le32 internal_desc_overflow;
 }  __attribute__ ((packed));
 
 struct acx_rx_statistics {
-	u32 out_of_mem;
-	u32 hdr_overflow;
-	u32 hw_stuck;
-	u32 dropped;
-	u32 fcs_err;
-	u32 xfr_hint_trig;
-	u32 path_reset;
-	u32 reset_counter;
+	__le32 out_of_mem;
+	__le32 hdr_overflow;
+	__le32 hw_stuck;
+	__le32 dropped;
+	__le32 fcs_err;
+	__le32 xfr_hint_trig;
+	__le32 path_reset;
+	__le32 reset_counter;
 } __attribute__ ((packed));
 
 struct acx_dma_statistics {
-	u32 rx_requested;
-	u32 rx_errors;
-	u32 tx_requested;
-	u32 tx_errors;
+	__le32 rx_requested;
+	__le32 rx_errors;
+	__le32 tx_requested;
+	__le32 tx_errors;
 }  __attribute__ ((packed));
 
 struct acx_isr_statistics {
 	/* host command complete */
-	u32 cmd_cmplt;
+	__le32 cmd_cmplt;
 
 	/* fiqisr() */
-	u32 fiqs;
+	__le32 fiqs;
 
 	/* (INT_STS_ND & INT_TRIG_RX_HEADER) */
-	u32 rx_headers;
+	__le32 rx_headers;
 
 	/* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
-	u32 rx_completes;
+	__le32 rx_completes;
 
 	/* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
-	u32 rx_mem_overflow;
+	__le32 rx_mem_overflow;
 
 	/* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
-	u32 rx_rdys;
+	__le32 rx_rdys;
 
 	/* irqisr() */
-	u32 irqs;
+	__le32 irqs;
 
 	/* (INT_STS_ND & INT_TRIG_TX_PROC) */
-	u32 tx_procs;
+	__le32 tx_procs;
 
 	/* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
-	u32 decrypt_done;
+	__le32 decrypt_done;
 
 	/* (INT_STS_ND & INT_TRIG_DMA0) */
-	u32 dma0_done;
+	__le32 dma0_done;
 
 	/* (INT_STS_ND & INT_TRIG_DMA1) */
-	u32 dma1_done;
+	__le32 dma1_done;
 
 	/* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
-	u32 tx_exch_complete;
+	__le32 tx_exch_complete;
 
 	/* (INT_STS_ND & INT_TRIG_COMMAND) */
-	u32 commands;
+	__le32 commands;
 
 	/* (INT_STS_ND & INT_TRIG_RX_PROC) */
-	u32 rx_procs;
+	__le32 rx_procs;
 
 	/* (INT_STS_ND & INT_TRIG_PM_802) */
-	u32 hw_pm_mode_changes;
+	__le32 hw_pm_mode_changes;
 
 	/* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
-	u32 host_acknowledges;
+	__le32 host_acknowledges;
 
 	/* (INT_STS_ND & INT_TRIG_PM_PCI) */
-	u32 pci_pm;
+	__le32 pci_pm;
 
 	/* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
-	u32 wakeups;
+	__le32 wakeups;
 
 	/* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
-	u32 low_rssi;
+	__le32 low_rssi;
 } __attribute__ ((packed));
 
 struct acx_wep_statistics {
 	/* WEP address keys configured */
-	u32 addr_key_count;
+	__le32 addr_key_count;
 
 	/* default keys configured */
-	u32 default_key_count;
+	__le32 default_key_count;
 
-	u32 reserved;
+	__le32 reserved;
 
 	/* number of times that WEP key not found on lookup */
-	u32 key_not_found;
+	__le32 key_not_found;
 
 	/* number of times that WEP key decryption failed */
-	u32 decrypt_fail;
+	__le32 decrypt_fail;
 
 	/* WEP packets decrypted */
-	u32 packets;
+	__le32 packets;
 
 	/* WEP decrypt interrupts */
-	u32 interrupt;
+	__le32 interrupt;
 } __attribute__ ((packed));
 
 #define ACX_MISSED_BEACONS_SPREAD 10
 
 struct acx_pwr_statistics {
 	/* the amount of enters into power save mode (both PD & ELP) */
-	u32 ps_enter;
+	__le32 ps_enter;
 
 	/* the amount of enters into ELP mode */
-	u32 elp_enter;
+	__le32 elp_enter;
 
 	/* the amount of missing beacon interrupts to the host */
-	u32 missing_bcns;
+	__le32 missing_bcns;
 
 	/* the amount of wake on host-access times */
-	u32 wake_on_host;
+	__le32 wake_on_host;
 
 	/* the amount of wake on timer-expire */
-	u32 wake_on_timer_exp;
+	__le32 wake_on_timer_exp;
 
 	/* the number of packets that were transmitted with PS bit set */
-	u32 tx_with_ps;
+	__le32 tx_with_ps;
 
 	/* the number of packets that were transmitted with PS bit clear */
-	u32 tx_without_ps;
+	__le32 tx_without_ps;
 
 	/* the number of received beacons */
-	u32 rcvd_beacons;
+	__le32 rcvd_beacons;
 
 	/* the number of entering into PowerOn (power save off) */
-	u32 power_save_off;
+	__le32 power_save_off;
 
 	/* the number of entries into power save mode */
-	u16 enable_ps;
+	__le16 enable_ps;
 
 	/*
 	 * the number of exits from power save, not including failed PS
 	 * transitions
 	 */
-	u16 disable_ps;
+	__le16 disable_ps;
 
 	/*
 	 * the number of times the TSF counter was adjusted because
 	 * of drift
 	 */
-	u32 fix_tsf_ps;
+	__le32 fix_tsf_ps;
 
 	/* Gives statistics about the spread continuous missed beacons.
 	 * The 16 LSB are dedicated for the PS mode.
@@ -881,53 +764,53 @@
 	 * ...
 	 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
 	*/
-	u32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
+	__le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
 
 	/* the number of beacons in awake mode */
-	u32 rcvd_awake_beacons;
+	__le32 rcvd_awake_beacons;
 } __attribute__ ((packed));
 
 struct acx_mic_statistics {
-	u32 rx_pkts;
-	u32 calc_failure;
+	__le32 rx_pkts;
+	__le32 calc_failure;
 } __attribute__ ((packed));
 
 struct acx_aes_statistics {
-	u32 encrypt_fail;
-	u32 decrypt_fail;
-	u32 encrypt_packets;
-	u32 decrypt_packets;
-	u32 encrypt_interrupt;
-	u32 decrypt_interrupt;
+	__le32 encrypt_fail;
+	__le32 decrypt_fail;
+	__le32 encrypt_packets;
+	__le32 decrypt_packets;
+	__le32 encrypt_interrupt;
+	__le32 decrypt_interrupt;
 } __attribute__ ((packed));
 
 struct acx_event_statistics {
-	u32 heart_beat;
-	u32 calibration;
-	u32 rx_mismatch;
-	u32 rx_mem_empty;
-	u32 rx_pool;
-	u32 oom_late;
-	u32 phy_transmit_error;
-	u32 tx_stuck;
+	__le32 heart_beat;
+	__le32 calibration;
+	__le32 rx_mismatch;
+	__le32 rx_mem_empty;
+	__le32 rx_pool;
+	__le32 oom_late;
+	__le32 phy_transmit_error;
+	__le32 tx_stuck;
 } __attribute__ ((packed));
 
 struct acx_ps_statistics {
-	u32 pspoll_timeouts;
-	u32 upsd_timeouts;
-	u32 upsd_max_sptime;
-	u32 upsd_max_apturn;
-	u32 pspoll_max_apturn;
-	u32 pspoll_utilization;
-	u32 upsd_utilization;
+	__le32 pspoll_timeouts;
+	__le32 upsd_timeouts;
+	__le32 upsd_max_sptime;
+	__le32 upsd_max_apturn;
+	__le32 pspoll_max_apturn;
+	__le32 pspoll_utilization;
+	__le32 upsd_utilization;
 } __attribute__ ((packed));
 
 struct acx_rxpipe_statistics {
-	u32 rx_prep_beacon_drop;
-	u32 descr_host_int_trig_rx_data;
-	u32 beacon_buffer_thres_host_int_trig_rx_data;
-	u32 missed_beacon_host_int_trig_rx_data;
-	u32 tx_xfr_host_int_trig_rx_data;
+	__le32 rx_prep_beacon_drop;
+	__le32 descr_host_int_trig_rx_data;
+	__le32 beacon_buffer_thres_host_int_trig_rx_data;
+	__le32 missed_beacon_host_int_trig_rx_data;
+	__le32 tx_xfr_host_int_trig_rx_data;
 } __attribute__ ((packed));
 
 struct acx_statistics {
@@ -946,13 +829,8 @@
 	struct acx_rxpipe_statistics rxpipe;
 } __attribute__ ((packed));
 
-#define ACX_MAX_RATE_CLASSES       8
-#define ACX_RATE_MASK_UNSPECIFIED  0
-#define ACX_RATE_MASK_ALL          0x1eff
-#define ACX_RATE_RETRY_LIMIT       10
-
 struct acx_rate_class {
-	u32 enabled_rates;
+	__le32 enabled_rates;
 	u8 short_retry_limit;
 	u8 long_retry_limit;
 	u8 aflags;
@@ -962,47 +840,20 @@
 struct acx_rate_policy {
 	struct acx_header header;
 
-	u32 rate_class_cnt;
-	struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES];
+	__le32 rate_class_cnt;
+	struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
 } __attribute__ ((packed));
 
-#define WL1271_ACX_AC_COUNT 4
-
 struct acx_ac_cfg {
 	struct acx_header header;
 	u8 ac;
 	u8 cw_min;
-	u16 cw_max;
+	__le16 cw_max;
 	u8 aifsn;
 	u8 reserved;
-	u16 tx_op_limit;
+	__le16 tx_op_limit;
 } __attribute__ ((packed));
 
-enum wl1271_acx_ac {
-	WL1271_ACX_AC_BE = 0,
-	WL1271_ACX_AC_BK = 1,
-	WL1271_ACX_AC_VI = 2,
-	WL1271_ACX_AC_VO = 3,
-	WL1271_ACX_AC_CTS2SELF = 4,
-	WL1271_ACX_AC_ANY_TID = 0x1F,
-	WL1271_ACX_AC_INVALID = 0xFF,
-};
-
-enum wl1271_acx_ps_scheme {
-	WL1271_ACX_PS_SCHEME_LEGACY = 0,
-	WL1271_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
-	WL1271_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
-	WL1271_ACX_PS_SCHEME_SAPSD = 3,
-};
-
-enum wl1271_acx_ack_policy {
-	WL1271_ACX_ACK_POLICY_LEGACY = 0,
-	WL1271_ACX_ACK_POLICY_NO_ACK = 1,
-	WL1271_ACX_ACK_POLICY_BLOCK = 2,
-};
-
-#define WL1271_ACX_TID_COUNT 7
-
 struct acx_tid_config {
 	struct acx_header header;
 	u8 queue_id;
@@ -1011,22 +862,19 @@
 	u8 ps_scheme;
 	u8 ack_policy;
 	u8 padding[3];
-	u32 apsd_conf[2];
+	__le32 apsd_conf[2];
 } __attribute__ ((packed));
 
 struct acx_frag_threshold {
 	struct acx_header header;
-	u16 frag_threshold;
+	__le16 frag_threshold;
 	u8 padding[2];
 } __attribute__ ((packed));
 
-#define WL1271_ACX_TX_COMPL_TIMEOUT   5
-#define WL1271_ACX_TX_COMPL_THRESHOLD 5
-
 struct acx_tx_config_options {
 	struct acx_header header;
-	u16 tx_compl_timeout;     /* msec */
-	u16 tx_compl_threshold;   /* number of packets */
+	__le16 tx_compl_timeout;     /* msec */
+	__le16 tx_compl_threshold;   /* number of packets */
 } __attribute__ ((packed));
 
 #define ACX_RX_MEM_BLOCKS     64
@@ -1041,79 +889,87 @@
 	u8 tx_min_mem_block_num;
 	u8 num_stations;
 	u8 num_ssid_profiles;
-	u32 total_tx_descriptors;
+	__le32 total_tx_descriptors;
 } __attribute__ ((packed));
 
 struct wl1271_acx_mem_map {
 	struct acx_header header;
 
-	void *code_start;
-	void *code_end;
+	__le32 code_start;
+	__le32 code_end;
 
-	void *wep_defkey_start;
-	void *wep_defkey_end;
+	__le32 wep_defkey_start;
+	__le32 wep_defkey_end;
 
-	void *sta_table_start;
-	void *sta_table_end;
+	__le32 sta_table_start;
+	__le32 sta_table_end;
 
-	void *packet_template_start;
-	void *packet_template_end;
+	__le32 packet_template_start;
+	__le32 packet_template_end;
 
 	/* Address of the TX result interface (control block) */
-	u32 tx_result;
-	u32 tx_result_queue_start;
+	__le32 tx_result;
+	__le32 tx_result_queue_start;
 
-	void *queue_memory_start;
-	void *queue_memory_end;
+	__le32 queue_memory_start;
+	__le32 queue_memory_end;
 
-	u32 packet_memory_pool_start;
-	u32 packet_memory_pool_end;
+	__le32 packet_memory_pool_start;
+	__le32 packet_memory_pool_end;
 
-	void *debug_buffer1_start;
-	void *debug_buffer1_end;
+	__le32 debug_buffer1_start;
+	__le32 debug_buffer1_end;
 
-	void *debug_buffer2_start;
-	void *debug_buffer2_end;
+	__le32 debug_buffer2_start;
+	__le32 debug_buffer2_end;
 
 	/* Number of blocks FW allocated for TX packets */
-	u32 num_tx_mem_blocks;
+	__le32 num_tx_mem_blocks;
 
 	/* Number of blocks FW allocated for RX packets */
-	u32 num_rx_mem_blocks;
+	__le32 num_rx_mem_blocks;
 
 	/* the following 4 fields are valid in SLAVE mode only */
 	u8 *tx_cbuf;
 	u8 *rx_cbuf;
-	void *rx_ctrl;
-	void *tx_ctrl;
+	__le32 rx_ctrl;
+	__le32 tx_ctrl;
 } __attribute__ ((packed));
 
-enum wl1271_acx_rx_queue_type {
-	RX_QUEUE_TYPE_RX_LOW_PRIORITY,    /* All except the high priority */
-	RX_QUEUE_TYPE_RX_HIGH_PRIORITY,   /* Management and voice packets */
-	RX_QUEUE_TYPE_NUM,
-	RX_QUEUE_TYPE_MAX = USHORT_MAX
-};
-
-#define WL1271_RX_INTR_THRESHOLD_DEF  0       /* no pacing, send interrupt on
-					       * every event */
-#define WL1271_RX_INTR_THRESHOLD_MIN  0
-#define WL1271_RX_INTR_THRESHOLD_MAX  15
-
-#define WL1271_RX_INTR_TIMEOUT_DEF    5
-#define WL1271_RX_INTR_TIMEOUT_MIN    1
-#define WL1271_RX_INTR_TIMEOUT_MAX    100
-
 struct wl1271_acx_rx_config_opt {
 	struct acx_header header;
 
-	u16 mblk_threshold;
-	u16 threshold;
-	u16 timeout;
+	__le16 mblk_threshold;
+	__le16 threshold;
+	__le16 timeout;
 	u8 queue_type;
 	u8 reserved;
 } __attribute__ ((packed));
 
+
+struct wl1271_acx_bet_enable {
+	struct acx_header header;
+
+	u8 enable;
+	u8 max_consecutive;
+	u8 padding[2];
+} __attribute__ ((packed));
+
+#define ACX_IPV4_VERSION 4
+#define ACX_IPV6_VERSION 6
+#define ACX_IPV4_ADDR_SIZE 4
+struct wl1271_acx_arp_filter {
+	struct acx_header header;
+	u8 version;         /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
+	u8 enable;          /* 1 to enable ARP filtering, 0 to disable */
+	u8 padding[2];
+	u8 address[16];     /* The configured device IP address - all ARP
+			       requests directed to this IP address will pass
+			       through. For IPv4, the first four bytes are
+			       used. */
+} __attribute__((packed));
+
+
 enum {
 	ACX_WAKE_UP_CONDITIONS      = 0x0002,
 	ACX_MEM_CFG                 = 0x0003,
@@ -1170,6 +1026,9 @@
 	ACX_PEER_HT_CAP             = 0x0057,
 	ACX_HT_BSS_OPERATION        = 0x0058,
 	ACX_COEX_ACTIVITY           = 0x0059,
+	ACX_SET_SMART_REFLEX_DEBUG  = 0x005A,
+	ACX_SET_SMART_REFLEX_STATE  = 0x005B,
+	ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
 	DOT11_RX_MSDU_LIFE_TIME     = 0x1004,
 	DOT11_CUR_TX_PWR            = 0x100D,
 	DOT11_RX_DOT11_MODE         = 0x1012,
@@ -1182,23 +1041,24 @@
 };
 
 
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event,
-				  u8 listen_interval);
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
 int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
 int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len);
 int wl1271_acx_tx_power(struct wl1271 *wl, int power);
 int wl1271_acx_feature_cfg(struct wl1271 *wl);
 int wl1271_acx_mem_map(struct wl1271 *wl,
 		       struct acx_header *mem_map, size_t len);
-int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time);
+int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
 int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter);
 int wl1271_acx_pd_threshold(struct wl1271 *wl);
 int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
-int wl1271_acx_group_address_tbl(struct wl1271 *wl);
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
+				 void *mc_list, u32 mc_list_len);
 int wl1271_acx_service_period_timeout(struct wl1271 *wl);
 int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl);
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
 int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl);
 int wl1271_acx_sg_enable(struct wl1271 *wl);
 int wl1271_acx_sg_cfg(struct wl1271 *wl);
 int wl1271_acx_cca_threshold(struct wl1271 *wl);
@@ -1207,9 +1067,9 @@
 int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
 int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
 int wl1271_acx_cts_protect(struct wl1271 *wl,
-			    enum acx_ctsprotect_type ctsprotect);
+			   enum acx_ctsprotect_type ctsprotect);
 int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl);
+int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates);
 int wl1271_acx_ac_cfg(struct wl1271 *wl);
 int wl1271_acx_tid_cfg(struct wl1271 *wl);
 int wl1271_acx_frag_threshold(struct wl1271 *wl);
@@ -1217,5 +1077,9 @@
 int wl1271_acx_mem_cfg(struct wl1271 *wl);
 int wl1271_acx_init_mem_config(struct wl1271 *wl);
 int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
+int wl1271_acx_smart_reflex(struct wl1271 *wl);
+int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
+			     u8 version);
 
 #endif /* __WL1271_ACX_H__ */
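Editor's note: the common thread in this header is that every field the firmware reads is now declared __le16/__le32, so sparse can flag missing conversions; the .c side then converts exactly once at the host/firmware boundary. A condensed sketch of the resulting fill pattern, mirroring the wl1271_acx_rts_threshold() change above (the function name here is made up for illustration):

	static int example_set_rts(struct wl1271 *wl, u16 rts_threshold)
	{
		struct acx_rts_threshold *rts;
		int ret;

		rts = kzalloc(sizeof(*rts), GFP_KERNEL);
		if (!rts)
			return -ENOMEM;

		/* convert exactly once, at the host/firmware boundary */
		rts->threshold = cpu_to_le16(rts_threshold);
		ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD,
					   rts, sizeof(*rts));
		kfree(rts);
		return ret;
	}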
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 8228ef4..ba4a2b4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -39,6 +39,14 @@
 			.start = REGISTERS_BASE,
 			.size  = 0x00008800
 		},
+		.mem2 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
+		.mem3 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
 	},
 
 	[PART_WORK] = {
@@ -48,7 +56,15 @@
 		},
 		.reg = {
 			.start = REGISTERS_BASE,
-			.size  = 0x0000b000
+			.size  = 0x0000a000
+		},
+		.mem2 = {
+			.start = 0x003004f8,
+			.size  = 0x00000004
+		},
+		.mem3 = {
+			.start = 0x00040404,
+			.size  = 0x00000000
 		},
 	},
 
@@ -60,6 +76,14 @@
 		.reg = {
 			.start = DRPW_BASE,
 			.size  = 0x00006000
+		},
+		.mem2 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
+		.mem3 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
 		}
 	}
 };
@@ -69,19 +93,19 @@
 	u32 cpu_ctrl;
 
 	/* 10.5.0 run the firmware (I) */
-	cpu_ctrl = wl1271_reg_read32(wl, ACX_REG_ECPU_CONTROL);
+	cpu_ctrl = wl1271_spi_read32(wl, ACX_REG_ECPU_CONTROL);
 
 	/* 10.5.1 run the firmware (II) */
 	cpu_ctrl |= flag;
-	wl1271_reg_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
+	wl1271_spi_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
 }
 
 static void wl1271_boot_fw_version(struct wl1271 *wl)
 {
 	struct wl1271_static_data static_data;
 
-	wl1271_spi_mem_read(wl, wl->cmd_box_addr,
-			    &static_data, sizeof(static_data));
+	wl1271_spi_read(wl, wl->cmd_box_addr,
+			&static_data, sizeof(static_data), false);
 
 	strncpy(wl->chip.fw_ver, static_data.fw_version,
 		sizeof(wl->chip.fw_ver));
@@ -93,8 +117,9 @@
 static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
 					     size_t fw_data_len, u32 dest)
 {
+	struct wl1271_partition_set partition;
 	int addr, chunk_num, partition_limit;
-	u8 *p;
+	u8 *p, *chunk;
 
 	/* whal_FwCtrl_LoadFwImageSm() */
 
@@ -103,16 +128,20 @@
 	wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
 		     fw_data_len, CHUNK_SIZE);
 
-
 	if ((fw_data_len % 4) != 0) {
 		wl1271_error("firmware length not multiple of four");
 		return -EIO;
 	}
 
-	wl1271_set_partition(wl, dest,
-			     part_table[PART_DOWN].mem.size,
-			     part_table[PART_DOWN].reg.start,
-			     part_table[PART_DOWN].reg.size);
+	chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
+	if (!chunk) {
+		wl1271_error("allocation for firmware upload chunk failed");
+		return -ENOMEM;
+	}
+
+	memcpy(&partition, &part_table[PART_DOWN], sizeof(partition));
+	partition.mem.start = dest;
+	wl1271_set_partition(wl, &partition);
 
 	/* 10.1 set partition limit and chunk num */
 	chunk_num = 0;
@@ -125,21 +154,17 @@
 			addr = dest + chunk_num * CHUNK_SIZE;
 			partition_limit = chunk_num * CHUNK_SIZE +
 				part_table[PART_DOWN].mem.size;
-
-			/* FIXME: Over 80 chars! */
-			wl1271_set_partition(wl,
-					     addr,
-					     part_table[PART_DOWN].mem.size,
-					     part_table[PART_DOWN].reg.start,
-					     part_table[PART_DOWN].reg.size);
+			partition.mem.start = addr;
+			wl1271_set_partition(wl, &partition);
 		}
 
 		/* 10.3 upload the chunk */
 		addr = dest + chunk_num * CHUNK_SIZE;
 		p = buf + chunk_num * CHUNK_SIZE;
+		memcpy(chunk, p, CHUNK_SIZE);
 		wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
 			     p, addr);
-		wl1271_spi_mem_write(wl, addr, p, CHUNK_SIZE);
+		wl1271_spi_write(wl, addr, chunk, CHUNK_SIZE, false);
 
 		chunk_num++;
 	}
@@ -147,28 +172,31 @@
 	/* 10.4 upload the last chunk */
 	addr = dest + chunk_num * CHUNK_SIZE;
 	p = buf + chunk_num * CHUNK_SIZE;
+	memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
 	wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
 		     fw_data_len % CHUNK_SIZE, p, addr);
-	wl1271_spi_mem_write(wl, addr, p, fw_data_len % CHUNK_SIZE);
+	wl1271_spi_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
 
+	kfree(chunk);
 	return 0;
 }
 
 static int wl1271_boot_upload_firmware(struct wl1271 *wl)
 {
 	u32 chunks, addr, len;
+	int ret = 0;
 	u8 *fw;
 
 	fw = wl->fw;
-	chunks = be32_to_cpup((u32 *) fw);
+	chunks = be32_to_cpup((__be32 *) fw);
 	fw += sizeof(u32);
 
 	wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
 
 	while (chunks--) {
-		addr = be32_to_cpup((u32 *) fw);
+		addr = be32_to_cpup((__be32 *) fw);
 		fw += sizeof(u32);
-		len = be32_to_cpup((u32 *) fw);
+		len = be32_to_cpup((__be32 *) fw);
 		fw += sizeof(u32);
 
 		if (len > 300000) {
@@ -177,11 +205,13 @@
 		}
 		wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
 			     chunks, addr, len);
-		wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
+		ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
+		if (ret != 0)
+			break;
 		fw += len;
 	}
 
-	return 0;
+	return ret;
 }
 
 static int wl1271_boot_upload_nvs(struct wl1271 *wl)
@@ -235,7 +265,7 @@
 			wl1271_debug(DEBUG_BOOT,
 				     "nvs burst write 0x%x: 0x%x",
 				     dest_addr, val);
-			wl1271_reg_write32(wl, dest_addr, val);
+			wl1271_spi_write32(wl, dest_addr, val);
 
 			nvs_ptr += 4;
 			dest_addr += 4;
@@ -253,20 +283,18 @@
 	/* FIXME: The driver sets the partition here, but this is not needed,
 	   since it sets to the same one as currently in use */
 	/* Now we must set the partition correctly */
-	wl1271_set_partition(wl,
-			     part_table[PART_WORK].mem.start,
-			     part_table[PART_WORK].mem.size,
-			     part_table[PART_WORK].reg.start,
-			     part_table[PART_WORK].reg.size);
+	wl1271_set_partition(wl, &part_table[PART_WORK]);
 
 	/* Copy the NVS tables to a new block to ensure alignment */
 	nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
+	if (!nvs_aligned)
+		return -ENOMEM;
 
 	/* And finally we upload the NVS tables */
 	/* FIXME: In wl1271, we upload everything at once.
 	   No endianness handling needed here?! The ref driver doesn't do
 	   anything about it at this point */
-	wl1271_spi_mem_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len);
+	wl1271_spi_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
 
 	kfree(nvs_aligned);
 	return 0;
@@ -275,9 +303,9 @@
 static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
 {
 	enable_irq(wl->irq);
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK,
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
 			   WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
-	wl1271_reg_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
+	wl1271_spi_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
 }
 
 static int wl1271_boot_soft_reset(struct wl1271 *wl)
@@ -286,12 +314,13 @@
 	u32 boot_data;
 
 	/* perform soft reset */
-	wl1271_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
+	wl1271_spi_write32(wl, ACX_REG_SLV_SOFT_RESET,
+			   ACX_SLV_SOFT_RESET_BIT);
 
 	/* SOFT_RESET is self clearing */
 	timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
 	while (1) {
-		boot_data = wl1271_reg_read32(wl, ACX_REG_SLV_SOFT_RESET);
+		boot_data = wl1271_spi_read32(wl, ACX_REG_SLV_SOFT_RESET);
 		wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
 		if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
 			break;
@@ -307,10 +336,10 @@
 	}
 
 	/* disable Rx/Tx */
-	wl1271_reg_write32(wl, ENABLE, 0x0);
+	wl1271_spi_write32(wl, ENABLE, 0x0);
 
 	/* disable auto calibration on start*/
-	wl1271_reg_write32(wl, SPARE_A2, 0xffff);
+	wl1271_spi_write32(wl, SPARE_A2, 0xffff);
 
 	return 0;
 }
@@ -322,7 +351,7 @@
 
 	wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
 
-	chip_id = wl1271_reg_read32(wl, CHIP_ID_B);
+	chip_id = wl1271_spi_read32(wl, CHIP_ID_B);
 
 	wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
 
@@ -335,7 +364,8 @@
 	loop = 0;
 	while (loop++ < INIT_LOOP) {
 		udelay(INIT_LOOP_DELAY);
-		interrupt = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+		interrupt = wl1271_spi_read32(wl,
+					      ACX_REG_INTERRUPT_NO_CLEAR);
 
 		if (interrupt == 0xffffffff) {
 			wl1271_error("error reading hardware complete "
@@ -344,7 +374,7 @@
 		}
 		/* check that ACX_INTR_INIT_COMPLETE is enabled */
 		else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
-			wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
+			wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
 					   WL1271_ACX_INTR_INIT_COMPLETE);
 			break;
 		}
@@ -357,17 +387,13 @@
 	}
 
 	/* get hardware config command mail box */
-	wl->cmd_box_addr = wl1271_reg_read32(wl, REG_COMMAND_MAILBOX_PTR);
+	wl->cmd_box_addr = wl1271_spi_read32(wl, REG_COMMAND_MAILBOX_PTR);
 
 	/* get hardware config event mail box */
-	wl->event_box_addr = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR);
+	wl->event_box_addr = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
 
 	/* set the working partition to its "running" mode offset */
-	wl1271_set_partition(wl,
-			     part_table[PART_WORK].mem.start,
-			     part_table[PART_WORK].mem.size,
-			     part_table[PART_WORK].reg.start,
-			     part_table[PART_WORK].reg.size);
+	wl1271_set_partition(wl, &part_table[PART_WORK]);
 
 	wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
 		     wl->cmd_box_addr, wl->event_box_addr);
@@ -379,11 +405,9 @@
 	 * ready to receive event from the command mailbox
 	 */
 
-	/* enable gpio interrupts */
-	wl1271_boot_enable_interrupts(wl);
-
-	/* unmask all mbox events  */
-	wl->event_mask = 0xffffffff;
+	/* unmask required mbox events  */
+	wl->event_mask = BSS_LOSE_EVENT_ID |
+		SCAN_COMPLETE_EVENT_ID;
 
 	ret = wl1271_event_unmask(wl);
 	if (ret < 0) {
@@ -399,34 +423,13 @@
 
 static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
 {
-	u32 polarity, status, i;
+	u32 polarity;
 
-	wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY);
-	wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_READ);
-
-	/* Wait until the command is complete (ie. bit 18 is set) */
-	for (i = 0; i < OCP_CMD_LOOP; i++) {
-		polarity = wl1271_reg_read32(wl, OCP_DATA_READ);
-		if (polarity & OCP_READY_MASK)
-			break;
-	}
-	if (i == OCP_CMD_LOOP) {
-		wl1271_error("OCP command timeout!");
-		return -EIO;
-	}
-
-	status = polarity & OCP_STATUS_MASK;
-	if (status != OCP_STATUS_OK) {
-		wl1271_error("OCP command failed (%d)", status);
-		return -EIO;
-	}
+	polarity = wl1271_top_reg_read(wl, OCP_REG_POLARITY);
 
 	/* We use HIGH polarity, so unset the LOW bit */
 	polarity &= ~POLARITY_LOW;
-
-	wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY);
-	wl1271_reg_write32(wl, OCP_DATA_WRITE, polarity);
-	wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_WRITE);
+	wl1271_top_reg_write(wl, OCP_REG_POLARITY, polarity);
 
 	return 0;
 }
@@ -436,16 +439,32 @@
 	int ret = 0;
 	u32 tmp, clk, pause;
 
-	if (REF_CLOCK == 0 || REF_CLOCK == 2)
-		/* ref clk: 19.2/38.4 */
+	if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4)
+		/* ref clk: 19.2/38.4/38.4-XTAL */
 		clk = 0x3;
 	else if (REF_CLOCK == 1 || REF_CLOCK == 3)
 		/* ref clk: 26/52 */
 		clk = 0x5;
 
-	wl1271_reg_write32(wl, PLL_PARAMETERS, clk);
+	if (REF_CLOCK != 0) {
+		u16 val;
+		/* Set clock type */
+		val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
+		val &= FREF_CLK_TYPE_BITS;
+		val |= CLK_REQ_PRCM;
+		wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
+	} else {
+		u16 val;
+		/* Set clock polarity */
+		val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY);
+		val &= FREF_CLK_POLARITY_BITS;
+		val |= CLK_REQ_OUTN_SEL;
+		wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
+	}
 
-	pause = wl1271_reg_read32(wl, PLL_PARAMETERS);
+	wl1271_spi_write32(wl, PLL_PARAMETERS, clk);
+
+	pause = wl1271_spi_read32(wl, PLL_PARAMETERS);
 
 	wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
 
@@ -454,39 +473,31 @@
 					   * 0x3ff (magic number ).  How does
 					   * this work?! */
 	pause |= WU_COUNTER_PAUSE_VAL;
-	wl1271_reg_write32(wl, WU_COUNTER_PAUSE, pause);
+	wl1271_spi_write32(wl, WU_COUNTER_PAUSE, pause);
 
 	/* Continue the ELP wake up sequence */
-	wl1271_reg_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+	wl1271_spi_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
 	udelay(500);
 
-	wl1271_set_partition(wl,
-			     part_table[PART_DRPW].mem.start,
-			     part_table[PART_DRPW].mem.size,
-			     part_table[PART_DRPW].reg.start,
-			     part_table[PART_DRPW].reg.size);
+	wl1271_set_partition(wl, &part_table[PART_DRPW]);
 
 	/* Read-modify-write DRPW_SCRATCH_START register (see next state)
 	   to be used by DRPw FW. The RTRIM value will be added by the FW
 	   before taking DRPw out of reset */
 
 	wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
-	clk = wl1271_reg_read32(wl, DRPW_SCRATCH_START);
+	clk = wl1271_spi_read32(wl, DRPW_SCRATCH_START);
 
 	wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
 
 	/* 2 */
 	clk |= (REF_CLOCK << 1) << 4;
-	wl1271_reg_write32(wl, DRPW_SCRATCH_START, clk);
+	wl1271_spi_write32(wl, DRPW_SCRATCH_START, clk);
 
-	wl1271_set_partition(wl,
-			     part_table[PART_WORK].mem.start,
-			     part_table[PART_WORK].mem.size,
-			     part_table[PART_WORK].reg.start,
-			     part_table[PART_WORK].reg.size);
+	wl1271_set_partition(wl, &part_table[PART_WORK]);
 
 	/* Disable interrupts */
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
 
 	ret = wl1271_boot_soft_reset(wl);
 	if (ret < 0)
@@ -501,21 +512,22 @@
 	 * ACX_EEPROMLESS_IND_REG */
 	wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
 
-	wl1271_reg_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG);
+	wl1271_spi_write32(wl, ACX_EEPROMLESS_IND_REG,
+			   ACX_EEPROMLESS_IND_REG);
 
-	tmp = wl1271_reg_read32(wl, CHIP_ID_B);
+	tmp = wl1271_spi_read32(wl, CHIP_ID_B);
 
 	wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
 
 	/* 6. read the EEPROM parameters */
-	tmp = wl1271_reg_read32(wl, SCR_PAD2);
+	tmp = wl1271_spi_read32(wl, SCR_PAD2);
 
 	ret = wl1271_boot_write_irq_polarity(wl);
 	if (ret < 0)
 		goto out;
 
 	/* FIXME: Need to check whether this is really what we want */
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK,
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
 			   WL1271_ACX_ALL_EVENTS_VECTOR);
 
 	/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
@@ -530,6 +542,9 @@
 	if (ret < 0)
 		goto out;
 
+	/* Enable firmware interrupts now */
+	wl1271_boot_enable_interrupts(wl);
+
 	/* set the wl1271 default filters */
 	wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
 	wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index b0d8fb4..412443e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -50,23 +50,17 @@
 #define WU_COUNTER_PAUSE_VAL 0x3FF
 #define WELP_ARM_COMMAND_VAL 0x4
 
-#define OCP_CMD_LOOP  32
-
-#define OCP_CMD_WRITE 0x1
-#define OCP_CMD_READ  0x2
-
-#define OCP_READY_MASK  BIT(18)
-#define OCP_STATUS_MASK (BIT(16) | BIT(17))
-
-#define OCP_STATUS_NO_RESP    0x00000
-#define OCP_STATUS_OK         0x10000
-#define OCP_STATUS_REQ_FAILED 0x20000
-#define OCP_STATUS_RESP_ERROR 0x30000
-
-#define OCP_REG_POLARITY 0x30032
+#define OCP_REG_POLARITY     0x0064
+#define OCP_REG_CLK_TYPE     0x0448
+#define OCP_REG_CLK_POLARITY 0x0cb2
 
 #define CMD_MBOX_ADDRESS 0x407B4
 
 #define POLARITY_LOW BIT(1)
 
+#define FREF_CLK_TYPE_BITS     0xfffffe7f
+#define CLK_REQ_PRCM           0x100
+#define FREF_CLK_POLARITY_BITS 0xfffff8ff
+#define CLK_REQ_OUTN_SEL       0x700
+
 #endif
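For reference, the OCP_REG_CLK_TYPE update in the boot code is a plain read-modify-write with the masks defined above: FREF_CLK_TYPE_BITS clears the clock-type field (bits 7-8) and CLK_REQ_PRCM sets bit 8. A standalone sketch of just the bit arithmetic, with the register access replaced by a constant (illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

#define FREF_CLK_TYPE_BITS 0xfffffe7f	/* keep all bits except bits 7..8 */
#define CLK_REQ_PRCM       0x100	/* bit 8 */

int main(void)
{
	uint32_t val = 0xffff;		/* pretend this was read from OCP_REG_CLK_TYPE */

	val &= FREF_CLK_TYPE_BITS;	/* 0xffff -> 0xfe7f: clock-type field cleared */
	val |= CLK_REQ_PRCM;		/* 0xfe7f -> 0xff7f: PRCM clock request selected */
	printf("0x%04x\n", (unsigned int)val);	/* prints 0xff7f */
	return 0;
}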
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 2a4351f..0666328 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -50,18 +50,18 @@
 	int ret = 0;
 
 	cmd = buf;
-	cmd->id = id;
+	cmd->id = cpu_to_le16(id);
 	cmd->status = 0;
 
 	WARN_ON(len % 4 != 0);
 
-	wl1271_spi_mem_write(wl, wl->cmd_box_addr, buf, len);
+	wl1271_spi_write(wl, wl->cmd_box_addr, buf, len, false);
 
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
 
 	timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
 
-	intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+	intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
 	while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
 		if (time_after(jiffies, timeout)) {
 			wl1271_error("command complete timeout");
@@ -71,17 +71,17 @@
 
 		msleep(1);
 
-		intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+		intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
 	}
 
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
 			   WL1271_ACX_INTR_CMD_COMPLETE);
 
 out:
 	return ret;
 }
 
-int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
+static int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
 {
 	struct wl1271_cmd_cal_channel_tune *cmd;
 	int ret = 0;
@@ -104,7 +104,7 @@
 	return ret;
 }
 
-int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
+static int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
 {
 	struct wl1271_cmd_cal_update_ref_point *cmd;
 	int ret = 0;
@@ -129,7 +129,7 @@
 	return ret;
 }
 
-int wl1271_cmd_cal_p2g(struct wl1271 *wl)
+static int wl1271_cmd_cal_p2g(struct wl1271 *wl)
 {
 	struct wl1271_cmd_cal_p2g *cmd;
 	int ret = 0;
@@ -150,7 +150,7 @@
 	return ret;
 }
 
-int wl1271_cmd_cal(struct wl1271 *wl)
+static int wl1271_cmd_cal(struct wl1271 *wl)
 {
 	/*
 	 * FIXME: we must make sure that we're not sleeping when calibration
@@ -175,11 +175,9 @@
 	return ret;
 }
 
-int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
-		    u16 beacon_interval, u8 wait)
+int wl1271_cmd_join(struct wl1271 *wl)
 {
 	static bool do_cal = true;
-	unsigned long timeout;
 	struct wl1271_cmd_join *join;
 	int ret, i;
 	u8 *bssid;
@@ -193,6 +191,18 @@
 			do_cal = false;
 	}
 
+	/* FIXME: This is a workaround, because with the current stack, we
+	 * cannot know when we have disassociated.  So, if we have already
+	 * joined, we disconnect before joining again. */
+	if (wl->joined) {
+		ret = wl1271_cmd_disconnect(wl);
+		if (ret < 0) {
+			wl1271_error("failed to disconnect before rejoining");
+			goto out;
+		}
+
+		wl->joined = false;
+	}
 
 	join = kzalloc(sizeof(*join), GFP_KERNEL);
 	if (!join) {
@@ -207,15 +217,34 @@
 	for (i = 0; i < ETH_ALEN; i++)
 		bssid[i] = wl->bssid[ETH_ALEN - i - 1];
 
-	join->rx_config_options = wl->rx_config;
-	join->rx_filter_options = wl->rx_filter;
+	join->rx_config_options = cpu_to_le32(wl->rx_config);
+	join->rx_filter_options = cpu_to_le32(wl->rx_filter);
+	join->bss_type = wl->bss_type;
 
-	join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
-		RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
+	/*
+	 * FIXME: temporarily disable all filters, because commit
+	 * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
+	 * association. The filter logic needs to be implemented properly
+	 * and once that is done, this hack can be removed.
+	 */
+	join->rx_config_options = cpu_to_le32(0);
+	join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER);
 
-	join->beacon_interval = beacon_interval;
-	join->dtim_interval = dtim_interval;
-	join->bss_type = bss_type;
+	if (wl->band == IEEE80211_BAND_2GHZ)
+		join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS   |
+						   CONF_HW_BIT_RATE_2MBPS   |
+						   CONF_HW_BIT_RATE_5_5MBPS |
+						   CONF_HW_BIT_RATE_11MBPS);
+	else {
+		join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
+		join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS  |
+						   CONF_HW_BIT_RATE_12MBPS |
+						   CONF_HW_BIT_RATE_24MBPS);
+	}
+
+	join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT);
+	join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
+
 	join->channel = wl->channel;
 	join->ssid_len = wl->ssid_len;
 	memcpy(join->ssid, wl->ssid, wl->ssid_len);
@@ -228,6 +257,10 @@
 
 	join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
 
+	/* reset TX security counters */
+	wl->tx_security_last_seq = 0;
+	wl->tx_security_seq_16 = 0;
+	wl->tx_security_seq_32 = 0;
 
 	ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join));
 	if (ret < 0) {
@@ -235,14 +268,13 @@
 		goto out_free;
 	}
 
-	timeout = msecs_to_jiffies(JOIN_TIMEOUT);
+	wl->joined = true;
 
 	/*
 	 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
 	 * simplify locking we just sleep instead, for now
 	 */
-	if (wait)
-		msleep(10);
+	msleep(10);
 
 out_free:
 	kfree(join);
@@ -274,19 +306,20 @@
 
 	if (answer) {
 		struct wl1271_command *cmd_answer;
+		u16 status;
 
 		/*
 		 * The test command got in, we can read the answer.
 		 * The answer would be a wl1271_command, where the
 		 * parameter array contains the actual answer.
 		 */
-		wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, buf_len);
+		wl1271_spi_read(wl, wl->cmd_box_addr, buf, buf_len, false);
 
 		cmd_answer = buf;
+		status = le16_to_cpu(cmd_answer->header.status);
 
-		if (cmd_answer->header.status != CMD_STATUS_SUCCESS)
-			wl1271_error("TEST command answer error: %d",
-				     cmd_answer->header.status);
+		if (status != CMD_STATUS_SUCCESS)
+			wl1271_error("TEST command answer error: %d", status);
 	}
 
 	return 0;
@@ -307,10 +340,10 @@
 
 	wl1271_debug(DEBUG_CMD, "cmd interrogate");
 
-	acx->id = id;
+	acx->id = cpu_to_le16(id);
 
 	/* payload length, does not include any headers */
-	acx->len = len - sizeof(*acx);
+	acx->len = cpu_to_le16(len - sizeof(*acx));
 
 	ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx));
 	if (ret < 0) {
@@ -319,12 +352,12 @@
 	}
 
 	/* the interrogate command got in, we can read the answer */
-	wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, len);
+	wl1271_spi_read(wl, wl->cmd_box_addr, buf, len, false);
 
 	acx = buf;
-	if (acx->cmd.status != CMD_STATUS_SUCCESS)
+	if (le16_to_cpu(acx->cmd.status) != CMD_STATUS_SUCCESS)
 		wl1271_error("INTERROGATE command error: %d",
-			     acx->cmd.status);
+			     le16_to_cpu(acx->cmd.status));
 
 out:
 	return ret;
@@ -345,10 +378,10 @@
 
 	wl1271_debug(DEBUG_CMD, "cmd configure");
 
-	acx->id = id;
+	acx->id = cpu_to_le16(id);
 
 	/* payload length, does not include any headers */
-	acx->len = len - sizeof(*acx);
+	acx->len = cpu_to_le16(len - sizeof(*acx));
 
 	ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len);
 	if (ret < 0) {
@@ -414,8 +447,7 @@
 	int ret = 0;
 
 	/* FIXME: this should be in ps.c */
-	ret = wl1271_acx_wake_up_conditions(wl, WAKE_UP_EVENT_DTIM_BITMAP,
-					    wl->listen_int);
+	ret = wl1271_acx_wake_up_conditions(wl);
 	if (ret < 0) {
 		wl1271_error("couldn't set wake up conditions");
 		goto out;
@@ -433,7 +465,7 @@
 	ps_params->send_null_data = 1;
 	ps_params->retries = 5;
 	ps_params->hang_over_period = 128;
-	ps_params->null_data_rate = 1; /* 1 Mbps */
+	ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
 
 	ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
 			      sizeof(*ps_params));
@@ -464,8 +496,8 @@
 	WARN_ON(len > MAX_READ_SIZE);
 	len = min_t(size_t, len, MAX_READ_SIZE);
 
-	cmd->addr = addr;
-	cmd->size = len;
+	cmd->addr = cpu_to_le32(addr);
+	cmd->size = cpu_to_le32(len);
 
 	ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd));
 	if (ret < 0) {
@@ -474,11 +506,11 @@
 	}
 
 	/* the read command got in, we can now read the answer */
-	wl1271_spi_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
+	wl1271_spi_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd), false);
 
-	if (cmd->header.status != CMD_STATUS_SUCCESS)
+	if (le16_to_cpu(cmd->header.status) != CMD_STATUS_SUCCESS)
 		wl1271_error("error in read command result: %d",
-			     cmd->header.status);
+			     le16_to_cpu(cmd->header.status));
 
 	memcpy(answer, cmd->value, len);
 
@@ -488,14 +520,31 @@
 }
 
 int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
-		    u8 active_scan, u8 high_prio, u8 num_channels,
+		    u8 active_scan, u8 high_prio, u8 band,
 		    u8 probe_requests)
 {
 
 	struct wl1271_cmd_trigger_scan_to *trigger = NULL;
 	struct wl1271_cmd_scan *params = NULL;
-	int i, ret;
+	struct ieee80211_channel *channels;
+	int i, j, n_ch, ret;
 	u16 scan_options = 0;
+	u8 ieee_band;
+
+	if (band == WL1271_SCAN_BAND_2_4_GHZ)
+		ieee_band = IEEE80211_BAND_2GHZ;
+	else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled())
+		ieee_band = IEEE80211_BAND_2GHZ;
+	else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled())
+		ieee_band = IEEE80211_BAND_5GHZ;
+	else
+		return -EINVAL;
+
+	if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
+		return -EINVAL;
+
+	channels = wl->hw->wiphy->bands[ieee_band]->channels;
+	n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
 
 	if (wl->scanning)
 		return -EINVAL;
@@ -512,32 +561,43 @@
 		scan_options |= WL1271_SCAN_OPT_PASSIVE;
 	if (high_prio)
 		scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH;
-	params->params.scan_options = scan_options;
+	params->params.scan_options = cpu_to_le16(scan_options);
 
-	params->params.num_channels = num_channels;
 	params->params.num_probe_requests = probe_requests;
-	params->params.tx_rate = cpu_to_le32(RATE_MASK_2MBPS);
+	/* Let the fw autodetect suitable tx_rate for probes */
+	params->params.tx_rate = 0;
 	params->params.tid_trigger = 0;
 	params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
 
-	for (i = 0; i < num_channels; i++) {
-		params->channels[i].min_duration =
-			cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION);
-		params->channels[i].max_duration =
-			cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION);
-		memset(&params->channels[i].bssid_lsb, 0xff, 4);
-		memset(&params->channels[i].bssid_msb, 0xff, 2);
-		params->channels[i].early_termination = 0;
-		params->channels[i].tx_power_att = WL1271_SCAN_CURRENT_TX_PWR;
-		params->channels[i].channel = i + 1;
+	if (band == WL1271_SCAN_BAND_DUAL)
+		params->params.band = WL1271_SCAN_BAND_2_4_GHZ;
+	else
+		params->params.band = band;
+
+	for (i = 0, j = 0; i < n_ch && i < WL1271_SCAN_MAX_CHANNELS; i++) {
+		if (!(channels[i].flags & IEEE80211_CHAN_DISABLED)) {
+			params->channels[j].min_duration =
+				cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION);
+			params->channels[j].max_duration =
+				cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION);
+			memset(&params->channels[j].bssid_lsb, 0xff, 4);
+			memset(&params->channels[j].bssid_msb, 0xff, 2);
+			params->channels[j].early_termination = 0;
+			params->channels[j].tx_power_att =
+				WL1271_SCAN_CURRENT_TX_PWR;
+			params->channels[j].channel = channels[i].hw_value;
+			j++;
+		}
 	}
 
+	params->params.num_channels = j;
+
 	if (len && ssid) {
 		params->params.ssid_len = len;
 		memcpy(params->params.ssid, ssid, len);
 	}
 
-	ret = wl1271_cmd_build_probe_req(wl, ssid, len);
+	ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band);
 	if (ret < 0) {
 		wl1271_error("PROBE request template failed");
 		goto out;
@@ -562,6 +622,19 @@
 	wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
 
 	wl->scanning = true;
+	if (wl1271_11a_enabled()) {
+		wl->scan.state = band;
+		if (band == WL1271_SCAN_BAND_DUAL) {
+			wl->scan.active = active_scan;
+			wl->scan.high_prio = high_prio;
+			wl->scan.probe_requests = probe_requests;
+			if (len && ssid) {
+				wl->scan.ssid_len = len;
+				memcpy(wl->scan.ssid, ssid, len);
+			} else
+				wl->scan.ssid_len = 0;
+		}
+	}
 
 	ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params));
 	if (ret < 0) {
@@ -569,11 +642,12 @@
 		goto out;
 	}
 
-	wl1271_spi_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params));
+	wl1271_spi_read(wl, wl->cmd_box_addr, params, sizeof(*params),
+			false);
 
-	if (params->header.status != CMD_STATUS_SUCCESS) {
+	if (le16_to_cpu(params->header.status) != CMD_STATUS_SUCCESS) {
 		wl1271_error("Scan command error: %d",
-			     params->header.status);
+			     le16_to_cpu(params->header.status));
 		wl->scanning = false;
 		ret = -EIO;
 		goto out;
@@ -603,9 +677,9 @@
 
 	cmd->len = cpu_to_le16(buf_len);
 	cmd->template_type = template_id;
-	cmd->enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
-	cmd->short_retry_limit = ACX_RATE_RETRY_LIMIT;
-	cmd->long_retry_limit = ACX_RATE_RETRY_LIMIT;
+	cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates);
+	cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
+	cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
 
 	if (buf)
 		memcpy(cmd->template_data, buf, buf_len);
@@ -623,30 +697,62 @@
 	return ret;
 }
 
-static int wl1271_build_basic_rates(char *rates)
+static int wl1271_build_basic_rates(char *rates, u8 band)
 {
 	u8 index = 0;
 
-	rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
-	rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
-	rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
-	rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
+	if (band == IEEE80211_BAND_2GHZ) {
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
+	} else if (band == IEEE80211_BAND_5GHZ) {
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
+	} else {
+		wl1271_error("build_basic_rates invalid band: %d", band);
+	}
 
 	return index;
 }
 
-static int wl1271_build_extended_rates(char *rates)
+static int wl1271_build_extended_rates(char *rates, u8 band)
 {
 	u8 index = 0;
 
-	rates[index++] = IEEE80211_OFDM_RATE_6MB;
-	rates[index++] = IEEE80211_OFDM_RATE_9MB;
-	rates[index++] = IEEE80211_OFDM_RATE_12MB;
-	rates[index++] = IEEE80211_OFDM_RATE_18MB;
-	rates[index++] = IEEE80211_OFDM_RATE_24MB;
-	rates[index++] = IEEE80211_OFDM_RATE_36MB;
-	rates[index++] = IEEE80211_OFDM_RATE_48MB;
-	rates[index++] = IEEE80211_OFDM_RATE_54MB;
+	if (band == IEEE80211_BAND_2GHZ) {
+		rates[index++] = IEEE80211_OFDM_RATE_6MB;
+		rates[index++] = IEEE80211_OFDM_RATE_9MB;
+		rates[index++] = IEEE80211_OFDM_RATE_12MB;
+		rates[index++] = IEEE80211_OFDM_RATE_18MB;
+		rates[index++] = IEEE80211_OFDM_RATE_24MB;
+		rates[index++] = IEEE80211_OFDM_RATE_36MB;
+		rates[index++] = IEEE80211_OFDM_RATE_48MB;
+		rates[index++] = IEEE80211_OFDM_RATE_54MB;
+	} else if (band == IEEE80211_BAND_5GHZ) {
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
+		rates[index++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
+	} else {
+		wl1271_error("build_basic_rates invalid band: %d", band);
+	}
 
 	return index;
 }
@@ -665,7 +771,8 @@
 
 	memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
 	template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
-						IEEE80211_STYPE_NULLFUNC);
+						IEEE80211_STYPE_NULLFUNC |
+						IEEE80211_FCTL_TODS);
 
 	return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template,
 				       sizeof(template));
@@ -678,7 +785,10 @@
 
 	memcpy(template.bssid, wl->bssid, ETH_ALEN);
 	memcpy(template.ta, wl->mac_addr, ETH_ALEN);
-	template.aid = aid;
+
+	/* aid in PS-Poll has its two MSBs each set to 1 */
+	template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
+
 	template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
 
 	return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template,
@@ -686,12 +796,14 @@
 
 }
 
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len)
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
+			       u8 band)
 {
 	struct wl12xx_probe_req_template template;
 	struct wl12xx_ie_rates *rates;
 	char *ptr;
 	u16 size;
+	int ret;
 
 	ptr = (char *)&template;
 	size = sizeof(struct ieee80211_header);
@@ -713,20 +825,25 @@
 	/* Basic Rates */
 	rates = (struct wl12xx_ie_rates *)ptr;
 	rates->header.id = WLAN_EID_SUPP_RATES;
-	rates->header.len = wl1271_build_basic_rates(rates->rates);
+	rates->header.len = wl1271_build_basic_rates(rates->rates, band);
 	size += sizeof(struct wl12xx_ie_header) + rates->header.len;
 	ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
 
 	/* Extended rates */
 	rates = (struct wl12xx_ie_rates *)ptr;
 	rates->header.id = WLAN_EID_EXT_SUPP_RATES;
-	rates->header.len = wl1271_build_extended_rates(rates->rates);
+	rates->header.len = wl1271_build_extended_rates(rates->rates, band);
 	size += sizeof(struct wl12xx_ie_header) + rates->header.len;
 
 	wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
 
-	return wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
-				       &template, size);
+	if (band == IEEE80211_BAND_2GHZ)
+		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
+					      &template, size);
+	else
+		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
+					      &template, size);
+	return ret;
 }
 
 int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
@@ -743,7 +860,7 @@
 	}
 
 	cmd->id = id;
-	cmd->key_action = KEY_SET_ID;
+	cmd->key_action = cpu_to_le16(KEY_SET_ID);
 	cmd->key_type = KEY_WEP;
 
 	ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd));
@@ -759,7 +876,8 @@
 }
 
 int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
-		       u8 key_size, const u8 *key, const u8 *addr)
+		       u8 key_size, const u8 *key, const u8 *addr,
+		       u32 tx_seq_32, u16 tx_seq_16)
 {
 	struct wl1271_cmd_set_keys *cmd;
 	int ret = 0;
@@ -773,16 +891,18 @@
 	if (key_type != KEY_WEP)
 		memcpy(cmd->addr, addr, ETH_ALEN);
 
-	cmd->key_action = action;
+	cmd->key_action = cpu_to_le16(action);
 	cmd->key_size = key_size;
 	cmd->key_type = key_type;
 
+	cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
+	cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
+
 	/* we have only one SSID profile */
 	cmd->ssid_profile = 0;
 
 	cmd->id = id;
 
-	/* FIXME: this is from wl1251, needs to be checked */
 	if (key_type == KEY_TKIP) {
 		/*
 		 * We get the key in the following form:
@@ -811,3 +931,34 @@
 
 	return ret;
 }
+
+int wl1271_cmd_disconnect(struct wl1271 *wl)
+{
+	struct wl1271_cmd_disconnect *cmd;
+	int ret = 0;
+
+	wl1271_debug(DEBUG_CMD, "cmd disconnect");
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cmd->rx_config_options = cpu_to_le32(wl->rx_config);
+	cmd->rx_filter_options = cpu_to_le32(wl->rx_filter);
+	/* disconnect reason is not used in immediate disconnections */
+	cmd->type = DISCONNECT_IMMEDIATE;
+
+	ret = wl1271_cmd_send(wl, CMD_DISCONNECT, cmd, sizeof(*cmd));
+	if (ret < 0) {
+		wl1271_error("failed to send disconnect command");
+		goto out_free;
+	}
+
+out_free:
+	kfree(cmd);
+
+out:
+	return ret;
+}
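For reference, the PS-Poll template change above encodes the AID with its two most significant bits forced to 1, as 802.11 requires for the PS-Poll Duration/ID field. A standalone sketch of that encoding (illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t aid = 1;				/* association ID from the stack */
	uint16_t field = (uint16_t)(1 << 15 | 1 << 14 | aid);

	printf("0x%04x\n", (unsigned int)field);	/* prints 0xc001 */
	return 0;
}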
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 951a844..174b820 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -30,8 +30,7 @@
 struct acx_header;
 
 int wl1271_cmd_send(struct wl1271 *wl, u16 type, void *buf, size_t buf_len);
-int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
-		    u16 beacon_interval, u8 wait);
+int wl1271_cmd_join(struct wl1271 *wl);
 int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
 int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -40,16 +39,19 @@
 int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
 			   size_t len);
 int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
-		    u8 active_scan, u8 high_prio, u8 num_channels,
+		    u8 active_scan, u8 high_prio, u8 band,
 		    u8 probe_requests);
 int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
 			    void *buf, size_t buf_len);
 int wl1271_cmd_build_null_data(struct wl1271 *wl);
 int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
+			       u8 band);
 int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
 int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
-		       u8 key_size, const u8 *key, const u8 *addr);
+		       u8 key_size, const u8 *key, const u8 *addr,
+		       u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_disconnect(struct wl1271 *wl);
 
 enum wl1271_commands {
 	CMD_INTERROGATE     = 1,    /*use this to read information elements*/
@@ -118,8 +120,8 @@
 #define WL1271_CMD_TEMPL_MAX_SIZE  252
 
 struct wl1271_cmd_header {
-	u16 id;
-	u16 status;
+	__le16 id;
+	__le16 status;
 	/* payload */
 	u8 data[0];
 } __attribute__ ((packed));
@@ -172,17 +174,17 @@
 	struct wl1271_cmd_header header;
 
 	/* The address of the memory to read from or write to.*/
-	u32 addr;
+	__le32 addr;
 
 	/* The amount of data in bytes to read from or write to the WiLink
 	 * device.*/
-	u32 size;
+	__le32 size;
 
 	/* The actual value read from or written to the Wilink. The source
 	   of this field is the Host in WRITE command or the Wilink in READ
 	   command. */
 	u8 value[MAX_READ_SIZE];
-};
+} __attribute__ ((packed));
 
 #define CMDMBOX_HEADER_LEN 4
 #define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -196,22 +198,23 @@
 
 #define WL1271_JOIN_CMD_CTRL_TX_FLUSH     0x80 /* Firmware flushes all Tx */
 #define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1
+#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10
 
 struct wl1271_cmd_join {
 	struct wl1271_cmd_header header;
 
-	u32 bssid_lsb;
-	u16 bssid_msb;
-	u16 beacon_interval; /* in TBTTs */
-	u32 rx_config_options;
-	u32 rx_filter_options;
+	__le32 bssid_lsb;
+	__le16 bssid_msb;
+	__le16 beacon_interval; /* in TBTTs */
+	__le32 rx_config_options;
+	__le32 rx_filter_options;
 
 	/*
 	 * The target uses this field to determine the rate at
 	 * which to transmit control frame responses (such as
 	 * ACK or CTS frames).
 	 */
-	u32 basic_rate_set;
+	__le32 basic_rate_set;
 	u8 dtim_interval;
 	/*
 	 * bits 0-2: This bitwise field specifies the type
@@ -240,10 +243,10 @@
 struct wl1271_cmd_template_set {
 	struct wl1271_cmd_header header;
 
-	u16 len;
+	__le16 len;
 	u8 template_type;
 	u8 index;  /* relevant only for KLV_TEMPLATE type */
-	u32 enabled_rates;
+	__le32 enabled_rates;
 	u8 short_retry_limit;
 	u8 long_retry_limit;
 	u8 aflags;
@@ -280,18 +283,13 @@
 	  * to power save mode.
 	  */
 	u8 hang_over_period;
-	u32 null_data_rate;
+	__le32 null_data_rate;
 } __attribute__ ((packed));
 
 /* HW encryption keys */
 #define NUM_ACCESS_CATEGORIES_COPY 4
 #define MAX_KEY_SIZE 32
 
-/* When set, disable HW encryption */
-#define DF_ENCRYPTION_DISABLE      0x01
-/* When set, disable HW decryption */
-#define DF_SNIFF_MODE_ENABLE       0x80
-
 enum wl1271_cmd_key_action {
 	KEY_ADD_OR_REPLACE = 1,
 	KEY_REMOVE         = 2,
@@ -316,9 +314,9 @@
 	u8 addr[ETH_ALEN];
 
 	/* key_action_e */
-	u16 key_action;
+	__le16 key_action;
 
-	u16 reserved_1;
+	__le16 reserved_1;
 
 	/* key size in bytes */
 	u8 key_size;
@@ -334,8 +332,8 @@
 	u8 id;
 	u8 reserved_2[6];
 	u8 key[MAX_KEY_SIZE];
-	u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
-	u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
+	__le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
+	__le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
 } __attribute__ ((packed));
 
 
@@ -347,19 +345,22 @@
 #define WL1271_SCAN_OPT_PRIORITY_HIGH  4
 #define WL1271_SCAN_CHAN_MIN_DURATION  30000  /* TU */
 #define WL1271_SCAN_CHAN_MAX_DURATION  60000  /* TU */
+#define WL1271_SCAN_BAND_2_4_GHZ 0
+#define WL1271_SCAN_BAND_5_GHZ 1
+#define WL1271_SCAN_BAND_DUAL 2
 
 struct basic_scan_params {
-	u32 rx_config_options;
-	u32 rx_filter_options;
+	__le32 rx_config_options;
+	__le32 rx_filter_options;
 	/* Scan option flags (WL1271_SCAN_OPT_*) */
-	u16 scan_options;
+	__le16 scan_options;
 	/* Number of scan channels in the list (maximum 30) */
 	u8 num_channels;
 	/* This field indicates the number of probe requests to send
 	   per channel for an active scan */
 	u8 num_probe_requests;
 	/* Rate bit field for sending the probes */
-	u32 tx_rate;
+	__le32 tx_rate;
 	u8 tid_trigger;
 	u8 ssid_len;
 	/* in order to align */
@@ -374,10 +375,10 @@
 
 struct basic_scan_channel_params {
 	/* Duration in TU to wait for frames on a channel for active scan */
-	u32 min_duration;
-	u32 max_duration;
-	u32 bssid_lsb;
-	u16 bssid_msb;
+	__le32 min_duration;
+	__le32 max_duration;
+	__le32 bssid_lsb;
+	__le16 bssid_msb;
 	u8 early_termination;
 	u8 tx_power_att;
 	u8 channel;
@@ -397,13 +398,13 @@
 struct wl1271_cmd_trigger_scan_to {
 	struct wl1271_cmd_header header;
 
-	u32 timeout;
-};
+	__le32 timeout;
+} __attribute__ ((packed));
 
 struct wl1271_cmd_test_header {
 	u8 id;
 	u8 padding[3];
-};
+} __attribute__ ((packed));
 
 enum wl1271_channel_tune_bands {
 	WL1271_CHANNEL_TUNE_BAND_2_4,
@@ -425,7 +426,7 @@
 	u8 band;
 	u8 channel;
 
-	u16 radio_status;
+	__le16 radio_status;
 } __attribute__ ((packed));
 
 struct wl1271_cmd_cal_update_ref_point {
@@ -433,8 +434,8 @@
 
 	struct wl1271_cmd_test_header test;
 
-	s32 ref_power;
-	s32 ref_detector;
+	__le32 ref_power;
+	__le32 ref_detector;
 	u8  sub_band;
 	u8  padding[3];
 } __attribute__ ((packed));
@@ -449,16 +450,42 @@
 
 	struct wl1271_cmd_test_header test;
 
-	u16 len;
+	__le16 len;
 	u8  buf[MAX_TLV_LENGTH];
 	u8  type;
 	u8  padding;
 
-	s16 radio_status;
+	__le16 radio_status;
 	u8  nvs_version[MAX_NVS_VERSION_LENGTH];
 
 	u8  sub_band_mask;
 	u8  padding2;
 } __attribute__ ((packed));
 
+
+/*
+ * There are three types of disconnections:
+ *
+ * DISCONNECT_IMMEDIATE: the fw doesn't send any frames
+ * DISCONNECT_DEAUTH:    the fw generates a DEAUTH request with the reason
+ *                       we have passed
+ * DISCONNECT_DISASSOC:  the fw generates a DISASSOC request with the reason
+ *                       we have passed
+ */
+enum wl1271_disconnect_type {
+	DISCONNECT_IMMEDIATE,
+	DISCONNECT_DEAUTH,
+	DISCONNECT_DISASSOC
+};
+
+struct wl1271_cmd_disconnect {
+	__le32 rx_config_options;
+	__le32 rx_filter_options;
+
+	__le16 reason;
+	u8  type;
+
+	u8  padding;
+} __attribute__ ((packed));
+
 #endif /* __WL1271_CMD_H__ */
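The command structures above are now declared with __le16/__le32 and filled through cpu_to_le16()/cpu_to_le32(), because the firmware mailbox expects little-endian values regardless of host byte order. A kernel-free sketch of what "little-endian on the wire" means; put_le16() here is a hypothetical stand-in for the kernel helper:

#include <stdio.h>
#include <stdint.h>

static void put_le16(uint8_t *out, uint16_t v)
{
	out[0] = (uint8_t)(v & 0xff);		/* least significant byte first */
	out[1] = (uint8_t)(v >> 8);
}

int main(void)
{
	uint8_t buf[2];

	put_le16(buf, 0x1234);			/* e.g. a command id or length */
	printf("%02x %02x\n", buf[0], buf[1]);	/* prints "34 12" on any host */
	return 0;
}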
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
new file mode 100644
index 0000000..061d475
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -0,0 +1,911 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL1271_CONF_H__
+#define __WL1271_CONF_H__
+
+enum {
+	CONF_HW_BIT_RATE_1MBPS   = BIT(0),
+	CONF_HW_BIT_RATE_2MBPS   = BIT(1),
+	CONF_HW_BIT_RATE_5_5MBPS = BIT(2),
+	CONF_HW_BIT_RATE_6MBPS   = BIT(3),
+	CONF_HW_BIT_RATE_9MBPS   = BIT(4),
+	CONF_HW_BIT_RATE_11MBPS  = BIT(5),
+	CONF_HW_BIT_RATE_12MBPS  = BIT(6),
+	CONF_HW_BIT_RATE_18MBPS  = BIT(7),
+	CONF_HW_BIT_RATE_22MBPS  = BIT(8),
+	CONF_HW_BIT_RATE_24MBPS  = BIT(9),
+	CONF_HW_BIT_RATE_36MBPS  = BIT(10),
+	CONF_HW_BIT_RATE_48MBPS  = BIT(11),
+	CONF_HW_BIT_RATE_54MBPS  = BIT(12),
+	CONF_HW_BIT_RATE_MCS_0   = BIT(13),
+	CONF_HW_BIT_RATE_MCS_1   = BIT(14),
+	CONF_HW_BIT_RATE_MCS_2   = BIT(15),
+	CONF_HW_BIT_RATE_MCS_3   = BIT(16),
+	CONF_HW_BIT_RATE_MCS_4   = BIT(17),
+	CONF_HW_BIT_RATE_MCS_5   = BIT(18),
+	CONF_HW_BIT_RATE_MCS_6   = BIT(19),
+	CONF_HW_BIT_RATE_MCS_7   = BIT(20)
+};
+
+enum {
+	CONF_HW_RATE_INDEX_1MBPS   = 0,
+	CONF_HW_RATE_INDEX_2MBPS   = 1,
+	CONF_HW_RATE_INDEX_5_5MBPS = 2,
+	CONF_HW_RATE_INDEX_6MBPS   = 3,
+	CONF_HW_RATE_INDEX_9MBPS   = 4,
+	CONF_HW_RATE_INDEX_11MBPS  = 5,
+	CONF_HW_RATE_INDEX_12MBPS  = 6,
+	CONF_HW_RATE_INDEX_18MBPS  = 7,
+	CONF_HW_RATE_INDEX_22MBPS  = 8,
+	CONF_HW_RATE_INDEX_24MBPS  = 9,
+	CONF_HW_RATE_INDEX_36MBPS  = 10,
+	CONF_HW_RATE_INDEX_48MBPS  = 11,
+	CONF_HW_RATE_INDEX_54MBPS  = 12,
+	CONF_HW_RATE_INDEX_MAX     = CONF_HW_RATE_INDEX_54MBPS,
+};
+
+struct conf_sg_settings {
+	/*
+	 * Defines the PER threshold, in PPM, of the BT voice; reaching this
+	 * value triggers raising the priority of the BT voice by the BT IP
+	 * until the next NFS sample interval, as defined in
+	 * nfs_sample_interval.
+	 *
+	 * Unit: PER value in PPM (parts per million)
+	 * #Error_packets / #Total_packets
+	 *
+	 * Range: u32
+	 */
+	u32 per_threshold;
+
+	/*
+	 * This value is an absolute time in micro-seconds to limit the
+	 * maximum scan duration compensation while in SG
+	 */
+	u32 max_scan_compensation_time;
+
+	/* Defines the PER threshold of the BT voice; reaching this value
+	 * triggers raising the priority of the BT voice until the next NFS
+	 * sample interval, as defined in nfs_sample_interval.
+	 *
+	 * Unit: msec
+	 * Range: 1-65000
+	 */
+	u16 nfs_sample_interval;
+
+	/*
+	 * Defines the load ratio for the BT.
+	 * The WLAN ratio is: 100 - load_ratio
+	 *
+	 * Unit: Percent
+	 * Range: 0-100
+	 */
+	u8 load_ratio;
+
+	/*
+	 * true - Co-ex is allowed to enter/exit P.S automatically and
+	 *        transparently to the host
+	 *
+	 * false - Co-ex is not allowed to enter/exit P.S; instead it triggers
+	 *         an event to the host to notify of the need to enter/exit
+	 *         P.S due to a BT state change
+	 *
+	 */
+	u8 auto_ps_mode;
+
+	/*
+	 * This parameter defines the compensation percentage for the number
+	 * of probe requests in case a scan is initiated during a BT voice or
+	 * BT ACL guaranteed link.
+	 *
+	 * Unit: Percent
+	 * Range: 0-255 (0 - No compensation)
+	 */
+	u8 probe_req_compensation;
+
+	/*
+	 * This parameter defines the compensation percentage for the scan
+	 * window size in case a scan is initiated during a BT voice or
+	 * BT ACL guaranteed link.
+	 *
+	 * Unit: Percent
+	 * Range: 0-255 (0 - No compensation)
+	 */
+	u8 scan_window_compensation;
+
+	/*
+	 * Defines the antenna configuration.
+	 *
+	 * Range: 0 - Single Antenna; 1 - Dual Antenna
+	 */
+	u8 antenna_config;
+
+	/*
+	 * The percent out of the Max consecutive beacon miss roaming trigger
+	 * which is the threshold for raising the priority of beacon
+	 * reception.
+	 *
+	 * Range: 1-100
+	 * N = MaxConsecutiveBeaconMiss
+	 * P = coexMaxConsecutiveBeaconMissPercent
+	 * Threshold = MIN( N-1, round(N * P / 100))
+	 */
+	u8 beacon_miss_threshold;
+
+	/*
+	 * The RX rate threshold below which rate adaptation is assumed to be
+	 * occurring at the AP, which raises the priority for ACTIVE_RX and RX
+	 * SP.
+	 *
+	 * Range: HW_BIT_RATE_*
+	 */
+	u32 rate_adaptation_threshold;
+
+	/*
+	 * The SNR above which the RX rate threshold indicating AP rate
+	 * adaptation is valid
+	 *
+	 * Range: -128 - 127
+	 */
+	s8 rate_adaptation_snr;
+};
+
+enum conf_rx_queue_type {
+	CONF_RX_QUEUE_TYPE_LOW_PRIORITY,  /* All except the high priority */
+	CONF_RX_QUEUE_TYPE_HIGH_PRIORITY, /* Management and voice packets */
+};
+
+struct conf_rx_settings {
+	/*
+	 * The maximum amount of time, in TU, before the
+	 * firmware discards the MSDU.
+	 *
+	 * Range: 0 - 0xFFFFFFFF
+	 */
+	u32 rx_msdu_life_time;
+
+	/*
+	 * Packet detection threshold in the PHY.
+	 *
+	 * FIXME: details unknown.
+	 */
+	u32 packet_detection_threshold;
+
+	/*
+	 * The longest time the STA will wait to receive traffic from the AP
+	 * after a PS-poll has been transmitted.
+	 *
+	 * Range: 0 - 200000
+	 */
+	u16 ps_poll_timeout;
+	/*
+	 * The longest time the STA will wait to receive traffic from the AP
+	 * after a frame has been sent from an UPSD enabled queue.
+	 *
+	 * Range: 0 - 200000
+	 */
+	u16 upsd_timeout;
+
+	/*
+	 * The number of octets in an MPDU, below which an RTS/CTS
+	 * handshake is not performed.
+	 *
+	 * Range: 0 - 4096
+	 */
+	u16 rts_threshold;
+
+	/*
+	 * The RX Clear Channel Assessment threshold in the PHY
+	 * (the energy threshold).
+	 *
+	 * Range: ENABLE_ENERGY_D  == 0x140A
+	 *        DISABLE_ENERGY_D == 0xFFEF
+	 */
+	u16 rx_cca_threshold;
+
+	/*
+	 * Number of occupied Rx mem-blocks that triggers an interrupt to
+	 * the host (0 = no buffering, 0xffff = disabled).
+	 *
+	 * Range: u16
+	 */
+	u16 irq_blk_threshold;
+
+	/*
+	 * Number of Rx packets that triggers an interrupt to the host
+	 * (0 = no buffering).
+	 *
+	 * Range: u16
+	 */
+	u16 irq_pkt_threshold;
+
+	/*
+	 * Max time in msec the FW may delay RX-Complete interrupt.
+	 *
+	 * Range: 1 - 100
+	 */
+	u16 irq_timeout;
+
+	/*
+	 * The RX queue type.
+	 *
+	 * Range: CONF_RX_QUEUE_TYPE_LOW_PRIORITY, CONF_RX_QUEUE_TYPE_HIGH_PRIORITY
+	 */
+	u8 queue_type;
+};
+
+#define CONF_TX_MAX_RATE_CLASSES       8
+
+#define CONF_TX_RATE_MASK_UNSPECIFIED  0
+#define CONF_TX_RATE_MASK_ALL          0x1eff
+#define CONF_TX_RATE_RETRY_LIMIT       10
+
+struct conf_tx_rate_class {
+
+	/*
+	 * The rates enabled for this rate class.
+	 *
+	 * Range: CONF_HW_BIT_RATE_* bit mask
+	 */
+	u32 enabled_rates;
+
+	/*
+	 * The dot11 short retry limit used for TX retries.
+	 *
+	 * Range: u8
+	 */
+	u8 short_retry_limit;
+
+	/*
+	 * The dot11 long retry limit used for TX retries.
+	 *
+	 * Range: u8
+	 */
+	u8 long_retry_limit;
+
+	/*
+	 * Flags controlling the attributes of TX transmission.
+	 *
+	 * Range: bit 0: Truncate - when set, the FW stops attempting to send
+	 *               a frame once the total valid per-rate attempts have
+	 *               been exhausted; otherwise transmissions
+	 *               will continue at the lowest available rate
+	 *               until the appropriate one of the
+	 *               short_retry_limit, long_retry_limit,
+	 *               dot11_max_transmit_msdu_life_time, or
+	 *               max_tx_life_time, is exhausted.
+	 *            1: Preamble Override - indicates if the preamble type
+	 *               should be used in TX.
+	 *            2: Preamble Type - the type of the preamble to be used by
+	 *               the policy (0 - long preamble, 1 - short preamble).
+	 */
+	u8 aflags;
+};
+
+#define CONF_TX_MAX_AC_COUNT 4
+
+/* Slot number setting to start transmission at PIFS interval */
+#define CONF_TX_AIFS_PIFS 1
+/* Slot number setting to start transmission at DIFS interval (normal
+ * DCF access) */
+#define CONF_TX_AIFS_DIFS 2
+
+
+enum conf_tx_ac {
+	CONF_TX_AC_BE = 0,         /* best effort / legacy */
+	CONF_TX_AC_BK = 1,         /* background */
+	CONF_TX_AC_VI = 2,         /* video */
+	CONF_TX_AC_VO = 3,         /* voice */
+	CONF_TX_AC_CTS2SELF = 4,   /* fictitious AC, follows AC_VO */
+	CONF_TX_AC_ANY_TID = 0x1f
+};
+
+struct conf_tx_ac_category {
+	/*
+	 * The AC class identifier.
+	 *
+	 * Range: enum conf_tx_ac
+	 */
+	u8 ac;
+
+	/*
+	 * The contention window minimum size (in slots) for the access
+	 * class.
+	 *
+	 * Range: u8
+	 */
+	u8 cw_min;
+
+	/*
+	 * The contention window maximum size (in slots) for the access
+	 * class.
+	 *
+	 * Range: u16
+	 */
+	u16 cw_max;
+
+	/*
+	 * The AIF value (in slots) for the access class.
+	 *
+	 * Range: u8
+	 */
+	u8 aifsn;
+
+	/*
+	 * The TX Op Limit (in microseconds) for the access class.
+	 *
+	 * Range: u16
+	 */
+	u16 tx_op_limit;
+};
+
+#define CONF_TX_MAX_TID_COUNT 7
+
+enum {
+	CONF_CHANNEL_TYPE_DCF = 0,   /* DCF/LEGACY */
+	CONF_CHANNEL_TYPE_EDCF = 1,  /* EDCA */
+	CONF_CHANNEL_TYPE_HCCA = 2,  /* HCCA */
+};
+
+enum {
+	CONF_PS_SCHEME_LEGACY = 0,
+	CONF_PS_SCHEME_UPSD_TRIGGER = 1,
+	CONF_PS_SCHEME_LEGACY_PSPOLL = 2,
+	CONF_PS_SCHEME_SAPSD = 3,
+};
+
+enum {
+	CONF_ACK_POLICY_LEGACY = 0,
+	CONF_ACK_POLICY_NO_ACK = 1,
+	CONF_ACK_POLICY_BLOCK = 2,
+};
+
+
+struct conf_tx_tid {
+	u8 queue_id;
+	u8 channel_type;
+	u8 tsid;
+	u8 ps_scheme;
+	u8 ack_policy;
+	u32 apsd_conf[2];
+};
+
+struct conf_tx_settings {
+	/*
+	 * The TX ED value for TELEC Enable/Disable.
+	 *
+	 * Range: 0, 1
+	 */
+	u8 tx_energy_detection;
+
+	/*
+	 * Configuration for rate classes for TX (currently only one
+	 * rate class supported.)
+	 */
+	struct conf_tx_rate_class rc_conf;
+
+	/*
+	 * Configuration for access categories for TX rate control.
+	 */
+	u8 ac_conf_count;
+	struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
+
+	/*
+	 * Configuration for TID parameters.
+	 */
+	u8 tid_conf_count;
+	struct conf_tx_tid tid_conf[CONF_TX_MAX_TID_COUNT];
+
+	/*
+	 * The TX fragmentation threshold.
+	 *
+	 * Range: u16
+	 */
+	u16 frag_threshold;
+
+	/*
+	 * Max time in msec the FW may delay frame TX-Complete interrupt.
+	 *
+	 * Range: u16
+	 */
+	u16 tx_compl_timeout;
+
+	/*
+	 * Number of completed TX packets that triggers the TX-Complete
+	 * interrupt.
+	 *
+	 * Range: u16
+	 */
+	u16 tx_compl_threshold;
+
+};
+
+enum {
+	CONF_WAKE_UP_EVENT_BEACON    = 0x01, /* Wake on every Beacon*/
+	CONF_WAKE_UP_EVENT_DTIM      = 0x02, /* Wake on every DTIM*/
+	CONF_WAKE_UP_EVENT_N_DTIM    = 0x04, /* Wake every Nth DTIM */
+	CONF_WAKE_UP_EVENT_N_BEACONS = 0x08, /* Wake every Nth beacon */
+	CONF_WAKE_UP_EVENT_BITS_MASK = 0x0F
+};
+
+#define CONF_MAX_BCN_FILT_IE_COUNT 32
+
+#define CONF_BCN_RULE_PASS_ON_CHANGE         BIT(0)
+#define CONF_BCN_RULE_PASS_ON_APPEARANCE     BIT(1)
+
+#define CONF_BCN_IE_OUI_LEN    3
+#define CONF_BCN_IE_VER_LEN    2
+
+struct conf_bcn_filt_rule {
+	/*
+	 * IE number to which to associate a rule.
+	 *
+	 * Range: u8
+	 */
+	u8 ie;
+
+	/*
+	 * Rule to associate with the specific ie.
+	 *
+	 * Range: CONF_BCN_RULE_PASS_ON_*
+	 */
+	u8 rule;
+
+	/*
+	 * OUI for the vendor specific IE (221)
+	 */
+	u8 oui[CONF_BCN_IE_OUI_LEN];
+
+	/*
+	 * Type for the vendor specific IE (221)
+	 */
+	u8 type;
+
+	/*
+	 * Version for the vendor specific IE (221)
+	 */
+	u8 version[CONF_BCN_IE_VER_LEN];
+};
+
+#define CONF_MAX_RSSI_SNR_TRIGGERS 8
+
+enum {
+	CONF_TRIG_METRIC_RSSI_BEACON = 0,
+	CONF_TRIG_METRIC_RSSI_DATA,
+	CONF_TRIG_METRIC_SNR_BEACON,
+	CONF_TRIG_METRIC_SNR_DATA
+};
+
+enum {
+	CONF_TRIG_EVENT_TYPE_LEVEL = 0,
+	CONF_TRIG_EVENT_TYPE_EDGE
+};
+
+enum {
+	CONF_TRIG_EVENT_DIR_LOW = 0,
+	CONF_TRIG_EVENT_DIR_HIGH,
+	CONF_TRIG_EVENT_DIR_BIDIR
+};
+
+
+struct conf_sig_trigger {
+	/*
+	 * The RSSI / SNR threshold value.
+	 *
+	 * FIXME: what is the range?
+	 */
+	s16 threshold;
+
+	/*
+	 * Minimum delay between two trigger events for this trigger in ms.
+	 *
+	 * Range: 0 - 60000
+	 */
+	u16 pacing;
+
+	/*
+	 * The measurement data source for this trigger.
+	 *
+	 * Range: CONF_TRIG_METRIC_*
+	 */
+	u8 metric;
+
+	/*
+	 * The trigger type of this trigger.
+	 *
+	 * Range: CONF_TRIG_EVENT_TYPE_*
+	 */
+	u8 type;
+
+	/*
+	 * The direction of the trigger.
+	 *
+	 * Range: CONF_TRIG_EVENT_DIR_*
+	 */
+	u8 direction;
+
+	/*
+	 * Hysteresis range of the trigger around the threshold (in dB)
+	 *
+	 * Range: u8
+	 */
+	u8 hysteresis;
+
+	/*
+	 * Index of the trigger rule.
+	 *
+	 * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1
+	 */
+	u8 index;
+
+	/*
+	 * Enable / disable this rule (to use for clearing rules.)
+	 *
+	 * Range: 1 - Enabled, 2 - Not enabled
+	 */
+	u8 enable;
+};
+
+struct conf_sig_weights {
+
+	/*
+	 * RSSI from beacons average weight.
+	 *
+	 * Range: u8
+	 */
+	u8 rssi_bcn_avg_weight;
+
+	/*
+	 * RSSI from data average weight.
+	 *
+	 * Range: u8
+	 */
+	u8 rssi_pkt_avg_weight;
+
+	/*
+	 * SNR from beacons average weight.
+	 *
+	 * Range: u8
+	 */
+	u8 snr_bcn_avg_weight;
+
+	/*
+	 * SNR from data average weight.
+	 *
+	 * Range: u8
+	 */
+	u8 snr_pkt_avg_weight;
+};
+
+enum conf_bcn_filt_mode {
+	CONF_BCN_FILT_MODE_DISABLED = 0,
+	CONF_BCN_FILT_MODE_ENABLED = 1
+};
+
+enum conf_bet_mode {
+	CONF_BET_MODE_DISABLE = 0,
+	CONF_BET_MODE_ENABLE = 1,
+};
+
+struct conf_conn_settings {
+	/*
+	 * Firmware wakeup conditions configuration. The host may set only
+	 * one bit.
+	 *
+	 * Range: CONF_WAKE_UP_EVENT_*
+	 */
+	u8 wake_up_event;
+
+	/*
+	 * Listen interval for beacons or Dtims.
+	 *
+	 * Range: 0 for beacon and Dtim wakeup
+	 *        1-10 for x Dtims
+	 *        1-255 for x beacons
+	 */
+	u8 listen_interval;
+
+	/*
+	 * Enable or disable the beacon filtering.
+	 *
+	 * Range: CONF_BCN_FILT_MODE_*
+	 */
+	enum conf_bcn_filt_mode bcn_filt_mode;
+
+	/*
+	 * Configure Beacon filter pass-thru rules.
+	 */
+	u8 bcn_filt_ie_count;
+	struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT];
+
+	/*
+	 * The number of consecutive beacons to lose before the firmware
+	 * becomes out of synch.
+	 *
+	 * Range: u32
+	 */
+	u32 synch_fail_thold;
+
+	/*
+	 * After out-of-synch, the number of TUs to wait without a further
+	 * received beacon (or probe response) before issuing the BSS_LOSE
+	 * event.
+	 *
+	 * Range: u32
+	 */
+	u32 bss_lose_timeout;
+
+	/*
+	 * Beacon receive timeout.
+	 *
+	 * Range: u32
+	 */
+	u32 beacon_rx_timeout;
+
+	/*
+	 * Broadcast receive timeout.
+	 *
+	 * Range: u32
+	 */
+	u32 broadcast_timeout;
+
+	/*
+	 * Enable/disable reception of broadcast packets in power save mode
+	 *
+	 * Range: 1 - enable, 0 - disable
+	 */
+	u8 rx_broadcast_in_ps;
+
+	/*
+	 * Consecutive PS Poll failures before sending an event to the driver
+	 *
+	 * Range: u8
+	 */
+	u8 ps_poll_threshold;
+
+	/*
+	 * Configuration of signal (rssi/snr) triggers.
+	 */
+	u8 sig_trigger_count;
+	struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS];
+
+	/*
+	 * Configuration of signal average weights.
+	 */
+	struct conf_sig_weights sig_weights;
+
+	/*
+	 * Specifies if beacon early termination procedure is enabled or
+	 * disabled.
+	 *
+	 * Range: CONF_BET_MODE_*
+	 */
+	u8 bet_enable;
+
+	/*
+	 * Specifies the maximum number of consecutive beacons that may be
+	 * early terminated. After this number is reached at least one full
+	 * beacon must be correctly received in FW before beacon ET
+	 * resumes.
+	 *
+	 * Range 0 - 255
+	 */
+	u8 bet_max_consecutive;
+};
+
+#define CONF_SR_ERR_TBL_MAX_VALUES   14
+
+struct conf_mart_reflex_err_table {
+	/*
+	 * Length of the error table values table.
+	 *
+	 * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
+	 */
+	u8 len;
+
+	/*
+	 * Smart Reflex error table upper limit.
+	 *
+	 * Range: s8
+	 */
+	s8 upper_limit;
+
+	/*
+	 * Smart Reflex error table values.
+	 *
+	 * Range: s8
+	 */
+	s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
+};
+
+enum {
+	CONF_REF_CLK_19_2_E,
+	CONF_REF_CLK_26_E,
+	CONF_REF_CLK_38_4_E,
+	CONF_REF_CLK_52_E
+};
+
+enum single_dual_band_enum {
+	CONF_SINGLE_BAND,
+	CONF_DUAL_BAND
+};
+
+struct conf_general_parms {
+	/*
+	 * RF Reference Clock type / speed
+	 *
+	 * Range: CONF_REF_CLK_*
+	 */
+	u8 ref_clk;
+
+	/*
+	 * Settling time of the reference clock after boot.
+	 *
+	 * Range: u8
+	 */
+	u8 settling_time;
+
+	/*
+	 * Flag defining whether clock is valid on wakeup.
+	 *
+	 * Range: 0 - not valid on wakeup, 1 - valid on wakeup
+	 */
+	u8 clk_valid_on_wakeup;
+
+	/*
+	 * DC-to-DC mode.
+	 *
+	 * Range: Unknown
+	 */
+	u8 dc2dcmode;
+
+	/*
+	 * Flag defining whether used as single or dual-band.
+	 *
+	 * Range: CONF_SINGLE_BAND, CONF_DUAL_BAND
+	 */
+	u8 single_dual_band;
+
+	/*
+	 * TX bip fem autodetect flag.
+	 *
+	 * Range: Unknown
+	 */
+	u8 tx_bip_fem_autodetect;
+
+	/*
+	 * TX bip gem manufacturer.
+	 *
+	 * Range: Unknown
+	 */
+	u8 tx_bip_fem_manufacturer;
+
+	/*
+	 * Settings flags.
+	 *
+	 * Range: Unknown
+	 */
+	u8 settings;
+};
+
+#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
+#define CONF_NUMBER_OF_SUB_BANDS_5  7
+#define CONF_NUMBER_OF_RATE_GROUPS  6
+#define CONF_NUMBER_OF_CHANNELS_2_4 14
+#define CONF_NUMBER_OF_CHANNELS_5   35
+
+struct conf_radio_parms {
+	/*
+	 * Static radio parameters for 2.4GHz
+	 *
+	 * Range: unknown
+	 */
+	u8 rx_trace_loss;
+	u8 tx_trace_loss;
+	s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
+
+	/*
+	 * Static radio parameters for 5GHz
+	 *
+	 * Range: unknown
+	 */
+	u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
+
+	/*
+	 * Dynamic radio parameters for 2.4GHz
+	 *
+	 * Range: unknown
+	 */
+	s16 tx_ref_pd_voltage;
+	s8  tx_ref_power;
+	s8  tx_offset_db;
+
+	s8  tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
+
+	s8  tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
+	s8  tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
+	s8  tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
+
+	u8  tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
+	u8  rx_fem_insertion_loss;
+
+	/*
+	 * Dynamic radio parameters for 5GHz
+	 *
+	 * Range: unknown
+	 */
+	s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8  tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8  tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
+
+	s8  tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
+
+	s8  tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
+	s8  tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
+
+	/* FIXME: this is inconsistent with the types for 2.4GHz */
+	s8  tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+};
+
+#define CONF_SR_ERR_TBL_COUNT        3
+
+struct conf_init_settings {
+	/*
+	 * Configure Smart Reflex error table values.
+	 */
+	struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT];
+
+	/*
+	 * Smart Reflex enable flag.
+	 *
+	 * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
+	 */
+	u8 sr_enable;
+
+	/*
+	 * Configure general parameters.
+	 */
+	struct conf_general_parms genparam;
+
+	/*
+	 * Configure radio parameters.
+	 */
+	struct conf_radio_parms radioparam;
+
+};
+
+struct conf_drv_settings {
+	struct conf_sg_settings sg;
+	struct conf_rx_settings rx;
+	struct conf_tx_settings tx;
+	struct conf_conn_settings conn;
+	struct conf_init_settings init;
+};
+
+#endif
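
A minimal sketch (not part of the patch; the helper name is hypothetical) of how the settings above are read back at runtime, assuming the wl->conf copy that wl1271_conf_init() fills in later in this series:

	/* wl->conf holds a full copy of the driver defaults, so runtime code
	 * reads configuration values as plain struct members.
	 */
	static u8 wl1271_sketch_bet_max(struct wl1271 *wl)
	{
		return wl->conf.conn.bet_max_consecutive;
	}
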
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index f3afd4a..31d396b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -26,20 +26,45 @@
 #include "wl1271_spi.h"
 #include "wl1271_event.h"
 #include "wl1271_ps.h"
+#include "wl12xx_80211.h"
 
 static int wl1271_event_scan_complete(struct wl1271 *wl,
 				      struct event_mailbox *mbox)
 {
+	int size = sizeof(struct wl12xx_probe_req_template);
 	wl1271_debug(DEBUG_EVENT, "status: 0x%x",
 		     mbox->scheduled_scan_status);
 
 	if (wl->scanning) {
-		mutex_unlock(&wl->mutex);
-		ieee80211_scan_completed(wl->hw, false);
-		mutex_lock(&wl->mutex);
-		wl->scanning = false;
-	}
+		if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
+			wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
+						NULL, size);
+			/* The 2.4 GHz band has been scanned; now scan the
+			 * 5 GHz band. Clear wl->scanning first, because
+			 * wl1271_cmd_scan refuses to start while a scan is
+			 * already marked as in progress.
+			 */
+			wl->scanning = false;
+			wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
+						wl->scan.active,
+						wl->scan.high_prio,
+						WL1271_SCAN_BAND_5_GHZ,
+						wl->scan.probe_requests);
+		} else {
+			if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ)
+				wl1271_cmd_template_set(wl,
+						CMD_TEMPL_CFG_PROBE_REQ_2_4,
+						NULL, size);
+			else
+				wl1271_cmd_template_set(wl,
+						CMD_TEMPL_CFG_PROBE_REQ_5,
+						NULL, size);
 
+			mutex_unlock(&wl->mutex);
+			ieee80211_scan_completed(wl->hw, false);
+			mutex_lock(&wl->mutex);
+			wl->scanning = false;
+		}
+	}
 	return 0;
 }
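
The two-pass flow above, reduced to a sketch (locking and probe-request template teardown omitted; helper name hypothetical): when a dual-band request completes, the 2.4 GHz pass has just finished, so a 5 GHz pass is issued before mac80211 is told the scan is done.

	static void sketch_scan_next_band(struct wl1271 *wl)
	{
		if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
			/* clear the flag so wl1271_cmd_scan accepts the call */
			wl->scanning = false;
			wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
					wl->scan.active, wl->scan.high_prio,
					WL1271_SCAN_BAND_5_GHZ,
					wl->scan.probe_requests);
		} else {
			/* single band or second pass: report completion */
			wl->scanning = false;
			ieee80211_scan_completed(wl->hw, false);
		}
	}
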
 
@@ -57,7 +82,8 @@
 
 	wl1271_event_mbox_dump(mbox);
 
-	vector = mbox->events_vector & ~(mbox->events_mask);
+	vector = le32_to_cpu(mbox->events_vector);
+	vector &= ~(le32_to_cpu(mbox->events_mask));
 	wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector);
 
 	if (vector & SCAN_COMPLETE_EVENT_ID) {
@@ -66,14 +92,16 @@
 			return ret;
 	}
 
-	if (vector & BSS_LOSE_EVENT_ID) {
+	/*
+	 * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
+	 * filtering) is enabled. Without PSM, the stack will receive all
+	 * beacons and can detect beacon loss by itself.
+	 */
+	if (vector & BSS_LOSE_EVENT_ID && wl->psm) {
 		wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
 
-		if (wl->psm_requested && wl->psm) {
-			ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE);
-			if (ret < 0)
-				return ret;
-		}
+		/* indicate to the stack, that beacons have been lost */
+		ieee80211_beacon_loss(wl->vif);
 	}
 
 	return 0;
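
The event mailbox is produced by the firmware in little-endian byte order, so every multi-byte field is converted before use; a sketch of the pattern applied above (helper name hypothetical):

	static u32 sketch_pending_events(struct event_mailbox *mbox)
	{
		/* convert from firmware (LE) to host order before masking */
		u32 vector = le32_to_cpu(mbox->events_vector);

		return vector & ~le32_to_cpu(mbox->events_mask);
	}
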
@@ -92,14 +120,14 @@
 
 void wl1271_event_mbox_config(struct wl1271 *wl)
 {
-	wl->mbox_ptr[0] = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR);
+	wl->mbox_ptr[0] = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
 	wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
 
 	wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
 		     wl->mbox_ptr[0], wl->mbox_ptr[1]);
 }
 
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
 {
 	struct event_mailbox mbox;
 	int ret;
@@ -110,8 +138,8 @@
 		return -EINVAL;
 
 	/* first we read the mbox descriptor */
-	wl1271_spi_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox,
-			    sizeof(struct event_mailbox));
+	wl1271_spi_read(wl, wl->mbox_ptr[mbox_num], &mbox,
+			sizeof(struct event_mailbox), false);
 
 	/* process the descriptor */
 	ret = wl1271_event_process(wl, &mbox);
@@ -119,7 +147,9 @@
 		return ret;
 
 	/* then we let the firmware know it can go on...*/
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
+	if (do_ack)
+		wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
+				   INTR_TRIG_EVENT_ACK);
 
 	return 0;
 }
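
The new do_ack parameter lets the caller defer the acknowledge when both event mailboxes are pending in the same interrupt; a sketch of the caller-side logic, mirroring the wl1271_main.c hunk later in this patch (helper name hypothetical):

	static void sketch_dispatch_events(struct wl1271 *wl, u32 intr)
	{
		if (intr & WL1271_ACX_INTR_EVENT_A) {
			/* hold the ack back if EVENT_B still needs handling */
			bool do_ack = !(intr & WL1271_ACX_INTR_EVENT_B);

			wl1271_event_handle(wl, 0, do_ack);
		}

		if (intr & WL1271_ACX_INTR_EVENT_B)
			wl1271_event_handle(wl, 1, true);
	}
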
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 2cdce7c..3ab53d3 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -66,33 +66,33 @@
 struct event_debug_report {
 	u8 debug_event_id;
 	u8 num_params;
-	u16 pad;
-	u32 report_1;
-	u32 report_2;
-	u32 report_3;
+	__le16 pad;
+	__le32 report_1;
+	__le32 report_2;
+	__le32 report_3;
 } __attribute__ ((packed));
 
 #define NUM_OF_RSSI_SNR_TRIGGERS 8
 
 struct event_mailbox {
-	u32 events_vector;
-	u32 events_mask;
-	u32 reserved_1;
-	u32 reserved_2;
+	__le32 events_vector;
+	__le32 events_mask;
+	__le32 reserved_1;
+	__le32 reserved_2;
 
 	u8 dbg_event_id;
 	u8 num_relevant_params;
-	u16 reserved_3;
-	u32 event_report_p1;
-	u32 event_report_p2;
-	u32 event_report_p3;
+	__le16 reserved_3;
+	__le32 event_report_p1;
+	__le32 event_report_p2;
+	__le32 event_report_p3;
 
 	u8 number_of_scan_results;
 	u8 scan_tag;
 	u8 reserved_4[2];
-	u32 compl_scheduled_scan_status;
+	__le32 compl_scheduled_scan_status;
 
-	u16 scheduled_scan_attended_channels;
+	__le16 scheduled_scan_attended_channels;
 	u8 soft_gemini_sense_info;
 	u8 soft_gemini_protective_info;
 	s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
@@ -105,6 +105,6 @@
 
 int wl1271_event_unmask(struct wl1271 *wl);
 void wl1271_event_mbox_config(struct wl1271 *wl);
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack);
 
 #endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 490df21..417b415 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -59,6 +59,14 @@
 	if (ret < 0)
 		return ret;
 
+	if (wl1271_11a_enabled()) {
+		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
+				NULL,
+				sizeof(struct wl12xx_probe_req_template));
+		if (ret < 0)
+			return ret;
+	}
+
 	ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
 				      sizeof(struct wl12xx_null_data_template));
 	if (ret < 0)
@@ -94,7 +102,7 @@
 {
 	int ret;
 
-	ret = wl1271_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF);
+	ret = wl1271_acx_rx_msdu_life_time(wl);
 	if (ret < 0)
 		return ret;
 
@@ -117,7 +125,7 @@
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_acx_group_address_tbl(wl);
+	ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
 	if (ret < 0)
 		return ret;
 
@@ -125,7 +133,7 @@
 	if (ret < 0)
 		return ret;
 
-	ret = wl1271_acx_rts_threshold(wl, RTS_THRESHOLD_DEF);
+	ret = wl1271_acx_rts_threshold(wl, wl->conf.rx.rts_threshold);
 	if (ret < 0)
 		return ret;
 
@@ -136,7 +144,8 @@
 {
 	int ret;
 
-	ret = wl1271_acx_beacon_filter_opt(wl);
+	/* disable beacon filtering at this stage */
+	ret = wl1271_acx_beacon_filter_opt(wl, false);
 	if (ret < 0)
 		return ret;
 
@@ -187,6 +196,7 @@
 static int wl1271_init_general_parms(struct wl1271 *wl)
 {
 	struct wl1271_general_parms *gen_parms;
+	struct conf_general_parms *g = &wl->conf.init.genparam;
 	int ret;
 
 	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
@@ -195,15 +205,14 @@
 
 	gen_parms->id = TEST_CMD_INI_FILE_GENERAL_PARAM;
 
-	gen_parms->ref_clk = REF_CLK_38_4_E;
-	/* FIXME: magic numbers */
-	gen_parms->settling_time = 5;
-	gen_parms->clk_valid_on_wakeup = 0;
-	gen_parms->dc2dcmode = 0;
-	gen_parms->single_dual_band = 0;
-	gen_parms->tx_bip_fem_autodetect = 1;
-	gen_parms->tx_bip_fem_manufacturer = 1;
-	gen_parms->settings = 1;
+	gen_parms->ref_clk = g->ref_clk;
+	gen_parms->settling_time = g->settling_time;
+	gen_parms->clk_valid_on_wakeup = g->clk_valid_on_wakeup;
+	gen_parms->dc2dcmode = g->dc2dcmode;
+	gen_parms->single_dual_band = g->single_dual_band;
+	gen_parms->tx_bip_fem_autodetect = g->tx_bip_fem_autodetect;
+	gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
+	gen_parms->settings = g->settings;
 
 	ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
 	if (ret < 0) {
@@ -217,32 +226,9 @@
 
 static int wl1271_init_radio_parms(struct wl1271 *wl)
 {
-	/*
-	 * FIXME: All these magic numbers should be moved to some place where
-	 * they can be configured (separate file?)
-	 */
-
 	struct wl1271_radio_parms *radio_parms;
-	int ret;
-	u8 compensation[] = { 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8, 0xfc, 0x00,
-			      0x08, 0x10, 0xf0, 0xf8, 0x00, 0x0a, 0x14 };
-
-	u8 tx_rate_limits_normal[]   = { 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 };
-	u8 tx_rate_limits_degraded[] = { 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 };
-
-	u8 tx_channel_limits_11b[] = { 0x22, 0x50, 0x50, 0x50,
-				       0x50, 0x50, 0x50, 0x50,
-				       0x50, 0x50, 0x22, 0x50,
-				       0x22, 0x50 };
-
-	u8 tx_channel_limits_ofdm[] = { 0x20, 0x50, 0x50, 0x50,
-					0x50, 0x50, 0x50, 0x50,
-					0x50, 0x50, 0x20, 0x50,
-					0x20, 0x50 };
-
-	u8 tx_pdv_rate_offsets[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-	u8 tx_ibias[] = { 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 };
+	struct conf_radio_parms *r = &wl->conf.init.radioparam;
+	int i, ret;
 
 	radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
 	if (!radio_parms)
@@ -251,33 +237,59 @@
 	radio_parms->id = TEST_CMD_INI_FILE_RADIO_PARAM;
 
 	/* Static radio parameters */
-	radio_parms->rx_trace_loss = 10;
-	radio_parms->tx_trace_loss = 10;
-	memcpy(radio_parms->rx_rssi_and_proc_compens, compensation,
-	       sizeof(compensation));
+	radio_parms->rx_trace_loss = r->rx_trace_loss;
+	radio_parms->tx_trace_loss = r->tx_trace_loss;
+	memcpy(radio_parms->rx_rssi_and_proc_compens,
+	       r->rx_rssi_and_proc_compens,
+	       CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
 
-	/* We don't set the 5GHz -- N/A */
+	memcpy(radio_parms->rx_trace_loss_5, r->rx_trace_loss_5,
+	       CONF_NUMBER_OF_SUB_BANDS_5);
+	memcpy(radio_parms->tx_trace_loss_5, r->tx_trace_loss_5,
+	       CONF_NUMBER_OF_SUB_BANDS_5);
+	memcpy(radio_parms->rx_rssi_and_proc_compens_5,
+	       r->rx_rssi_and_proc_compens_5,
+	       CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
 
 	/* Dynamic radio parameters */
-	radio_parms->tx_ref_pd_voltage = cpu_to_le16(0x24e);
-	radio_parms->tx_ref_power = 0x78;
-	radio_parms->tx_offset_db = 0x0;
+	radio_parms->tx_ref_pd_voltage = cpu_to_le16(r->tx_ref_pd_voltage);
+	radio_parms->tx_ref_power = r->tx_ref_power;
+	radio_parms->tx_offset_db = r->tx_offset_db;
 
-	memcpy(radio_parms->tx_rate_limits_normal, tx_rate_limits_normal,
-	       sizeof(tx_rate_limits_normal));
-	memcpy(radio_parms->tx_rate_limits_degraded, tx_rate_limits_degraded,
-	       sizeof(tx_rate_limits_degraded));
+	memcpy(radio_parms->tx_rate_limits_normal, r->tx_rate_limits_normal,
+	       CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
+	       CONF_NUMBER_OF_RATE_GROUPS);
 
-	memcpy(radio_parms->tx_channel_limits_11b, tx_channel_limits_11b,
-	       sizeof(tx_channel_limits_11b));
-	memcpy(radio_parms->tx_channel_limits_ofdm, tx_channel_limits_ofdm,
-	       sizeof(tx_channel_limits_ofdm));
-	memcpy(radio_parms->tx_pdv_rate_offsets, tx_pdv_rate_offsets,
-	       sizeof(tx_pdv_rate_offsets));
-	memcpy(radio_parms->tx_ibias, tx_ibias,
-	       sizeof(tx_ibias));
+	memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
+	       CONF_NUMBER_OF_CHANNELS_2_4);
+	memcpy(radio_parms->tx_channel_limits_ofdm, r->tx_channel_limits_ofdm,
+	       CONF_NUMBER_OF_CHANNELS_2_4);
+	memcpy(radio_parms->tx_pdv_rate_offsets, r->tx_pdv_rate_offsets,
+	       CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
 
-	radio_parms->rx_fem_insertion_loss = 0x14;
+	radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
+
+	for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
+		radio_parms->tx_ref_pd_voltage_5[i] =
+			cpu_to_le16(r->tx_ref_pd_voltage_5[i]);
+	memcpy(radio_parms->tx_ref_power_5, r->tx_ref_power_5,
+	       CONF_NUMBER_OF_SUB_BANDS_5);
+	memcpy(radio_parms->tx_offset_db_5, r->tx_offset_db_5,
+	       CONF_NUMBER_OF_SUB_BANDS_5);
+	memcpy(radio_parms->tx_rate_limits_normal_5,
+	       r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->tx_rate_limits_degraded_5,
+	       r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->tx_channel_limits_ofdm_5,
+	       r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
+	memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
+	       CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->tx_ibias_5, r->tx_ibias_5,
+	       CONF_NUMBER_OF_RATE_GROUPS);
+	memcpy(radio_parms->rx_fem_insertion_loss_5,
+	       r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
 
 	ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
 	if (ret < 0)
@@ -311,8 +323,8 @@
 
 	/* RX config */
 	ret = wl1271_init_rx_config(wl,
-				       RX_CFG_PROMISCUOUS | RX_CFG_TSF,
-				       RX_FILTER_OPTION_DEF);
+				    RX_CFG_PROMISCUOUS | RX_CFG_TSF,
+				    RX_FILTER_OPTION_DEF);
 	/* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
 	   RX_FILTER_OPTION_FILTER_ALL); */
 	if (ret < 0)
@@ -323,6 +335,11 @@
 	if (ret < 0)
 		goto out_free_memmap;
 
+	/* Initialize connection monitoring thresholds */
+	ret = wl1271_acx_conn_monit_params(wl);
+	if (ret < 0)
+		goto out_free_memmap;
+
 	/* Beacon filtering */
 	ret = wl1271_init_beacon_filter(wl);
 	if (ret < 0)
@@ -369,7 +386,7 @@
 		goto out_free_memmap;
 
 	/* Configure TX rate classes */
-	ret = wl1271_acx_rate_policies(wl);
+	ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL);
 	if (ret < 0)
 		goto out_free_memmap;
 
@@ -388,10 +405,16 @@
 	if (ret < 0)
 		goto out_free_memmap;
 
+	/* Configure smart reflex */
+	ret = wl1271_acx_smart_reflex(wl);
+	if (ret < 0)
+		goto out_free_memmap;
+
 	return 0;
 
  out_free_memmap:
 	kfree(wl->target_mem_map);
+	wl->target_mem_map = NULL;
 
 	return ret;
 }
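
The added wl->target_mem_map = NULL after kfree() keeps a later teardown path, which also frees target_mem_map, from double-freeing; the pattern as a sketch (helper name hypothetical):

	static void sketch_free_mem_map(struct wl1271 *wl)
	{
		kfree(wl->target_mem_map);
		/* a subsequent kfree(NULL) elsewhere is then a harmless no-op */
		wl->target_mem_map = NULL;
	}
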
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/wl1271_init.h
index bd8ff0f..6e21cee 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/wl1271_init.h
@@ -48,19 +48,6 @@
 	u8 settings;
 } __attribute__ ((packed));
 
-enum ref_clk_enum {
-	REF_CLK_19_2_E,
-	REF_CLK_26_E,
-	REF_CLK_38_4_E,
-	REF_CLK_52_E
-};
-
-#define RSSI_AND_PROCESS_COMPENSATION_SIZE 15
-#define NUMBER_OF_SUB_BANDS_5  7
-#define NUMBER_OF_RATE_GROUPS  6
-#define NUMBER_OF_CHANNELS_2_4 14
-#define NUMBER_OF_CHANNELS_5   35
-
 struct wl1271_radio_parms {
 	u8 id;
 	u8 padding[3];
@@ -69,45 +56,45 @@
 	/* 2.4GHz */
 	u8 rx_trace_loss;
 	u8 tx_trace_loss;
-	s8 rx_rssi_and_proc_compens[RSSI_AND_PROCESS_COMPENSATION_SIZE];
+	s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
 
 	/* 5GHz */
-	u8 rx_trace_loss_5[NUMBER_OF_SUB_BANDS_5];
-	u8 tx_trace_loss_5[NUMBER_OF_SUB_BANDS_5];
-	s8 rx_rssi_and_proc_compens_5[RSSI_AND_PROCESS_COMPENSATION_SIZE];
+	u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
 
 	/* Dynamic radio parameters */
 	/* 2.4GHz */
-	s16 tx_ref_pd_voltage;
+	__le16 tx_ref_pd_voltage;
 	s8  tx_ref_power;
 	s8  tx_offset_db;
 
-	s8  tx_rate_limits_normal[NUMBER_OF_RATE_GROUPS];
-	s8  tx_rate_limits_degraded[NUMBER_OF_RATE_GROUPS];
+	s8  tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
 
-	s8  tx_channel_limits_11b[NUMBER_OF_CHANNELS_2_4];
-	s8  tx_channel_limits_ofdm[NUMBER_OF_CHANNELS_2_4];
-	s8  tx_pdv_rate_offsets[NUMBER_OF_RATE_GROUPS];
+	s8  tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
+	s8  tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
+	s8  tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
 
-	u8  tx_ibias[NUMBER_OF_RATE_GROUPS];
+	u8  tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
 	u8  rx_fem_insertion_loss;
 
 	u8 padding2;
 
 	/* 5GHz */
-	s16 tx_ref_pd_voltage_5[NUMBER_OF_SUB_BANDS_5];
-	s8  tx_ref_power_5[NUMBER_OF_SUB_BANDS_5];
-	s8  tx_offset_db_5[NUMBER_OF_SUB_BANDS_5];
+	__le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8  tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
+	s8  tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
 
-	s8  tx_rate_limits_normal_5[NUMBER_OF_RATE_GROUPS];
-	s8  tx_rate_limits_degraded_5[NUMBER_OF_RATE_GROUPS];
+	s8  tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
 
-	s8  tx_channel_limits_ofdm_5[NUMBER_OF_CHANNELS_5];
-	s8  tx_pdv_rate_offsets_5[NUMBER_OF_RATE_GROUPS];
+	s8  tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
+	s8  tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
 
 	/* FIXME: this is inconsistent with the types for 2.4GHz */
-	s8  tx_ibias_5[NUMBER_OF_RATE_GROUPS];
-	s8  rx_fem_insertion_loss_5[NUMBER_OF_SUB_BANDS_5];
+	s8  tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
+	s8  rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
 
 	u8 padding3[2];
 } __attribute__ ((packed));
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 27298b1..86132bb 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -30,7 +30,9 @@
 #include <linux/spi/spi.h>
 #include <linux/crc32.h>
 #include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
 #include <linux/spi/wl12xx.h>
+#include <linux/inetdevice.h>
 
 #include "wl1271.h"
 #include "wl12xx_80211.h"
@@ -45,6 +47,308 @@
 #include "wl1271_cmd.h"
 #include "wl1271_boot.h"
 
+static struct conf_drv_settings default_conf = {
+	.sg = {
+		.per_threshold               = 7500,
+		.max_scan_compensation_time  = 120000,
+		.nfs_sample_interval         = 400,
+		.load_ratio                  = 50,
+		.auto_ps_mode                = 0,
+		.probe_req_compensation      = 170,
+		.scan_window_compensation    = 50,
+		.antenna_config              = 0,
+		.beacon_miss_threshold       = 60,
+		.rate_adaptation_threshold   = CONF_HW_BIT_RATE_12MBPS,
+		.rate_adaptation_snr         = 0
+	},
+	.rx = {
+		.rx_msdu_life_time           = 512000,
+		.packet_detection_threshold  = 0,
+		.ps_poll_timeout             = 15,
+		.upsd_timeout                = 15,
+		.rts_threshold               = 2347,
+		.rx_cca_threshold            = 0xFFEF,
+		.irq_blk_threshold           = 0,
+		.irq_pkt_threshold           = USHORT_MAX,
+		.irq_timeout                 = 5,
+		.queue_type                  = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
+	},
+	.tx = {
+		.tx_energy_detection         = 0,
+		.rc_conf                     = {
+			.enabled_rates       = CONF_TX_RATE_MASK_UNSPECIFIED,
+			.short_retry_limit   = 10,
+			.long_retry_limit    = 10,
+			.aflags              = 0
+		},
+		.ac_conf_count               = 4,
+		.ac_conf                     = {
+			[0] = {
+				.ac          = CONF_TX_AC_BE,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = 3,
+				.tx_op_limit = 0,
+			},
+			[1] = {
+				.ac          = CONF_TX_AC_BK,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = 7,
+				.tx_op_limit = 0,
+			},
+			[2] = {
+				.ac          = CONF_TX_AC_VI,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = CONF_TX_AIFS_PIFS,
+				.tx_op_limit = 3008,
+			},
+			[3] = {
+				.ac          = CONF_TX_AC_VO,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = CONF_TX_AIFS_PIFS,
+				.tx_op_limit = 1504,
+			},
+		},
+		.tid_conf_count = 7,
+		.tid_conf = {
+			[0] = {
+				.queue_id    = 0,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[1] = {
+				.queue_id    = 1,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[2] = {
+				.queue_id    = 2,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[3] = {
+				.queue_id    = 3,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[4] = {
+				.queue_id    = 4,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[5] = {
+				.queue_id    = 5,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[6] = {
+				.queue_id    = 6,
+				.channel_type = CONF_CHANNEL_TYPE_DCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			}
+		},
+		.frag_threshold              = IEEE80211_MAX_FRAG_THRESHOLD,
+		.tx_compl_timeout            = 5,
+		.tx_compl_threshold          = 5
+	},
+	.conn = {
+		.wake_up_event               = CONF_WAKE_UP_EVENT_DTIM,
+		.listen_interval             = 0,
+		.bcn_filt_mode               = CONF_BCN_FILT_MODE_ENABLED,
+		.bcn_filt_ie_count           = 1,
+		.bcn_filt_ie = {
+			[0] = {
+				.ie          = WLAN_EID_CHANNEL_SWITCH,
+				.rule        = CONF_BCN_RULE_PASS_ON_APPEARANCE,
+			}
+		},
+		.synch_fail_thold            = 5,
+		.bss_lose_timeout            = 100,
+		.beacon_rx_timeout           = 10000,
+		.broadcast_timeout           = 20000,
+		.rx_broadcast_in_ps          = 1,
+		.ps_poll_threshold           = 4,
+		.sig_trigger_count           = 2,
+		.sig_trigger = {
+			[0] = {
+				.threshold   = -75,
+				.pacing      = 500,
+				.metric      = CONF_TRIG_METRIC_RSSI_BEACON,
+				.type        = CONF_TRIG_EVENT_TYPE_EDGE,
+				.direction   = CONF_TRIG_EVENT_DIR_LOW,
+				.hysteresis  = 2,
+				.index       = 0,
+				.enable      = 1
+			},
+			[1] = {
+				.threshold   = -75,
+				.pacing      = 500,
+				.metric      = CONF_TRIG_METRIC_RSSI_BEACON,
+				.type        = CONF_TRIG_EVENT_TYPE_EDGE,
+				.direction   = CONF_TRIG_EVENT_DIR_HIGH,
+				.hysteresis  = 2,
+				.index       = 1,
+				.enable      = 1
+			}
+		},
+		.sig_weights = {
+			.rssi_bcn_avg_weight = 10,
+			.rssi_pkt_avg_weight = 10,
+			.snr_bcn_avg_weight  = 10,
+			.snr_pkt_avg_weight  = 10
+		},
+		.bet_enable                  = CONF_BET_MODE_ENABLE,
+		.bet_max_consecutive         = 100
+	},
+	.init = {
+		.sr_err_tbl = {
+			[0] = {
+				.len         = 7,
+				.upper_limit = 0x03,
+				.values      = {
+					0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
+					0x00 }
+			},
+			[1] = {
+				.len         = 7,
+				.upper_limit = 0x03,
+				.values      = {
+					0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
+					0x00 }
+			},
+			[2] = {
+				.len         = 7,
+				.upper_limit = 0x03,
+				.values      = {
+					0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
+					0x00 }
+			}
+		},
+		.sr_enable                   = 1,
+		.genparam                    = {
+			/*
+			 * FIXME: The correct value CONF_REF_CLK_38_4_E
+			 *        causes the firmware to crash on boot.
+			 *        The value 5 apparently is an
+			 *        unofficial XTAL configuration of the
+			 *        same frequency, which appears to work.
+			 */
+			.ref_clk             = 5,
+			.settling_time       = 5,
+			.clk_valid_on_wakeup = 0,
+			.dc2dcmode           = 0,
+			.single_dual_band    = CONF_SINGLE_BAND,
+			.tx_bip_fem_autodetect = 0,
+			.tx_bip_fem_manufacturer = 1,
+			.settings = 1,
+		},
+		.radioparam = {
+			.rx_trace_loss       = 10,
+			.tx_trace_loss       = 10,
+			.rx_rssi_and_proc_compens = {
+				0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
+				0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8,
+				0x00, 0x0a, 0x14 },
+			.rx_trace_loss_5     = { 0, 0, 0, 0, 0, 0, 0 },
+			.tx_trace_loss_5     = { 0, 0, 0, 0, 0, 0, 0 },
+			.rx_rssi_and_proc_compens_5 = {
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+				0x00, 0x00, 0x00 },
+			.tx_ref_pd_voltage   = 0x24e,
+			.tx_ref_power        = 0x78,
+			.tx_offset_db        = 0x0,
+			.tx_rate_limits_normal = {
+				0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 },
+			.tx_rate_limits_degraded = {
+				0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
+			.tx_channel_limits_11b = {
+				0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
+				0x22, 0x50 },
+			.tx_channel_limits_ofdm = {
+				0x20, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
+				0x20, 0x50 },
+			.tx_pdv_rate_offsets = {
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+			.tx_ibias            = {
+				0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 },
+			.rx_fem_insertion_loss = 0x14,
+			.tx_ref_pd_voltage_5 = {
+				0x0190, 0x01a4, 0x01c3, 0x01d8,
+				0x020a, 0x021c },
+			.tx_ref_power_5      = {
+				0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 },
+			.tx_offset_db_5      = {
+				0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+			.tx_rate_limits_normal_5 = {
+				0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
+			.tx_rate_limits_degraded_5 = {
+				0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
+			.tx_channel_limits_ofdm_5 = {
+				0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
+				0x50, 0x50, 0x50 },
+			.tx_pdv_rate_offsets_5 = {
+				0x01, 0x02, 0x02, 0x02, 0x02, 0x00 },
+			.tx_ibias_5          = {
+				0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
+			.rx_fem_insertion_loss_5 = {
+				0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }
+		}
+	}
+};
+
+static LIST_HEAD(wl_list);
+
+static void wl1271_conf_init(struct wl1271 *wl)
+{
+
+	/*
+	 * This function applies the default configuration to the driver. It
+	 * is invoked on driver load (SPI probe).
+	 *
+	 * The configuration is stored in a run-time structure to allow
+	 * run-time adjustment of any of the parameters. Changes made to the
+	 * configuration structure take effect on the next interface up
+	 * (wl1271_op_start).
+	 */
+
+	/* apply driver default configuration */
+	memcpy(&wl->conf, &default_conf, sizeof(default_conf));
+
+	if (wl1271_11a_enabled())
+		wl->conf.init.genparam.single_dual_band = CONF_DUAL_BAND;
+}
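
As the comment in wl1271_conf_init() says, parameters may be adjusted in wl->conf after the defaults are applied and take effect on the next interface up; a sketch (helper name and example value hypothetical):

	static void sketch_tune_rts(struct wl1271 *wl)
	{
		wl1271_conf_init(wl);			/* copy default_conf into wl->conf */
		wl->conf.rx.rts_threshold = 500;	/* picked up by the next wl1271_op_start() */
	}
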
+
+
 static int wl1271_plt_init(struct wl1271 *wl)
 {
 	int ret;
@@ -75,20 +379,14 @@
 	wl->set_power(true);
 }
 
-static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status)
+static void wl1271_fw_status(struct wl1271 *wl,
+			     struct wl1271_fw_status *status)
 {
 	u32 total = 0;
 	int i;
 
-	/*
-	 * FIXME: Reading the FW status directly from the registers seems to
-	 * be the right thing to do, but it doesn't work.  And in the
-	 * reference driver, there is a workaround called
-	 * USE_SDIO_24M_WORKAROUND, which reads the status from memory
-	 * instead, so we do the same here.
-	 */
-
-	wl1271_spi_mem_read(wl, STATUS_MEM_ADDRESS, status, sizeof(*status));
+	wl1271_spi_read(wl, FW_STATUS_ADDR, status,
+			sizeof(*status), false);
 
 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
 		     "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -99,25 +397,28 @@
 
 	/* update number of available TX blocks */
 	for (i = 0; i < NUM_TX_QUEUES; i++) {
-		u32 cnt = status->tx_released_blks[i] - wl->tx_blocks_freed[i];
-		wl->tx_blocks_freed[i] = status->tx_released_blks[i];
+		u32 cnt = le32_to_cpu(status->tx_released_blks[i]) -
+			wl->tx_blocks_freed[i];
+
+		wl->tx_blocks_freed[i] =
+			le32_to_cpu(status->tx_released_blks[i]);
 		wl->tx_blocks_available += cnt;
 		total += cnt;
 	}
 
 	/* if more blocks are available now, schedule some tx work */
 	if (total && !skb_queue_empty(&wl->tx_queue))
-		schedule_work(&wl->tx_work);
+		ieee80211_queue_work(wl->hw, &wl->tx_work);
 
 	/* update the host-chipset time offset */
-	wl->time_offset = jiffies_to_usecs(jiffies) - status->fw_localtime;
+	wl->time_offset = jiffies_to_usecs(jiffies) -
+		le32_to_cpu(status->fw_localtime);
 }
 
-#define WL1271_IRQ_MAX_LOOPS 10
 static void wl1271_irq_work(struct work_struct *work)
 {
-	u32 intr, ctr = WL1271_IRQ_MAX_LOOPS;
 	int ret;
+	u32 intr;
 	struct wl1271 *wl =
 		container_of(work, struct wl1271, irq_work);
 
@@ -132,9 +433,10 @@
 	if (ret < 0)
 		goto out;
 
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
 
-	intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
+	wl1271_fw_status(wl, wl->fw_status);
+	intr = le32_to_cpu(wl->fw_status->intr);
 	if (!intr) {
 		wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
 		goto out_sleep;
@@ -142,46 +444,39 @@
 
 	intr &= WL1271_INTR_MASK;
 
-	do {
-		wl1271_fw_status(wl, wl->fw_status);
+	if (intr & WL1271_ACX_INTR_EVENT_A) {
+		bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
+		wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
+		wl1271_event_handle(wl, 0, do_ack);
+	}
 
+	if (intr & WL1271_ACX_INTR_EVENT_B) {
+		wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
+		wl1271_event_handle(wl, 1, true);
+	}
 
-		if (intr & (WL1271_ACX_INTR_EVENT_A |
-			    WL1271_ACX_INTR_EVENT_B)) {
-			wl1271_debug(DEBUG_IRQ,
-				     "WL1271_ACX_INTR_EVENT (0x%x)", intr);
-			if (intr & WL1271_ACX_INTR_EVENT_A)
-				wl1271_event_handle(wl, 0);
-			else
-				wl1271_event_handle(wl, 1);
-		}
+	if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
+		wl1271_debug(DEBUG_IRQ,
+			     "WL1271_ACX_INTR_INIT_COMPLETE");
 
-		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
-			wl1271_debug(DEBUG_IRQ,
-				     "WL1271_ACX_INTR_INIT_COMPLETE");
+	if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
+		wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
 
-		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
-			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
+	if (intr & WL1271_ACX_INTR_DATA) {
+		u8 tx_res_cnt = wl->fw_status->tx_results_counter -
+			wl->tx_results_count;
 
-		if (intr & WL1271_ACX_INTR_DATA) {
-			u8 tx_res_cnt = wl->fw_status->tx_results_counter -
-				wl->tx_results_count;
+		wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 
-			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
+		/* check for tx results */
+		if (tx_res_cnt)
+			wl1271_tx_complete(wl, tx_res_cnt);
 
-			/* check for tx results */
-			if (tx_res_cnt)
-				wl1271_tx_complete(wl, tx_res_cnt);
-
-			wl1271_rx(wl, wl->fw_status);
-		}
-
-		intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
-		intr &= WL1271_INTR_MASK;
-	} while (intr && --ctr);
+		wl1271_rx(wl, wl->fw_status);
+	}
 
 out_sleep:
-	wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK,
+	wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
 			   WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
 	wl1271_ps_elp_sleep(wl);
 
@@ -205,7 +500,7 @@
 		wl->elp_compl = NULL;
 	}
 
-	schedule_work(&wl->irq_work);
+	ieee80211_queue_work(wl->hw, &wl->irq_work);
 	spin_unlock_irqrestore(&wl->wl_lock, flags);
 
 	return IRQ_HANDLED;
@@ -231,7 +526,7 @@
 	}
 
 	wl->fw_len = fw->size;
-	wl->fw = kmalloc(wl->fw_len, GFP_KERNEL);
+	wl->fw = vmalloc(wl->fw_len);
 
 	if (!wl->fw) {
 		wl1271_error("could not allocate memory for the firmware");
@@ -292,7 +587,7 @@
 	u32 elp_reg;
 
 	elp_reg = ELPCTRL_WAKE_UP;
-	wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
+	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
 }
 
 static int wl1271_setup(struct wl1271 *wl)
@@ -314,6 +609,7 @@
 
 static int wl1271_chip_wakeup(struct wl1271 *wl)
 {
+	struct wl1271_partition_set partition;
 	int ret = 0;
 
 	wl1271_power_on(wl);
@@ -323,11 +619,10 @@
 
 	/* We don't need a real memory partition here, because we only want
 	 * to use the registers at this point. */
-	wl1271_set_partition(wl,
-			     0x00000000,
-			     0x00000000,
-			     REGISTERS_BASE,
-			     REGISTERS_DOWN_SIZE);
+	memset(&partition, 0, sizeof(partition));
+	partition.reg.start = REGISTERS_BASE;
+	partition.reg.size = REGISTERS_DOWN_SIZE;
+	wl1271_set_partition(wl, &partition);
 
 	/* ELP module wake up */
 	wl1271_fw_wakeup(wl);
@@ -335,7 +630,7 @@
 	/* whal_FwCtrl_BootSm() */
 
 	/* 0. read chip id from CHIP_ID */
-	wl->chip.id = wl1271_reg_read32(wl, CHIP_ID_B);
+	wl->chip.id = wl1271_spi_read32(wl, CHIP_ID_B);
 
 	/* 1. check if chip id is valid */
 
@@ -346,7 +641,7 @@
 
 		ret = wl1271_setup(wl);
 		if (ret < 0)
-			goto out;
+			goto out_power_off;
 		break;
 	case CHIP_ID_1271_PG20:
 		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -354,58 +649,36 @@
 
 		ret = wl1271_setup(wl);
 		if (ret < 0)
-			goto out;
+			goto out_power_off;
 		break;
 	default:
 		wl1271_error("unsupported chip id: 0x%x", wl->chip.id);
 		ret = -ENODEV;
-		goto out;
+		goto out_power_off;
 	}
 
 	if (wl->fw == NULL) {
 		ret = wl1271_fetch_firmware(wl);
 		if (ret < 0)
-			goto out;
+			goto out_power_off;
 	}
 
 	/* No NVS from netlink, try to get it from the filesystem */
 	if (wl->nvs == NULL) {
 		ret = wl1271_fetch_nvs(wl);
 		if (ret < 0)
-			goto out;
+			goto out_power_off;
 	}
 
+	goto out;
+
+out_power_off:
+	wl1271_power_off(wl);
+
 out:
 	return ret;
 }
 
-static void wl1271_filter_work(struct work_struct *work)
-{
-	struct wl1271 *wl =
-		container_of(work, struct wl1271, filter_work);
-	int ret;
-
-	mutex_lock(&wl->mutex);
-
-	if (wl->state == WL1271_STATE_OFF)
-		goto out;
-
-	ret = wl1271_ps_elp_wakeup(wl, false);
-	if (ret < 0)
-		goto out;
-
-	/* FIXME: replace the magic numbers with proper definitions */
-	ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
-	if (ret < 0)
-		goto out_sleep;
-
-out_sleep:
-	wl1271_ps_elp_sleep(wl);
-
-out:
-	mutex_unlock(&wl->mutex);
-}
-
 int wl1271_plt_start(struct wl1271 *wl)
 {
 	int ret;
@@ -429,13 +702,26 @@
 
 	ret = wl1271_boot(wl);
 	if (ret < 0)
-		goto out;
+		goto out_power_off;
 
 	wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
 
 	ret = wl1271_plt_init(wl);
 	if (ret < 0)
-		goto out;
+		goto out_irq_disable;
+
+	/* Make sure power saving is disabled */
+	ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+	if (ret < 0)
+		goto out_irq_disable;
+
+	goto out;
+
+out_irq_disable:
+	wl1271_disable_interrupts(wl);
+
+out_power_off:
+	wl1271_power_off(wl);
 
 out:
 	mutex_unlock(&wl->mutex);
@@ -462,6 +748,7 @@
 	wl1271_power_off(wl);
 
 	wl->state = WL1271_STATE_OFF;
+	wl->rx_counter = 0;
 
 out:
 	mutex_unlock(&wl->mutex);
@@ -481,7 +768,7 @@
 	 * before that, the tx_work will not be initialized!
 	 */
 
-	schedule_work(&wl->tx_work);
+	ieee80211_queue_work(wl->hw, &wl->tx_work);
 
 	/*
 	 * The workqueue is slow to process the tx_queue and we need stop
@@ -501,6 +788,93 @@
 	return NETDEV_TX_OK;
 }
 
+static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
+			     void *arg)
+{
+	struct net_device *dev;
+	struct wireless_dev *wdev;
+	struct wiphy *wiphy;
+	struct ieee80211_hw *hw;
+	struct wl1271 *wl;
+	struct wl1271 *wl_temp;
+	struct in_device *idev;
+	struct in_ifaddr *ifa = arg;
+	int ret = 0;
+
+	/* FIXME: this ugly function should probably be implemented in
+	 * mac80211, leaving only a simple callback here that handles the
+	 * actual setting of the filters. As it stands, we have to dig up
+	 * references to various structures to reach what we need.
+	 * For the same reason there is no "initial" setting of the filter
+	 * in op_start: we don't want to dig up struct net_device there, so
+	 * the filter is set on the first change of the interface IP
+	 * address. */
+
+	dev = ifa->ifa_dev->dev;
+
+	wdev = dev->ieee80211_ptr;
+	if (wdev == NULL)
+		return -ENODEV;
+
+	wiphy = wdev->wiphy;
+	if (wiphy == NULL)
+		return -ENODEV;
+
+	hw = wiphy_priv(wiphy);
+	if (hw == NULL)
+		return -ENODEV;
+
+	/* Check that the interface is one supported by this driver. */
+	wl_temp = hw->priv;
+	list_for_each_entry(wl, &wl_list, list) {
+		if (wl == wl_temp)
+			break;
+	}
+	if (wl == NULL)
+		return -ENODEV;
+
+	/* Get the interface IP address for the device. "ifa" will become
+	   NULL if:
+	     - there is no IPv4 protocol address configured
+	     - there are multiple (virtual) IPv4 addresses configured
+	   When "ifa" is NULL, filtering will be disabled.
+	*/
+	ifa = NULL;
+	idev = dev->ip_ptr;
+	if (idev)
+		ifa = idev->ifa_list;
+
+	if (ifa && ifa->ifa_next)
+		ifa = NULL;
+
+	mutex_lock(&wl->mutex);
+
+	if (wl->state == WL1271_STATE_OFF)
+		goto out;
+
+	ret = wl1271_ps_elp_wakeup(wl, false);
+	if (ret < 0)
+		goto out;
+	if (ifa)
+		ret = wl1271_acx_arp_ip_filter(wl, true,
+					       (u8 *)&ifa->ifa_address,
+					       ACX_IPV4_VERSION);
+	else
+		ret = wl1271_acx_arp_ip_filter(wl, false, NULL,
+					       ACX_IPV4_VERSION);
+	wl1271_ps_elp_sleep(wl);
+
+out:
+	mutex_unlock(&wl->mutex);
+
+	return ret;
+}
+
+static struct notifier_block wl1271_dev_notifier = {
+	.notifier_call = wl1271_dev_notify,
+};
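
The notifier is armed and disarmed together with the interface (see the op_start and op_stop hunks below); the pairing, reduced to a sketch (helper name hypothetical):

	static void sketch_arp_filter_lifetime(struct wl1271 *wl, bool up)
	{
		if (up) {
			list_add(&wl->list, &wl_list);
			register_inetaddr_notifier(&wl1271_dev_notifier);
		} else {
			unregister_inetaddr_notifier(&wl1271_dev_notifier);
			list_del(&wl->list);
		}
	}
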
+
+
 static int wl1271_op_start(struct ieee80211_hw *hw)
 {
 	struct wl1271 *wl = hw->priv;
@@ -523,22 +897,32 @@
 
 	ret = wl1271_boot(wl);
 	if (ret < 0)
-		goto out;
+		goto out_power_off;
 
 	ret = wl1271_hw_init(wl);
 	if (ret < 0)
-		goto out;
+		goto out_irq_disable;
 
 	wl->state = WL1271_STATE_ON;
 
 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
 
-out:
-	if (ret < 0)
-		wl1271_power_off(wl);
+	goto out;
 
+out_irq_disable:
+	wl1271_disable_interrupts(wl);
+
+out_power_off:
+	wl1271_power_off(wl);
+
+out:
 	mutex_unlock(&wl->mutex);
 
+	if (!ret) {
+		list_add(&wl->list, &wl_list);
+		register_inetaddr_notifier(&wl1271_dev_notifier);
+	}
+
 	return ret;
 }
 
@@ -551,6 +935,9 @@
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
 
+	unregister_inetaddr_notifier(&wl1271_dev_notifier);
+	list_del(&wl->list);
+
 	mutex_lock(&wl->mutex);
 
 	WARN_ON(wl->state != WL1271_STATE_ON);
@@ -570,7 +957,6 @@
 
 	cancel_work_sync(&wl->irq_work);
 	cancel_work_sync(&wl->tx_work);
-	cancel_work_sync(&wl->filter_work);
 
 	mutex_lock(&wl->mutex);
 
@@ -581,8 +967,8 @@
 	memset(wl->bssid, 0, ETH_ALEN);
 	memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
 	wl->ssid_len = 0;
-	wl->listen_int = 1;
 	wl->bss_type = MAX_BSS_TYPE;
+	wl->band = IEEE80211_BAND_2GHZ;
 
 	wl->rx_counter = 0;
 	wl->elp = false;
@@ -592,8 +978,13 @@
 	wl->tx_blocks_available = 0;
 	wl->tx_results_count = 0;
 	wl->tx_packets_count = 0;
+	wl->tx_security_last_seq = 0;
+	wl->tx_security_seq_16 = 0;
+	wl->tx_security_seq_32 = 0;
 	wl->time_offset = 0;
 	wl->session_counter = 0;
+	wl->joined = false;
+
 	for (i = 0; i < NUM_TX_QUEUES; i++)
 		wl->tx_blocks_freed[i] = 0;
 
@@ -611,6 +1002,12 @@
 		     conf->type, conf->mac_addr);
 
 	mutex_lock(&wl->mutex);
+	if (wl->vif) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	wl->vif = conf->vif;
 
 	switch (conf->type) {
 	case NL80211_IFTYPE_STATION:
@@ -634,7 +1031,12 @@
 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
 					 struct ieee80211_if_init_conf *conf)
 {
+	struct wl1271 *wl = hw->priv;
+
+	mutex_lock(&wl->mutex);
 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
+	wl->vif = NULL;
+	mutex_unlock(&wl->mutex);
 }
 
 #if 0
@@ -657,7 +1059,15 @@
 	if (ret < 0)
 		goto out;
 
-	memcpy(wl->bssid, conf->bssid, ETH_ALEN);
+	if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) {
+		wl1271_debug(DEBUG_MAC80211, "bssid changed");
+
+		memcpy(wl->bssid, conf->bssid, ETH_ALEN);
+
+		ret = wl1271_cmd_join(wl);
+		if (ret < 0)
+			goto out_sleep;
+	}
 
 	ret = wl1271_cmd_build_null_data(wl);
 	if (ret < 0)
@@ -667,13 +1077,6 @@
 	if (wl->ssid_len)
 		memcpy(wl->ssid, conf->ssid, wl->ssid_len);
 
-	if (wl->bss_type != BSS_TYPE_IBSS) {
-		/* FIXME: replace the magic numbers with proper definitions */
-		ret = wl1271_cmd_join(wl, wl->bss_type, 5, 100, 1);
-		if (ret < 0)
-			goto out_sleep;
-	}
-
 	if (conf->changed & IEEE80211_IFCC_BEACON) {
 		beacon = ieee80211_beacon_get(hw, vif);
 		ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
@@ -691,12 +1094,6 @@
 
 		if (ret < 0)
 			goto out_sleep;
-
-		/* FIXME: replace the magic numbers with proper definitions */
-		ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
-
-		if (ret < 0)
-			goto out_sleep;
 	}
 
 out_sleep:
@@ -724,20 +1121,20 @@
 
 	mutex_lock(&wl->mutex);
 
+	wl->band = conf->channel->band;
+
 	ret = wl1271_ps_elp_wakeup(wl, false);
 	if (ret < 0)
 		goto out;
 
 	if (channel != wl->channel) {
-		u8 old_channel = wl->channel;
+		/*
+		 * We assume that the stack will configure the right channel
+		 * before associating, so we don't need to send a join
+		 * command here. We will join the right channel when the
+		 * BSSID changes.
+		 */
 		wl->channel = channel;
-
-		/* FIXME: use beacon interval provided by mac80211 */
-		ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
-		if (ret < 0) {
-			wl->channel = old_channel;
-			goto out_sleep;
-		}
 	}
 
 	ret = wl1271_cmd_build_null_data(wl);
@@ -782,6 +1179,45 @@
 	return ret;
 }
 
+struct wl1271_filter_params {
+	bool enabled;
+	int mc_list_length;
+	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
+};
+
+static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
+				       struct dev_addr_list *mc_list)
+{
+	struct wl1271_filter_params *fp;
+	int i;
+
+	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
+	if (!fp) {
+		wl1271_error("Out of memory setting filters.");
+		return 0;
+	}
+
+	/* update multicast filtering parameters */
+	fp->enabled = true;
+	if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
+		mc_count = 0;
+		fp->enabled = false;
+	}
+
+	fp->mc_list_length = 0;
+	for (i = 0; i < mc_count; i++) {
+		if (mc_list->da_addrlen == ETH_ALEN) {
+			memcpy(fp->mc_list[fp->mc_list_length],
+			       mc_list->da_addr, ETH_ALEN);
+			fp->mc_list_length++;
+		} else
+			wl1271_warning("Unknown mc address length.");
+		mc_list = mc_list->next;
+	}
+
+	return (u64)(unsigned long)fp;
+}
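
prepare_multicast() hands its allocation back to mac80211 as a u64 cookie; mac80211 passes the same value as the multicast argument of configure_filter(), where it is cast back, consumed and freed. A sketch of the receiving side, mirroring the configure_filter hunk below (helper name hypothetical, error handling omitted):

	static void sketch_consume_mc_cookie(struct wl1271 *wl, u64 multicast)
	{
		struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;

		if (fp)
			wl1271_acx_group_address_tbl(wl, fp->enabled,
						     fp->mc_list,
						     fp->mc_list_length);
		kfree(fp);
	}
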
+
 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
 				  FIF_ALLMULTI | \
 				  FIF_FCSFAIL | \
@@ -791,28 +1227,53 @@
 
 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
 				       unsigned int changed,
-				       unsigned int *total,u64 multicast)
+				       unsigned int *total, u64 multicast)
 {
+	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
 	struct wl1271 *wl = hw->priv;
+	int ret;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
 
+	mutex_lock(&wl->mutex);
+
+	if (wl->state == WL1271_STATE_OFF)
+		goto out;
+
+	ret = wl1271_ps_elp_wakeup(wl, false);
+	if (ret < 0)
+		goto out;
+
 	*total &= WL1271_SUPPORTED_FILTERS;
 	changed &= WL1271_SUPPORTED_FILTERS;
 
+	if (*total & FIF_ALLMULTI)
+		ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
+	else if (fp)
+		ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
+						   fp->mc_list,
+						   fp->mc_list_length);
+	if (ret < 0)
+		goto out_sleep;
+
+	kfree(fp);
+
+	/* FIXME: We still need to set our filters properly */
+
+	/* determine, whether supported filter values have changed */
 	if (changed == 0)
-		return;
+		goto out_sleep;
 
-	/* FIXME: wl->rx_config and wl->rx_filter are not protected */
-	wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
-	wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+	/* apply configured filters */
+	ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
+	if (ret < 0)
+		goto out_sleep;
 
-	/*
-	 * FIXME: workqueues need to be properly cancelled on stop(), for
-	 * now let's just disable changing the filter settings. They will
-	 * be updated any on config().
-	 */
-	/* schedule_work(&wl->filter_work); */
+out_sleep:
+	wl1271_ps_elp_sleep(wl);
+
+out:
+	mutex_unlock(&wl->mutex);
 }
 
 static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -823,6 +1284,8 @@
 	struct wl1271 *wl = hw->priv;
 	const u8 *addr;
 	int ret;
+	u32 tx_seq_32 = 0;
+	u16 tx_seq_16 = 0;
 	u8 key_type;
 
 	static const u8 bcast_addr[ETH_ALEN] =
@@ -861,11 +1324,15 @@
 		key_type = KEY_TKIP;
 
 		key_conf->hw_key_idx = key_conf->keyidx;
+		tx_seq_32 = wl->tx_security_seq_32;
+		tx_seq_16 = wl->tx_security_seq_16;
 		break;
 	case ALG_CCMP:
 		key_type = KEY_AES;
 
 		key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+		tx_seq_32 = wl->tx_security_seq_32;
+		tx_seq_16 = wl->tx_security_seq_16;
 		break;
 	default:
 		wl1271_error("Unknown key algo 0x%x", key_conf->alg);
@@ -879,7 +1346,7 @@
 		ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
 					 key_conf->keyidx, key_type,
 					 key_conf->keylen, key_conf->key,
-					 addr);
+					 addr, tx_seq_32, tx_seq_16);
 		if (ret < 0) {
 			wl1271_error("Could not add or replace key");
 			goto out_sleep;
@@ -890,7 +1357,7 @@
 		ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
 					 key_conf->keyidx, key_type,
 					 key_conf->keylen, key_conf->key,
-					 addr);
+					 addr, 0, 0);
 		if (ret < 0) {
 			wl1271_error("Could not remove key");
 			goto out_sleep;
@@ -921,13 +1388,13 @@
 	struct wl1271 *wl = hw->priv;
 	int ret;
 	u8 *ssid = NULL;
-	size_t ssid_len = 0;
+	size_t len = 0;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
 
 	if (req->n_ssids) {
 		ssid = req->ssids[0].ssid;
-		ssid_len = req->ssids[0].ssid_len;
+		len = req->ssids[0].ssid_len;
 	}
 
 	mutex_lock(&wl->mutex);
@@ -936,7 +1403,12 @@
 	if (ret < 0)
 		goto out;
 
-	ret = wl1271_cmd_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
+	if (wl1271_11a_enabled())
+		ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
+				      WL1271_SCAN_BAND_DUAL, 3);
+	else
+		ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
+				      WL1271_SCAN_BAND_2_4_GHZ, 3);
 
 	wl1271_ps_elp_sleep(wl);
 
@@ -969,6 +1441,22 @@
 	return ret;
 }
 
+static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
+{
+	struct ieee80211_supported_band *band;
+	u32 enabled_rates = 0;
+	int bit;
+
+	band = wl->hw->wiphy->bands[wl->band];
+	for (bit = 0; bit < band->n_bitrates; bit++) {
+		if (basic_rate_set & 0x1)
+			enabled_rates |= band->bitrates[bit].hw_value;
+		basic_rate_set >>= 1;
+	}
+
+	return enabled_rates;
+}
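
wl1271_enabled_rates_get() walks the mac80211 basic-rate bitmap, where bit N selects bitrates[N] of the current band, and ORs together the matching hardware rate flags. A worked example against the 2.4 GHz table below, assuming wl->band is the 2.4 GHz band (helper name hypothetical):

	/* basic_rate_set = 0x3 selects bitrates[0] and bitrates[1] (1 and 2 Mbps),
	 * so this returns CONF_HW_BIT_RATE_1MBPS | CONF_HW_BIT_RATE_2MBPS.
	 */
	static u32 sketch_basic_rates_1_2mbps(struct wl1271 *wl)
	{
		return wl1271_enabled_rates_get(wl, 0x3);
	}
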
+
 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif,
 				       struct ieee80211_bss_conf *bss_conf,
@@ -990,6 +1478,12 @@
 		if (bss_conf->assoc) {
 			wl->aid = bss_conf->aid;
 
+			/*
+			 * With wl1271, we don't need to update beacon_int
+			 * and dtim_period, because the firmware updates them
+			 * by itself when the first beacon is received after
+			 * a join.
+			 */
 			ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
 			if (ret < 0)
 				goto out_sleep;
@@ -1005,8 +1499,14 @@
 				if (ret < 0)
 					goto out_sleep;
 			}
+		} else {
+			/* use defaults when not associated */
+			wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+			wl->aid = 0;
 		}
+
 	}
+
 	if (changed & BSS_CHANGED_ERP_SLOT) {
 		if (bss_conf->use_short_slot)
 			ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
@@ -1036,6 +1536,17 @@
 		}
 	}
 
+	if (changed & BSS_CHANGED_BASIC_RATES) {
+		wl->basic_rate_set = wl1271_enabled_rates_get(
+			wl, bss_conf->basic_rates);
+
+		ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
+		if (ret < 0) {
+			wl1271_warning("Set rate policies failed %d", ret);
+			goto out_sleep;
+		}
+	}
+
 out_sleep:
 	wl1271_ps_elp_sleep(wl);
 
@@ -1047,44 +1558,44 @@
 /* can't be const, mac80211 writes to this */
 static struct ieee80211_rate wl1271_rates[] = {
 	{ .bitrate = 10,
-	  .hw_value = 0x1,
-	  .hw_value_short = 0x1, },
+	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
 	{ .bitrate = 20,
-	  .hw_value = 0x2,
-	  .hw_value_short = 0x2,
+	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 	{ .bitrate = 55,
-	  .hw_value = 0x4,
-	  .hw_value_short = 0x4,
+	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 	{ .bitrate = 110,
-	  .hw_value = 0x20,
-	  .hw_value_short = 0x20,
+	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 	{ .bitrate = 60,
-	  .hw_value = 0x8,
-	  .hw_value_short = 0x8, },
+	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
 	{ .bitrate = 90,
-	  .hw_value = 0x10,
-	  .hw_value_short = 0x10, },
+	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
 	{ .bitrate = 120,
-	  .hw_value = 0x40,
-	  .hw_value_short = 0x40, },
+	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
 	{ .bitrate = 180,
-	  .hw_value = 0x80,
-	  .hw_value_short = 0x80, },
+	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
 	{ .bitrate = 240,
-	  .hw_value = 0x200,
-	  .hw_value_short = 0x200, },
+	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
 	{ .bitrate = 360,
-	 .hw_value = 0x400,
-	 .hw_value_short = 0x400, },
+	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
+	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
 	{ .bitrate = 480,
-	  .hw_value = 0x800,
-	  .hw_value_short = 0x800, },
+	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
 	{ .bitrate = 540,
-	  .hw_value = 0x1000,
-	  .hw_value_short = 0x1000, },
+	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
 };
 
 /* can't be const, mac80211 writes to this */
@@ -1112,6 +1623,88 @@
 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
 };
 
+/* 5 GHz data rates for WL1273 */
+static struct ieee80211_rate wl1271_rates_5ghz[] = {
+	{ .bitrate = 60,
+	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
+	{ .bitrate = 90,
+	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
+	{ .bitrate = 120,
+	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
+	{ .bitrate = 180,
+	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
+	{ .bitrate = 240,
+	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
+	{ .bitrate = 360,
+	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
+	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
+	{ .bitrate = 480,
+	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
+	{ .bitrate = 540,
+	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
+	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
+};
+
+/* 5 GHz band channels for WL1273 */
+static struct ieee80211_channel wl1271_channels_5ghz[] = {
+	{ .hw_value = 183, .center_freq = 4915},
+	{ .hw_value = 184, .center_freq = 4920},
+	{ .hw_value = 185, .center_freq = 4925},
+	{ .hw_value = 187, .center_freq = 4935},
+	{ .hw_value = 188, .center_freq = 4940},
+	{ .hw_value = 189, .center_freq = 4945},
+	{ .hw_value = 192, .center_freq = 4960},
+	{ .hw_value = 196, .center_freq = 4980},
+	{ .hw_value = 7, .center_freq = 5035},
+	{ .hw_value = 8, .center_freq = 5040},
+	{ .hw_value = 9, .center_freq = 5045},
+	{ .hw_value = 11, .center_freq = 5055},
+	{ .hw_value = 12, .center_freq = 5060},
+	{ .hw_value = 16, .center_freq = 5080},
+	{ .hw_value = 34, .center_freq = 5170},
+	{ .hw_value = 36, .center_freq = 5180},
+	{ .hw_value = 38, .center_freq = 5190},
+	{ .hw_value = 40, .center_freq = 5200},
+	{ .hw_value = 42, .center_freq = 5210},
+	{ .hw_value = 44, .center_freq = 5220},
+	{ .hw_value = 46, .center_freq = 5230},
+	{ .hw_value = 48, .center_freq = 5240},
+	{ .hw_value = 52, .center_freq = 5260},
+	{ .hw_value = 56, .center_freq = 5280},
+	{ .hw_value = 60, .center_freq = 5300},
+	{ .hw_value = 64, .center_freq = 5320},
+	{ .hw_value = 100, .center_freq = 5500},
+	{ .hw_value = 104, .center_freq = 5520},
+	{ .hw_value = 108, .center_freq = 5540},
+	{ .hw_value = 112, .center_freq = 5560},
+	{ .hw_value = 116, .center_freq = 5580},
+	{ .hw_value = 120, .center_freq = 5600},
+	{ .hw_value = 124, .center_freq = 5620},
+	{ .hw_value = 128, .center_freq = 5640},
+	{ .hw_value = 132, .center_freq = 5660},
+	{ .hw_value = 136, .center_freq = 5680},
+	{ .hw_value = 140, .center_freq = 5700},
+	{ .hw_value = 149, .center_freq = 5745},
+	{ .hw_value = 153, .center_freq = 5765},
+	{ .hw_value = 157, .center_freq = 5785},
+	{ .hw_value = 161, .center_freq = 5805},
+	{ .hw_value = 165, .center_freq = 5825},
+};
+
+
+static struct ieee80211_supported_band wl1271_band_5ghz = {
+	.channels = wl1271_channels_5ghz,
+	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
+	.bitrates = wl1271_rates_5ghz,
+	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
+};
+
 static const struct ieee80211_ops wl1271_ops = {
 	.start = wl1271_op_start,
 	.stop = wl1271_op_stop,
@@ -1119,6 +1712,7 @@
 	.remove_interface = wl1271_op_remove_interface,
 	.config = wl1271_op_config,
 /* 	.config_interface = wl1271_op_config_interface, */
+	.prepare_multicast = wl1271_op_prepare_multicast,
 	.configure_filter = wl1271_op_configure_filter,
 	.tx = wl1271_op_tx,
 	.set_key = wl1271_op_set_key,
@@ -1151,24 +1745,25 @@
 
 static int wl1271_init_ieee80211(struct wl1271 *wl)
 {
-	/*
-	 * The tx descriptor buffer and the TKIP space.
-	 *
-	 * FIXME: add correct 1271 descriptor size
-	 */
-	wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE;
+	/* The tx descriptor buffer and the TKIP space. */
+	wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
+		sizeof(struct wl1271_tx_hw_descr);
 
 	/* unit us */
 	/* FIXME: find a proper value */
 	wl->hw->channel_change_time = 10000;
 
 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_NOISE_DBM;
+		IEEE80211_HW_NOISE_DBM |
+		IEEE80211_HW_BEACON_FILTER;
 
 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 	wl->hw->wiphy->max_scan_ssids = 1;
 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
 
+	if (wl1271_11a_enabled())
+		wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
+
 	SET_IEEE80211_DEV(wl->hw, &wl->spi->dev);
 
 	return 0;
@@ -1213,17 +1808,18 @@
 	wl = hw->priv;
 	memset(wl, 0, sizeof(*wl));
 
+	INIT_LIST_HEAD(&wl->list);
+
 	wl->hw = hw;
 	dev_set_drvdata(&spi->dev, wl);
 	wl->spi = spi;
 
 	skb_queue_head_init(&wl->tx_queue);
 
-	INIT_WORK(&wl->filter_work, wl1271_filter_work);
+	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
 	wl->channel = WL1271_DEFAULT_CHANNEL;
 	wl->scanning = false;
 	wl->default_key = 0;
-	wl->listen_int = 1;
 	wl->rx_counter = 0;
 	wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
 	wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
@@ -1232,10 +1828,12 @@
 	wl->psm_requested = false;
 	wl->tx_queue_stopped = false;
 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+	wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+	wl->band = IEEE80211_BAND_2GHZ;
+	wl->vif = NULL;
+	wl->joined = false;
 
-	/* We use the default power on sleep time until we know which chip
-	 * we're using */
-	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
+	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
 		wl->tx_frames[i] = NULL;
 
 	spin_lock_init(&wl->wl_lock);
@@ -1250,13 +1848,6 @@
 	wl->state = WL1271_STATE_OFF;
 	mutex_init(&wl->mutex);
 
-	wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL);
-	if (!wl->rx_descriptor) {
-		wl1271_error("could not allocate memory for rx descriptor");
-		ret = -ENOMEM;
-		goto out_free;
-	}
-
 	/* This is the only SPI value that we need to set here, the rest
 	 * comes from the board-peripherals file */
 	spi->bits_per_word = 32;
@@ -1298,6 +1889,9 @@
 	}
 	dev_set_drvdata(&wl1271_device.dev, wl);
 
+	/* Apply default driver configuration. */
+	wl1271_conf_init(wl);
+
 	ret = wl1271_init_ieee80211(wl);
 	if (ret)
 		goto out_platform;
@@ -1319,9 +1913,6 @@
 	free_irq(wl->irq, wl);
 
  out_free:
-	kfree(wl->rx_descriptor);
-	wl->rx_descriptor = NULL;
-
 	ieee80211_free_hw(hw);
 
 	return ret;
@@ -1337,14 +1928,11 @@
 	platform_device_unregister(&wl1271_device);
 	free_irq(wl->irq, wl);
 	kfree(wl->target_mem_map);
-	kfree(wl->fw);
+	vfree(wl->fw);
 	wl->fw = NULL;
 	kfree(wl->nvs);
 	wl->nvs = NULL;
 
-	kfree(wl->rx_descriptor);
-	wl->rx_descriptor = NULL;
-
 	kfree(wl->fw_status);
 	kfree(wl->tx_res_if);
 
@@ -1391,3 +1979,4 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
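The extra_tx_headroom update above asks mac80211 to reserve room in front of every frame for both the driver's private TX descriptor and the TKIP IV expansion, so the TX path can grow the frame in place instead of reallocating (the wl1271_tx.c hunks later in this patch rely on that when they relocate the 802.11 header). A minimal sketch of how such reserved headroom is typically consumed; the struct, constant and helper names are invented for illustration and are not the driver's own:

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <net/mac80211.h>

	#define EXAMPLE_TKIP_IV_SPACE 4			/* hypothetical IV gap size */

	struct example_tx_descr {			/* hypothetical HW descriptor */
		__le16 length;
		u8 id;
	} __attribute__ ((packed));

	static int example_fill_tx_frame(struct sk_buff *skb, bool tkip)
	{
		struct example_tx_descr *desc;
		unsigned int extra = tkip ? EXAMPLE_TKIP_IV_SPACE : 0;

		/* mac80211 reserved extra_tx_headroom bytes in front of skb->data */
		if (skb_headroom(skb) < sizeof(*desc) + extra)
			return -ENOSPC;

		if (extra) {
			/* slide the 802.11 header forward, leaving an IV-sized
			 * gap between the header and the payload */
			unsigned int hdrlen = ieee80211_get_hdrlen_from_skb(skb);

			skb_push(skb, extra);
			memmove(skb->data, skb->data + extra, hdrlen);
		}

		/* prepend the private hardware descriptor */
		desc = (struct example_tx_descr *) skb_push(skb, sizeof(*desc));
		memset(desc, 0, sizeof(*desc));

		return 0;
	}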
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 1dc74b0..507cd91 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -27,25 +27,38 @@
 
 #define WL1271_WAKEUP_TIMEOUT 500
 
+void wl1271_elp_work(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct wl1271 *wl;
+
+	dwork = container_of(work, struct delayed_work, work);
+	wl = container_of(dwork, struct wl1271, elp_work);
+
+	wl1271_debug(DEBUG_PSM, "elp work");
+
+	mutex_lock(&wl->mutex);
+
+	if (wl->elp || !wl->psm)
+		goto out;
+
+	wl1271_debug(DEBUG_PSM, "chip to elp");
+	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
+	wl->elp = true;
+
+out:
+	mutex_unlock(&wl->mutex);
+}
+
+#define ELP_ENTRY_DELAY  5
+
 /* Routines to toggle sleep mode while in ELP */
 void wl1271_ps_elp_sleep(struct wl1271 *wl)
 {
-	/*
-	 * FIXME: due to a problem in the firmware (causing a firmware
-	 * crash), ELP entry is prevented below. Remove the "true" to
-	 * re-enable ELP entry.
-	 */
-	if (true || wl->elp || !wl->psm)
-		return;
-
-	/*
-	 * Go to ELP unless there is work already pending - pending work
-	 * will immediately wakeup the chipset anyway.
-	 */
-	if (!work_pending(&wl->irq_work) && !work_pending(&wl->tx_work)) {
-		wl1271_debug(DEBUG_PSM, "chip to elp");
-		wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
-		wl->elp = true;
+	if (wl->psm) {
+		cancel_delayed_work(&wl->elp_work);
+		ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
+					msecs_to_jiffies(ELP_ENTRY_DELAY));
 	}
 }
 
@@ -73,7 +86,7 @@
 		wl->elp_compl = &compl;
 	spin_unlock_irqrestore(&wl->wl_lock, flags);
 
-	wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
+	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
 
 	if (!pending) {
 		ret = wait_for_completion_timeout(
@@ -111,6 +124,17 @@
 	switch (mode) {
 	case STATION_POWER_SAVE_MODE:
 		wl1271_debug(DEBUG_PSM, "entering psm");
+
+		/* enable beacon filtering */
+		ret = wl1271_acx_beacon_filter_opt(wl, true);
+		if (ret < 0)
+			return ret;
+
+		/* enable beacon early termination */
+		ret = wl1271_acx_bet_enable(wl, true);
+		if (ret < 0)
+			return ret;
+
 		ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
 		if (ret < 0)
 			return ret;
@@ -128,6 +152,16 @@
 		if (ret < 0)
 			return ret;
 
+		/* disable beacon early termination */
+		ret = wl1271_acx_bet_enable(wl, false);
+		if (ret < 0)
+			return ret;
+
+		/* disable beacon filtering */
+		ret = wl1271_acx_beacon_filter_opt(wl, false);
+		if (ret < 0)
+			return ret;
+
 		ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
 		if (ret < 0)
 			return ret;
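With this change, ELP entry is no longer written inline: every "may sleep" event just re-arms a short delayed work, and only the work item, holding the mutex, checks that power save is still active before putting the chip to sleep. The same debounce pattern in isolation, as a sketch with invented names rather than the driver's code:

	#include <linux/jiffies.h>
	#include <linux/kernel.h>
	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct example_dev {				/* hypothetical device state */
		struct mutex lock;
		struct delayed_work sleep_work;
		bool asleep;
		bool powersave;
	};

	static void example_sleep_work(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);
		struct example_dev *dev =
			container_of(dwork, struct example_dev, sleep_work);

		mutex_lock(&dev->lock);
		/* only sleep if nothing woke us meanwhile and PSM is still on */
		if (!dev->asleep && dev->powersave)
			dev->asleep = true;	/* the driver writes ELPCTRL_SLEEP here */
		mutex_unlock(&dev->lock);
	}

	/* called from the hot path whenever the chip could go back to sleep */
	static void example_request_sleep(struct example_dev *dev)
	{
		if (!dev->powersave)
			return;

		cancel_delayed_work(&dev->sleep_work);
		schedule_delayed_work(&dev->sleep_work, msecs_to_jiffies(5));
	}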
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/wl1271_ps.h
index de2bd3c..779653d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.h
@@ -30,6 +30,6 @@
 int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode);
 void wl1271_ps_elp_sleep(struct wl1271 *wl);
 int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
-
+void wl1271_elp_work(struct work_struct *work);
 
 #endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index f8ed4a4..1f23738 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -34,7 +34,7 @@
 #define REGISTERS_WORK_SIZE 0x0000b000
 
 #define HW_ACCESS_ELP_CTRL_REG_ADDR         0x1FFFC
-#define STATUS_MEM_ADDRESS                  0x40400
+#define FW_STATUS_ADDR                      (0x14FC0 + 0xA000)
 
 /* ELP register commands */
 #define ELPCTRL_WAKE_UP             0x1
@@ -213,7 +213,6 @@
 ==============================================*/
 #define ACX_REG_INTERRUPT_ACK          (REGISTERS_BASE + 0x04F0)
 
-#define RX_DRIVER_DUMMY_WRITE_ADDRESS  (REGISTERS_BASE + 0x0534)
 #define RX_DRIVER_COUNTER_ADDRESS      (REGISTERS_BASE + 0x0538)
 
 /* Device Configuration registers*/
@@ -614,50 +613,6 @@
 	MAX_RADIO_BANDS = 0xFF
 };
 
-enum {
-	NO_RATE      = 0,
-	RATE_1MBPS   = 0x0A,
-	RATE_2MBPS   = 0x14,
-	RATE_5_5MBPS = 0x37,
-	RATE_6MBPS   = 0x0B,
-	RATE_9MBPS   = 0x0F,
-	RATE_11MBPS  = 0x6E,
-	RATE_12MBPS  = 0x0A,
-	RATE_18MBPS  = 0x0E,
-	RATE_22MBPS  = 0xDC,
-	RATE_24MBPS  = 0x09,
-	RATE_36MBPS  = 0x0D,
-	RATE_48MBPS  = 0x08,
-	RATE_54MBPS  = 0x0C
-};
-
-enum {
-	RATE_INDEX_1MBPS   =  0,
-	RATE_INDEX_2MBPS   =  1,
-	RATE_INDEX_5_5MBPS =  2,
-	RATE_INDEX_6MBPS   =  3,
-	RATE_INDEX_9MBPS   =  4,
-	RATE_INDEX_11MBPS  =  5,
-	RATE_INDEX_12MBPS  =  6,
-	RATE_INDEX_18MBPS  =  7,
-	RATE_INDEX_22MBPS  =  8,
-	RATE_INDEX_24MBPS  =  9,
-	RATE_INDEX_36MBPS  =  10,
-	RATE_INDEX_48MBPS  =  11,
-	RATE_INDEX_54MBPS  =  12,
-	RATE_INDEX_MAX     =  RATE_INDEX_54MBPS,
-	MAX_RATE_INDEX,
-	INVALID_RATE_INDEX = MAX_RATE_INDEX,
-	RATE_INDEX_ENUM_MAX_SIZE = 0x7FFFFFFF
-};
-
-enum {
-	RATE_MASK_1MBPS = 0x1,
-	RATE_MASK_2MBPS = 0x2,
-	RATE_MASK_5_5MBPS = 0x4,
-	RATE_MASK_11MBPS = 0x20,
-};
-
 #define SHORT_PREAMBLE_BIT   BIT(0) /* CCK or Barker depending on the rate */
 #define OFDM_RATE_BIT        BIT(6)
 #define PBCC_RATE_BIT        BIT(7)
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index ad8b690..37d81ab 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -30,14 +30,15 @@
 static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
 				  u32 drv_rx_counter)
 {
-	return status->rx_pkt_descs[drv_rx_counter] & RX_MEM_BLOCK_MASK;
+	return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
+		RX_MEM_BLOCK_MASK;
 }
 
 static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
 				 u32 drv_rx_counter)
 {
-	return (status->rx_pkt_descs[drv_rx_counter] & RX_BUF_SIZE_MASK) >>
-		RX_BUF_SIZE_SHIFT_DIV;
+	return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
+		RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
 }
 
 /* The values of this table must match the wl1271_rates[] array */
@@ -70,6 +71,36 @@
 	0                           /* WL1271_RATE_1    */
 };
 
+/* The values of this table must match the wl1271_rates[] array */
+static u8 wl1271_5_ghz_rx_rate_to_idx[] = {
+	/* MCS rates are used only with 11n */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
+
+	7,                          /* WL1271_RATE_54   */
+	6,                          /* WL1271_RATE_48   */
+	5,                          /* WL1271_RATE_36   */
+	4,                          /* WL1271_RATE_24   */
+
+	/* TI-specific rate */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22   */
+
+	3,                          /* WL1271_RATE_18   */
+	2,                          /* WL1271_RATE_12   */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11   */
+	1,                          /* WL1271_RATE_9    */
+	0,                          /* WL1271_RATE_6    */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5  */
+	WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2    */
+	WL1271_RX_RATE_UNSUPPORTED  /* WL1271_RATE_1    */
+};
+
 static void wl1271_rx_status(struct wl1271 *wl,
 			     struct wl1271_rx_descriptor *desc,
 			     struct ieee80211_rx_status *status,
@@ -77,12 +108,21 @@
 {
 	memset(status, 0, sizeof(struct ieee80211_rx_status));
 
-	if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
+	if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
+	    WL1271_RX_DESC_BAND_BG) {
 		status->band = IEEE80211_BAND_2GHZ;
-	else
+		status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
+	} else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
+		 WL1271_RX_DESC_BAND_A) {
+		status->band = IEEE80211_BAND_5GHZ;
+		status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate];
+	} else
 		wl1271_warning("unsupported band 0x%x",
 			       desc->flags & WL1271_RX_DESC_BAND_MASK);
 
+	if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED))
+		wl1271_warning("unsupported rate");
+
 	/*
 	 * FIXME: Add mactime handling.  For IBSS (ad-hoc) we need to get the
 	 * timestamp from the beacon (acx_tsf_info).  In BSS mode (infra) we
@@ -91,12 +131,6 @@
 	 */
 	status->signal = desc->rssi;
 
-	/* FIXME: Should this be optimized? */
-	status->qual = (desc->rssi - WL1271_RX_MIN_RSSI) * 100 /
-		(WL1271_RX_MAX_RSSI - WL1271_RX_MIN_RSSI);
-	status->qual = min(status->qual, 100);
-	status->qual = max(status->qual, 0);
-
 	/*
 	 * FIXME: In wl1251, the SNR should be divided by two.  In wl1271 we
 	 * need to divide by two for now, but TI has been discussing about
@@ -109,17 +143,11 @@
 	if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
 		status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
 
-		if (likely(!(desc->flags & WL1271_RX_DESC_DECRYPT_FAIL)))
+		if (likely(!(desc->status & WL1271_RX_DESC_DECRYPT_FAIL)))
 			status->flag |= RX_FLAG_DECRYPTED;
-
-		if (unlikely(desc->flags & WL1271_RX_DESC_MIC_FAIL))
+		if (unlikely(desc->status & WL1271_RX_DESC_MIC_FAIL))
 			status->flag |= RX_FLAG_MMIC_ERROR;
 	}
-
-	status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
-
-	if (status->rate_idx == WL1271_RX_RATE_UNSUPPORTED)
-		wl1271_warning("unsupported rate");
 }
 
 static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
@@ -138,7 +166,7 @@
 	}
 
 	buf = skb_put(skb, length);
-	wl1271_spi_reg_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
+	wl1271_spi_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
 
 	/* the data read starts with the descriptor */
 	desc = (struct wl1271_rx_descriptor *) buf;
@@ -156,7 +184,7 @@
 		     beacon ? "beacon" : "");
 
 	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
-	ieee80211_rx(wl->hw, skb);
+	ieee80211_rx_ni(wl->hw, skb);
 }
 
 void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
@@ -176,15 +204,15 @@
 			break;
 		}
 
-		wl->rx_mem_pool_addr.addr =
-			(mem_block << 8) + wl_mem_map->packet_memory_pool_start;
+		wl->rx_mem_pool_addr.addr = (mem_block << 8) +
+			le32_to_cpu(wl_mem_map->packet_memory_pool_start);
 		wl->rx_mem_pool_addr.addr_extra =
 			wl->rx_mem_pool_addr.addr + 4;
 
 		/* Choose the block we want to read */
-		wl1271_spi_reg_write(wl, WL1271_SLV_REG_DATA,
-				     &wl->rx_mem_pool_addr,
-				     sizeof(wl->rx_mem_pool_addr), false);
+		wl1271_spi_write(wl, WL1271_SLV_REG_DATA,
+				 &wl->rx_mem_pool_addr,
+				 sizeof(wl->rx_mem_pool_addr), false);
 
 		wl1271_rx_handle_data(wl, buf_size);
 
@@ -192,9 +220,5 @@
 		drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
 	}
 
-	wl1271_reg_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
-
-	/* This is a workaround for some problems in the chip */
-	wl1271_reg_write32(wl, RX_DRIVER_DUMMY_WRITE_ADDRESS, 0x1);
-
+	wl1271_spi_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
 }
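Both lookup tables above are indexed by the firmware's rate field and must stay in wl1271_rates[] order; the 5 GHz table simply marks the CCK and TI-proprietary entries as unsupported. A hedged sketch of how such a per-band lookup is used, with a bounds check and helper name added purely for illustration:

	static u8 example_fw_rate_to_idx(u8 fw_rate, enum ieee80211_band band)
	{
		if (fw_rate >= ARRAY_SIZE(wl1271_rx_rate_to_idx))
			return WL1271_RX_RATE_UNSUPPORTED;

		if (band == IEEE80211_BAND_5GHZ)
			return wl1271_5_ghz_rx_rate_to_idx[fw_rate];

		return wl1271_rx_rate_to_idx[fw_rate];
	}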
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index d1ca60e..1ae6d17 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -102,14 +102,14 @@
 #define RX_BUF_SIZE_SHIFT_DIV 6
 
 struct wl1271_rx_descriptor {
-	u16 length;
+	__le16 length;
 	u8  status;
 	u8  flags;
 	u8  rate;
 	u8  channel;
 	s8  rssi;
 	u8  snr;
-	u32 timestamp;
+	__le32 timestamp;
 	u8  packet_class;
 	u8  process_id;
 	u8  pad_len;
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 4a12880..02978a1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -30,17 +30,29 @@
 #include "wl12xx_80211.h"
 #include "wl1271_spi.h"
 
-static int wl1271_translate_reg_addr(struct wl1271 *wl, int addr)
+static int wl1271_translate_addr(struct wl1271 *wl, int addr)
 {
-	return addr - wl->physical_reg_addr + wl->virtual_reg_addr;
+	/*
+	 * To translate, first check to which window of addresses the
+	 * particular address belongs. Then subtract the starting address
+	 * of that window from the address. Then, add offset of the
+	 * translated region.
+	 *
+	 * The translated regions occur next to each other in physical device
+	 * memory, so just add the sizes of the preceding address regions to
+	 * get the offset to the new region.
+	 *
+	 * Currently, only the first two regions are addressed, and the
+	 * assumption is that all addresses will fall into either of those
+	 * two.
+	 */
+	if ((addr >= wl->part.reg.start) &&
+	    (addr < wl->part.reg.start + wl->part.reg.size))
+		return addr - wl->part.reg.start + wl->part.mem.size;
+	else
+		return addr - wl->part.mem.start;
 }
 
-static int wl1271_translate_mem_addr(struct wl1271 *wl, int addr)
-{
-	return addr - wl->physical_mem_addr + wl->virtual_mem_addr;
-}
-
-
 void wl1271_spi_reset(struct wl1271 *wl)
 {
 	u8 *cmd;
@@ -123,133 +135,137 @@
 
 /* Set the SPI partitions to access the chip addresses
  *
- * There are two VIRTUAL (SPI) partitions (the memory partition and the
- * registers partition), which are mapped to two different areas of the
- * PHYSICAL (hardware) memory.  This function also makes other checks to
- * ensure that the partitions are not overlapping.  In the diagram below, the
- * memory partition comes before the register partition, but the opposite is
- * also supported.
+ * To simplify driver code, a fixed (virtual) memory map is defined for
+ * register and memory addresses. Because in the chipset, in different stages
+ * of operation, those addresses will move around, an address translation
+ * mechanism is required.
  *
- *                               PHYSICAL address
+ * There are four partitions (three memory and one register partition),
+ * which are mapped to two different areas of the hardware memory.
+ *
+ *                                Virtual address
  *                                     space
  *
  *                                    |    |
- *                                 ...+----+--> mem_start
- *          VIRTUAL address     ...   |    |
+ *                                 ...+----+--> mem.start
+ *          Physical address    ...   |    |
  *               space       ...      |    | [PART_0]
  *                        ...         |    |
- * 0x00000000 <--+----+...         ...+----+--> mem_start + mem_size
+ *  00000000  <--+----+...         ...+----+--> mem.start + mem.size
  *               |    |         ...   |    |
  *               |MEM |      ...      |    |
  *               |    |   ...         |    |
- *  part_size <--+----+...            |    | {unused area)
+ *  mem.size  <--+----+...            |    | (unused area)
  *               |    |   ...         |    |
  *               |REG |      ...      |    |
- *  part_size    |    |         ...   |    |
- *      +     <--+----+...         ...+----+--> reg_start
- *  reg_size              ...         |    |
- *                           ...      |    | [PART_1]
- *                              ...   |    |
- *                                 ...+----+--> reg_start + reg_size
+ *  mem.size     |    |         ...   |    |
+ *      +     <--+----+...         ...+----+--> reg.start
+ *  reg.size     |    |   ...         |    |
+ *               |MEM2|      ...      |    | [PART_1]
+ *               |    |         ...   |    |
+ *                                 ...+----+--> reg.start + reg.size
  *                                    |    |
  *
  */
 int wl1271_set_partition(struct wl1271 *wl,
-			  u32 mem_start, u32 mem_size,
-			  u32 reg_start, u32 reg_size)
+			 struct wl1271_partition_set *p)
 {
-	struct wl1271_partition *partition;
-	struct spi_transfer t;
-	struct spi_message m;
-	size_t len, cmd_len;
-	u32 *cmd;
-	int addr;
-
-	cmd_len = sizeof(u32) + 2 * sizeof(struct wl1271_partition);
-	cmd = kzalloc(cmd_len, GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	spi_message_init(&m);
-	memset(&t, 0, sizeof(t));
-
-	partition = (struct wl1271_partition *) (cmd + 1);
-	addr = HW_ACCESS_PART0_SIZE_ADDR;
-	len = 2 * sizeof(struct wl1271_partition);
-
-	*cmd |= WSPI_CMD_WRITE;
-	*cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
-	*cmd |= addr & WSPI_CMD_BYTE_ADDR;
+	/* copy partition info */
+	memcpy(&wl->part, p, sizeof(*p));
 
 	wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
-		     mem_start, mem_size);
+		     p->mem.start, p->mem.size);
 	wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
-		     reg_start, reg_size);
+		     p->reg.start, p->reg.size);
+	wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
+		     p->mem2.start, p->mem2.size);
+	wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
+		     p->mem3.start, p->mem3.size);
 
-	/* Make sure that the two partitions together don't exceed the
-	 * address range */
-	if ((mem_size + reg_size) > HW_ACCESS_MEMORY_MAX_RANGE) {
-		wl1271_debug(DEBUG_SPI, "Total size exceeds maximum virtual"
-			     " address range.  Truncating partition[0].");
-		mem_size = HW_ACCESS_MEMORY_MAX_RANGE - reg_size;
-		wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
-			     mem_start, mem_size);
-		wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
-			     reg_start, reg_size);
-	}
-
-	if ((mem_start < reg_start) &&
-	    ((mem_start + mem_size) > reg_start)) {
-		/* Guarantee that the memory partition doesn't overlap the
-		 * registers partition */
-		wl1271_debug(DEBUG_SPI, "End of partition[0] is "
-			     "overlapping partition[1].  Adjusted.");
-		mem_size = reg_start - mem_start;
-		wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
-			     mem_start, mem_size);
-		wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
-			     reg_start, reg_size);
-	} else if ((reg_start < mem_start) &&
-		   ((reg_start + reg_size) > mem_start)) {
-		/* Guarantee that the register partition doesn't overlap the
-		 * memory partition */
-		wl1271_debug(DEBUG_SPI, "End of partition[1] is"
-			     " overlapping partition[0].  Adjusted.");
-		reg_size = mem_start - reg_start;
-		wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
-			     mem_start, mem_size);
-		wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
-			     reg_start, reg_size);
-	}
-
-	partition[0].start = mem_start;
-	partition[0].size  = mem_size;
-	partition[1].start = reg_start;
-	partition[1].size  = reg_size;
-
-	wl->physical_mem_addr = mem_start;
-	wl->physical_reg_addr = reg_start;
-
-	wl->virtual_mem_addr = 0;
-	wl->virtual_reg_addr = mem_size;
-
-	t.tx_buf = cmd;
-	t.len = cmd_len;
-	spi_message_add_tail(&t, &m);
-
-	spi_sync(wl->spi, &m);
-
-	kfree(cmd);
+	/* write partition info to the chipset */
+	wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
+	wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
+	wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
+	wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
+	wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
+	wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
+	wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
 
 	return 0;
 }
 
-void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf,
-		     size_t len, bool fixed)
+#define WL1271_BUSY_WORD_TIMEOUT 1000
+
+/* FIXME: Check busy words, removed due to SPI bug */
+#if 0
+static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
+{
+	struct spi_transfer t[1];
+	struct spi_message m;
+	u32 *busy_buf;
+	int num_busy_bytes = 0;
+
+	wl1271_info("spi read BUSY!");
+
+	/*
+	 * Look for the non-busy word in the read buffer, and if found,
+	 * read in the remaining data into the buffer.
+	 */
+	busy_buf = (u32 *)buf;
+	for (; (u32)busy_buf < (u32)buf + len; busy_buf++) {
+		num_busy_bytes += sizeof(u32);
+		if (*busy_buf & 0x1) {
+			spi_message_init(&m);
+			memset(t, 0, sizeof(t));
+			memmove(buf, busy_buf, len - num_busy_bytes);
+			t[0].rx_buf = buf + (len - num_busy_bytes);
+			t[0].len = num_busy_bytes;
+			spi_message_add_tail(&t[0], &m);
+			spi_sync(wl->spi, &m);
+			return;
+		}
+	}
+
+	/*
+	 * Read further busy words from SPI until a non-busy word is
+	 * encountered, then read the data itself into the buffer.
+	 */
+	wl1271_info("spi read BUSY-polling needed!");
+
+	num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
+	busy_buf = wl->buffer_busyword;
+	while (num_busy_bytes) {
+		num_busy_bytes--;
+		spi_message_init(&m);
+		memset(t, 0, sizeof(t));
+		t[0].rx_buf = busy_buf;
+		t[0].len = sizeof(u32);
+		spi_message_add_tail(&t[0], &m);
+		spi_sync(wl->spi, &m);
+
+		if (*busy_buf & 0x1) {
+			spi_message_init(&m);
+			memset(t, 0, sizeof(t));
+			t[0].rx_buf = buf;
+			t[0].len = len;
+			spi_message_add_tail(&t[0], &m);
+			spi_sync(wl->spi, &m);
+			return;
+		}
+	}
+
+	/* The SPI bus is unresponsive, the read failed. */
+	memset(buf, 0, len);
+	wl1271_error("SPI read busy-word timeout!\n");
+}
+#endif
+
+void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+			 size_t len, bool fixed)
 {
 	struct spi_transfer t[3];
 	struct spi_message m;
-	u8 *busy_buf;
+	u32 *busy_buf;
 	u32 *cmd;
 
 	cmd = &wl->buffer_cmd;
@@ -281,14 +297,16 @@
 
 	spi_sync(wl->spi, &m);
 
-	/* FIXME: check busy words */
+	/* FIXME: Check busy words, removed due to SPI bug */
+	/* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1))
+	   wl1271_spi_read_busy(wl, buf, len); */
 
 	wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
 	wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
 }
 
-void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf,
-		      size_t len, bool fixed)
+void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
+			  size_t len, bool fixed)
 {
 	struct spi_transfer t[2];
 	struct spi_message m;
@@ -321,62 +339,77 @@
 	wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
 }
 
-void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf,
-			 size_t len)
+void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
+		     bool fixed)
 {
 	int physical;
 
-	physical = wl1271_translate_mem_addr(wl, addr);
+	physical = wl1271_translate_addr(wl, addr);
 
-	wl1271_spi_read(wl, physical, buf, len, false);
+	wl1271_spi_raw_read(wl, physical, buf, len, fixed);
 }
 
-void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf,
-			  size_t len)
+void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
+		      bool fixed)
 {
 	int physical;
 
-	physical = wl1271_translate_mem_addr(wl, addr);
+	physical = wl1271_translate_addr(wl, addr);
 
-	wl1271_spi_write(wl, physical, buf, len, false);
+	wl1271_spi_raw_write(wl, physical, buf, len, fixed);
 }
 
-void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len,
-			 bool fixed)
+u32 wl1271_spi_read32(struct wl1271 *wl, int addr)
 {
-	int physical;
-
-	physical = wl1271_translate_reg_addr(wl, addr);
-
-	wl1271_spi_read(wl, physical, buf, len, fixed);
+	return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
 }
 
-void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len,
-			  bool fixed)
+void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val)
 {
-	int physical;
-
-	physical = wl1271_translate_reg_addr(wl, addr);
-
-	wl1271_spi_write(wl, physical, buf, len, fixed);
+	wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
 }
 
-u32 wl1271_mem_read32(struct wl1271 *wl, int addr)
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
 {
-	return wl1271_read32(wl, wl1271_translate_mem_addr(wl, addr));
+	/* write address >> 1 + 0x30000 to OCP_POR_CTR */
+	addr = (addr >> 1) + 0x30000;
+	wl1271_spi_write32(wl, OCP_POR_CTR, addr);
+
+	/* write value to OCP_POR_WDATA */
+	wl1271_spi_write32(wl, OCP_DATA_WRITE, val);
+
+	/* write 1 to OCP_CMD */
+	wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_WRITE);
 }
 
-void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val)
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
 {
-	wl1271_write32(wl, wl1271_translate_mem_addr(wl, addr), val);
-}
+	u32 val;
+	int timeout = OCP_CMD_LOOP;
 
-u32 wl1271_reg_read32(struct wl1271 *wl, int addr)
-{
-	return wl1271_read32(wl, wl1271_translate_reg_addr(wl, addr));
-}
+	/* write address >> 1 + 0x30000 to OCP_POR_CTR */
+	addr = (addr >> 1) + 0x30000;
+	wl1271_spi_write32(wl, OCP_POR_CTR, addr);
 
-void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val)
-{
-	wl1271_write32(wl, wl1271_translate_reg_addr(wl, addr), val);
+	/* write 2 to OCP_CMD */
+	wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_READ);
+
+	/* poll for data ready */
+	do {
+		val = wl1271_spi_read32(wl, OCP_DATA_READ);
+		timeout--;
+	} while (!(val & OCP_READY_MASK) && timeout);
+
+	if (!timeout) {
+		wl1271_warning("Top register access timed out.");
+		return 0xffff;
+	}
+
+	/* check data status and return if OK */
+	if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
+		return val & 0xffff;
+	else {
+		wl1271_warning("Top register access returned error.");
+		return 0xffff;
+	}
 }
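The translation above is plain window arithmetic: addresses inside the register window are rebased to sit directly after the memory window in the fixed virtual map, and everything else is assumed to be a memory-window address. A self-contained illustration with invented partition values, not the chip's real layout:

	#include <linux/types.h>

	struct example_part {
		u32 start;
		u32 size;
	};

	static u32 example_translate(u32 addr, const struct example_part *mem,
				     const struct example_part *reg)
	{
		if (addr >= reg->start && addr < reg->start + reg->size)
			return addr - reg->start + mem->size;

		return addr - mem->start;
	}

	/*
	 * With mem = { 0x00000000, 0x000177c0 } and reg = { 0x00300000, 0x00008800 }
	 * (values invented for this example), the register address 0x00300004
	 * translates to 0x000177c4, i.e. it lands right after the memory window.
	 */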
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
index 2c99684..cb7df1c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.h
@@ -29,10 +29,14 @@
 
 #define HW_ACCESS_MEMORY_MAX_RANGE		0x1FFC0
 
-#define HW_ACCESS_PART0_SIZE_ADDR           0x1FFC0
-#define HW_ACCESS_PART0_START_ADDR          0x1FFC4
-#define HW_ACCESS_PART1_SIZE_ADDR           0x1FFC8
-#define HW_ACCESS_PART1_START_ADDR          0x1FFCC
+#define HW_PARTITION_REGISTERS_ADDR         0x1ffc0
+#define HW_PART0_SIZE_ADDR                  (HW_PARTITION_REGISTERS_ADDR)
+#define HW_PART0_START_ADDR                 (HW_PARTITION_REGISTERS_ADDR + 4)
+#define HW_PART1_SIZE_ADDR                  (HW_PARTITION_REGISTERS_ADDR + 8)
+#define HW_PART1_START_ADDR                 (HW_PARTITION_REGISTERS_ADDR + 12)
+#define HW_PART2_SIZE_ADDR                  (HW_PARTITION_REGISTERS_ADDR + 16)
+#define HW_PART2_START_ADDR                 (HW_PARTITION_REGISTERS_ADDR + 20)
+#define HW_PART3_START_ADDR                 (HW_PARTITION_REGISTERS_ADDR + 24)
 
 #define HW_ACCESS_REGISTER_SIZE             4
 
@@ -67,47 +71,56 @@
 		((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
 #define HW_ACCESS_WSPI_INIT_CMD_MASK  0
 
+#define OCP_CMD_LOOP  32
+
+#define OCP_CMD_WRITE 0x1
+#define OCP_CMD_READ  0x2
+
+#define OCP_READY_MASK  BIT(18)
+#define OCP_STATUS_MASK (BIT(16) | BIT(17))
+
+#define OCP_STATUS_NO_RESP    0x00000
+#define OCP_STATUS_OK         0x10000
+#define OCP_STATUS_REQ_FAILED 0x20000
+#define OCP_STATUS_RESP_ERROR 0x30000
 
 /* Raw target IO, address is not translated */
-void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf,
+void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
 		      size_t len, bool fixed);
-void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf,
+void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
 		     size_t len, bool fixed);
 
-/* Memory target IO, address is tranlated to partition 0 */
-void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf, size_t len);
-void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf, size_t len);
-u32 wl1271_mem_read32(struct wl1271 *wl, int addr);
-void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val);
+/* Translated target IO */
+void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
+		     bool fixed);
+void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
+		      bool fixed);
+u32 wl1271_spi_read32(struct wl1271 *wl, int addr);
+void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val);
 
-/* Registers IO */
-void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len,
-			 bool fixed);
-void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len,
-			  bool fixed);
-u32 wl1271_reg_read32(struct wl1271 *wl, int addr);
-void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val);
+/* Top Register IO */
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
 
 /* INIT and RESET words */
 void wl1271_spi_reset(struct wl1271 *wl);
 void wl1271_spi_init(struct wl1271 *wl);
 int wl1271_set_partition(struct wl1271 *wl,
-			 u32 part_start, u32 part_size,
-			 u32 reg_start,  u32 reg_size);
+			 struct wl1271_partition_set *p);
 
-static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
+static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
 {
-	wl1271_spi_read(wl, addr, &wl->buffer_32,
-			sizeof(wl->buffer_32), false);
+	wl1271_spi_raw_read(wl, addr, &wl->buffer_32,
+			    sizeof(wl->buffer_32), false);
 
 	return wl->buffer_32;
 }
 
-static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
+static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
 {
 	wl->buffer_32 = val;
-	wl1271_spi_write(wl, addr, &wl->buffer_32,
-			 sizeof(wl->buffer_32), false);
+	wl1271_spi_raw_write(wl, addr, &wl->buffer_32,
+			     sizeof(wl->buffer_32), false);
 }
 
 #endif /* __WL1271_SPI_H__ */
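The header now separates raw (untranslated) accessors from translated ones. The distinction matters for registers that must be reachable independently of the current partition setup, such as the ELP control register the power-save code writes while the chip may be asleep. A short hedged sketch contrasting the two families, using only calls that appear elsewhere in this patch:

	static void example_io(struct wl1271 *wl)
	{
		/* addresses covered by the configured partitions go through
		 * the address translation */
		wl1271_spi_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);

		/* the ELP control register is written with the raw helpers so
		 * it works regardless of how the partitions are currently set */
		wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
	}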
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index ff22125..00af065 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -33,8 +33,7 @@
 static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
 {
 	int i;
-
-	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
+	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
 		if (wl->tx_frames[i] == NULL) {
 			wl->tx_frames[i] = skb;
 			return i;
@@ -58,8 +57,8 @@
 	/* approximate the number of blocks required for this packet
 	   in the firmware */
 	/* FIXME: try to figure out what is done here and make it cleaner */
-	total_blocks = (skb->len) >> TX_HW_BLOCK_SHIFT_DIV;
-	excluded = (total_blocks << 2) + (skb->len & 0xff) + 34;
+	total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV;
+	excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34;
 	total_blocks += (excluded > 252) ? 2 : 1;
 	total_blocks += TX_HW_BLOCK_SPARE;
 
@@ -89,15 +88,25 @@
 {
 	struct wl1271_tx_hw_descr *desc;
 	int pad;
+	u16 tx_attr;
 
 	desc = (struct wl1271_tx_hw_descr *) skb->data;
 
+	/* relocate space for security header */
+	if (extra) {
+		void *framestart = skb->data + sizeof(*desc);
+		u16 fc = *(u16 *)(framestart + extra);
+		int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
+		memmove(framestart, framestart + extra, hdrlen);
+	}
+
 	/* configure packet life time */
-	desc->start_time = jiffies_to_usecs(jiffies) - wl->time_offset;
-	desc->life_time = TX_HW_MGMT_PKT_LIFETIME_TU;
+	desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) -
+				       wl->time_offset);
+	desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
 
 	/* configure the tx attributes */
-	desc->tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+	tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
 	/* FIXME: do we know the packet priority? can we identify mgmt
 	   packets, and use max prio for them at least? */
 	desc->tid = 0;
@@ -106,11 +115,13 @@
 
 	/* align the length (and store in terms of words) */
 	pad = WL1271_TX_ALIGN(skb->len);
-	desc->length = pad >> 2;
+	desc->length = cpu_to_le16(pad >> 2);
 
 	/* calculate number of padding bytes */
 	pad = pad - skb->len;
-	desc->tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+	tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+
+	desc->tx_attr = cpu_to_le16(tx_attr);
 
 	wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
 	return 0;
@@ -147,11 +158,11 @@
 	len = WL1271_TX_ALIGN(skb->len);
 
 	/* perform a fixed address block write with the packet */
-	wl1271_spi_reg_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
+	wl1271_spi_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
 
 	/* write packet new counter into the write access register */
 	wl->tx_packets_count++;
-	wl1271_reg_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
+	wl1271_spi_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
 
 	desc = (struct wl1271_tx_hw_descr *) skb->data;
 	wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -254,14 +265,13 @@
 static void wl1271_tx_complete_packet(struct wl1271 *wl,
 				      struct wl1271_tx_hw_res_descr *result)
 {
-
 	struct ieee80211_tx_info *info;
 	struct sk_buff *skb;
-	u32 header_len;
+	u16 seq;
 	int id = result->id;
 
 	/* check for id legality */
-	if (id >= TX_HW_RESULT_QUEUE_LEN || wl->tx_frames[id] == NULL) {
+	if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) {
 		wl1271_warning("TX result illegal id: %d", id);
 		return;
 	}
@@ -284,22 +294,32 @@
 	/* info->status.retry_count = result->ack_failures; */
 	wl->stats.retry_count += result->ack_failures;
 
-	/* get header len */
+	/* update security sequence number */
+	seq = wl->tx_security_seq_16 +
+		(result->lsb_security_sequence_number -
+		 wl->tx_security_last_seq);
+	wl->tx_security_last_seq = result->lsb_security_sequence_number;
+
+	if (seq < wl->tx_security_seq_16)
+		wl->tx_security_seq_32++;
+	wl->tx_security_seq_16 = seq;
+
+	/* remove private header from packet */
+	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
+
+	/* remove TKIP header space if present */
 	if (info->control.hw_key &&
-	    info->control.hw_key->alg == ALG_TKIP)
-		header_len = WL1271_TKIP_IV_SPACE +
-			sizeof(struct wl1271_tx_hw_descr);
-	else
-		header_len = sizeof(struct wl1271_tx_hw_descr);
+	    info->control.hw_key->alg == ALG_TKIP) {
+		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+		memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
+		skb_pull(skb, WL1271_TKIP_IV_SPACE);
+	}
 
 	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
 		     " status 0x%x",
 		     result->id, skb, result->ack_failures,
 		     result->rate_class_index, result->status);
 
-	/* remove private header from packet */
-	skb_pull(skb, header_len);
-
 	/* return the packet to the stack */
 	ieee80211_tx_status(wl->hw, skb);
 	wl->tx_frames[result->id] = NULL;
@@ -315,8 +335,8 @@
 	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
 
 	/* read the tx results from the chipset */
-	wl1271_spi_mem_read(wl, memmap->tx_result,
-			    wl->tx_res_if, sizeof(*wl->tx_res_if));
+	wl1271_spi_read(wl, le32_to_cpu(memmap->tx_result),
+			wl->tx_res_if, sizeof(*wl->tx_res_if), false);
 
 	/* verify that the result buffer is not getting overrun */
 	if (count > TX_HW_RESULT_QUEUE_LEN) {
@@ -337,10 +357,10 @@
 	}
 
 	/* write host counter to chipset (to ack) */
-	wl1271_mem_write32(wl, memmap->tx_result +
+	wl1271_spi_write32(wl, le32_to_cpu(memmap->tx_result) +
 			   offsetof(struct wl1271_tx_hw_res_if,
 				    tx_result_host_counter),
-			   wl->tx_res_if->tx_result_fw_counter);
+			   le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
 }
 
 /* caller must hold wl->mutex */
@@ -364,7 +384,7 @@
 		ieee80211_tx_status(wl->hw, skb);
 	}
 
-	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
+	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
 		if (wl->tx_frames[i] != NULL) {
 			skb = wl->tx_frames[i];
 			info = IEEE80211_SKB_CB(skb);
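The TX-complete handler above rebuilds a wider TKIP sequence counter from the 8-bit LSB the firmware reports, bumping the 32-bit half whenever the 16-bit half wraps. The same idea in isolation, as a hedged sketch with invented names; the explicit u8 cast makes the modulo-256 delta visible, it is not the driver's exact code:

	#include <linux/types.h>

	struct example_seq {			/* hypothetical counter state */
		u16 seq_16;
		u32 seq_32;
		u8  last_lsb;
	};

	static void example_update_seq(struct example_seq *s, u8 fw_lsb)
	{
		/* the u8 subtraction wraps modulo 256, yielding the packet delta */
		u16 seq = s->seq_16 + (u8)(fw_lsb - s->last_lsb);

		s->last_lsb = fw_lsb;

		/* a 16-bit wrap means the high word has to be bumped */
		if (seq < s->seq_16)
			s->seq_32++;

		s->seq_16 = seq;
	}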
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 4a61406..416396c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -58,7 +58,7 @@
 
 struct wl1271_tx_hw_descr {
 	/* Length of packet in words, including descriptor+header+data */
-	u16 length;
+	__le16 length;
 	/* Number of extra memory blocks to allocate for this packet in
 	   addition to the number of blocks derived from the packet length */
 	u8 extra_mem_blocks;
@@ -67,12 +67,12 @@
 	   HW!! */
 	u8 total_mem_blocks;
 	/* Device time (in us) when the packet arrived to the driver */
-	u32 start_time;
+	__le32 start_time;
 	/* Max delay in TUs until transmission. The last device time the
 	   packet can be transmitted is: startTime+(1024*LifeTime) */
-	u16 life_time;
+	__le16 life_time;
 	/* Bitwise fields - see TX_ATTR... definitions above. */
-	u16 tx_attr;
+	__le16 tx_attr;
 	/* Packet identifier used also in the Tx-Result. */
 	u8 id;
 	/* The packet TID value (as User-Priority) */
@@ -100,12 +100,12 @@
 	   several possible reasons for failure. */
 	u8 status;
 	/* Total air access duration including all retrys and overheads.*/
-	u16 medium_usage;
+	__le16 medium_usage;
 	/* The time passed from host xfer to Tx-complete.*/
-	u32 fw_handling_time;
+	__le32 fw_handling_time;
 	/* Total media delay
 	   (from 1st EDCA AIFS counter until TX Complete). */
-	u32 medium_delay;
+	__le32 medium_delay;
 	/* LS-byte of last TKIP seq-num (saved per AC for recovery). */
 	u8 lsb_security_sequence_number;
 	/* Retry count - number of transmissions without successful ACK.*/
@@ -118,8 +118,8 @@
 } __attribute__ ((packed));
 
 struct wl1271_tx_hw_res_if {
-	u32 tx_result_fw_counter;
-	u32 tx_result_host_counter;
+	__le32 tx_result_fw_counter;
+	__le32 tx_result_host_counter;
 	struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
 } __attribute__ ((packed));
 
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 657c2db..055d7bc 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -122,8 +122,8 @@
 } __attribute__ ((packed));
 
 struct wl12xx_ps_poll_template {
-	u16 fc;
-	u16 aid;
+	__le16 fc;
+	__le16 aid;
 	u8 bssid[ETH_ALEN];
 	u8 ta[ETH_ALEN];
 } __attribute__ ((packed));
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 4e79a98..dfa1b9bc 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -755,7 +755,7 @@
 static int zd1211_hw_init_hmac(struct zd_chip *chip)
 {
 	static const struct zd_ioreq32 ioreqs[] = {
-		{ CR_ZD1211_RETRY_MAX,		0x2 },
+		{ CR_ZD1211_RETRY_MAX,		ZD1211_RETRY_COUNT },
 		{ CR_RX_THRESHOLD,		0x000c0640 },
 	};
 
@@ -767,7 +767,7 @@
 static int zd1211b_hw_init_hmac(struct zd_chip *chip)
 {
 	static const struct zd_ioreq32 ioreqs[] = {
-		{ CR_ZD1211B_RETRY_MAX,		0x02020202 },
+		{ CR_ZD1211B_RETRY_MAX,		ZD1211B_RETRY_COUNT },
 		{ CR_ZD1211B_CWIN_MAX_MIN_AC0,	0x007f003f },
 		{ CR_ZD1211B_CWIN_MAX_MIN_AC1,	0x007f003f },
 		{ CR_ZD1211B_CWIN_MAX_MIN_AC2,  0x003f001f },
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 678c139..9fd8f35 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -642,13 +642,29 @@
 #define CR_ZD1211B_TXOP			CTL_REG(0x0b20)
 #define CR_ZD1211B_RETRY_MAX		CTL_REG(0x0b28)
 
+/* Value for CR_ZD1211_RETRY_MAX & CR_ZD1211B_RETRY_MAX. The vendor driver
+ * uses 2, we use 0. The first rate is tried (count+2) times, then each
+ * subsequent rate is tried twice, until 1 Mbits is reached. */
+#define	ZD1211_RETRY_COUNT		0
+#define	ZD1211B_RETRY_COUNT	\
+	((ZD1211_RETRY_COUNT <<  0) |	\
+	 (ZD1211_RETRY_COUNT <<  8) |	\
+	 (ZD1211_RETRY_COUNT << 16) |	\
+	 (ZD1211_RETRY_COUNT << 24))
+
 /* Used to detect PLL lock */
 #define UW2453_INTR_REG			((zd_addr_t)0x85c1)
 
 #define CWIN_SIZE			0x007f043f
 
 
-#define HWINT_ENABLED			0x004f0000
+#define HWINT_ENABLED			\
+	(INT_TX_COMPLETE_EN|		\
+	 INT_RX_COMPLETE_EN|		\
+	 INT_RETRY_FAIL_EN|		\
+	 INT_WAKEUP_EN|			\
+	 INT_CFG_NEXT_BCN_EN)
+
 #define HWINT_DISABLED			0
 
 #define E2P_PWR_INT_GUARD		8
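CR_ZD1211B_RETRY_MAX is programmed with one retry count per byte, which is what the ZD1211B_RETRY_COUNT macro packs; with the vendor's count of 2 the same packing reproduces the old constant 0x02020202. A tiny illustration, macro name invented:

	#define EXAMPLE_PACK_RETRY(count)			\
		(((count) <<  0) | ((count) <<  8) |		\
		 ((count) << 16) | ((count) << 24))

	/* EXAMPLE_PACK_RETRY(2) == 0x02020202 and EXAMPLE_PACK_RETRY(0) == 0 */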
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6d66635..8a243732 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -88,6 +88,34 @@
 	  .flags = 0 },
 };
 
+/*
+ * Zydas retry rates table. Each line is listed in the same order as
+ * in zd_rates[] and contains all the rates used when a packet is sent
+ * starting with a given rate. Let's consider an example:
+ *
+ * "11 Mbits: 4, 3, 2, 1, 0" means:
+ * - packet is sent using 4 different rates
+ * - 1st rate is index 3 (ie 11 Mbits)
+ * - 2nd rate is index 2 (ie 5.5 Mbits)
+ * - 3rd rate is index 1 (ie 2 Mbits)
+ * - 4th rate is index 0 (ie 1 Mbits)
+ */
+
+static const struct tx_retry_rate zd_retry_rates[] = {
+	{ /*  1 Mbits */	1, { 0 }},
+	{ /*  2 Mbits */	2, { 1,  0 }},
+	{ /*  5.5 Mbits */	3, { 2,  1, 0 }},
+	{ /* 11 Mbits */	4, { 3,  2, 1, 0 }},
+	{ /*  6 Mbits */	5, { 4,  3, 2, 1, 0 }},
+	{ /*  9 Mbits */	6, { 5,  4, 3, 2, 1, 0}},
+	{ /* 12 Mbits */	5, { 6,  3, 2, 1, 0 }},
+	{ /* 18 Mbits */	6, { 7,  6, 3, 2, 1, 0 }},
+	{ /* 24 Mbits */	6, { 8,  6, 3, 2, 1, 0 }},
+	{ /* 36 Mbits */	7, { 9,  8, 6, 3, 2, 1, 0 }},
+	{ /* 48 Mbits */	8, {10,  9, 8, 6, 3, 2, 1, 0 }},
+	{ /* 54 Mbits */	9, {11, 10, 9, 8, 6, 3, 2, 1, 0 }}
+};
+
 static const struct ieee80211_channel zd_channels[] = {
 	{ .center_freq = 2412, .hw_value = 1 },
 	{ .center_freq = 2417, .hw_value = 2 },
@@ -282,7 +310,7 @@
 }
 
 /**
- * tx_status - reports tx status of a packet if required
+ * zd_mac_tx_status - reports tx status of a packet if required
  * @hw - a &struct ieee80211_hw pointer
  * @skb - a sk-buffer
  * @flags: extra flags to set in the TX status info
@@ -295,15 +323,49 @@
  *
  * If no status information has been requested, the skb is freed.
  */
-static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
-		      int ackssi, bool success)
+static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
+		      int ackssi, struct tx_status *tx_status)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	int i;
+	int success = 1, retry = 1;
+	int first_idx;
+	const struct tx_retry_rate *retries;
 
 	ieee80211_tx_info_clear_status(info);
 
-	if (success)
+	if (tx_status) {
+		success = !tx_status->failure;
+		retry = tx_status->retry + success;
+	}
+
+	if (success) {
+		/* success */
 		info->flags |= IEEE80211_TX_STAT_ACK;
+	} else {
+		/* failure */
+		info->flags &= ~IEEE80211_TX_STAT_ACK;
+	}
+
+	first_idx = info->status.rates[0].idx;
+	ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
+	retries = &zd_retry_rates[first_idx];
+	ZD_ASSERT(0<=retry && retry<=retries->count);
+
+	info->status.rates[0].idx = retries->rate[0];
+	info->status.rates[0].count = 1; /* (retry > 1 ? 2 : 1); */
+
+	for (i=1; i<IEEE80211_TX_MAX_RATES-1 && i<retry; i++) {
+		info->status.rates[i].idx = retries->rate[i];
+		info->status.rates[i].count = 1; /* ((i==retry-1) && success ? 1:2); */
+	}
+	for (; i<IEEE80211_TX_MAX_RATES && i<retry; i++) {
+		info->status.rates[i].idx = retries->rate[retry-1];
+		info->status.rates[i].count = 1; /* (success ? 1:2); */
+	}
+	if (i<IEEE80211_TX_MAX_RATES)
+		info->status.rates[i].idx = -1; /* terminate */
+
 	info->status.ack_signal = ackssi;
 	ieee80211_tx_status_irqsafe(hw, skb);
 }
@@ -316,16 +378,79 @@
  * transferred. The first frame from the tx queue, will be selected and
  * reported as error to the upper layers.
  */
-void zd_mac_tx_failed(struct ieee80211_hw *hw)
+void zd_mac_tx_failed(struct urb *urb)
 {
-	struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue;
+	struct ieee80211_hw * hw = zd_usb_to_hw(urb->context);
+	struct zd_mac *mac = zd_hw_mac(hw);
+	struct sk_buff_head *q = &mac->ack_wait_queue;
 	struct sk_buff *skb;
+	struct tx_status *tx_status = (struct tx_status *)urb->transfer_buffer;
+	unsigned long flags;
+	int success = !tx_status->failure;
+	int retry = tx_status->retry + success;
+	int found = 0;
+	int i, position = 0;
 
-	skb = skb_dequeue(q);
-	if (skb == NULL)
-		return;
+	q = &mac->ack_wait_queue;
+	spin_lock_irqsave(&q->lock, flags);
 
-	tx_status(hw, skb, 0, 0);
+	skb_queue_walk(q, skb) {
+		struct ieee80211_hdr *tx_hdr;
+		struct ieee80211_tx_info *info;
+		int first_idx, final_idx;
+		const struct tx_retry_rate *retries;
+		u8 final_rate;
+
+		position ++;
+
+		/* if the hardware reports a failure and we had an 802.11 ACK
+		 * pending, then we skip the first skb when searching for a
+		 * matching frame */
+		if (tx_status->failure && mac->ack_pending &&
+		    skb_queue_is_first(q, skb)) {
+			continue;
+		}
+
+		tx_hdr = (struct ieee80211_hdr *)skb->data;
+
+		/* we skip all frames not matching the reported destination */
+		if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
+			continue;
+		}
+
+		/* we skip all frames not matching the reported final rate */
+
+		info = IEEE80211_SKB_CB(skb);
+		first_idx = info->status.rates[0].idx;
+		ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
+		retries = &zd_retry_rates[first_idx];
+		if (retry < 0 || retry > retries->count) {
+			continue;
+		}
+
+		ZD_ASSERT(0<=retry && retry<=retries->count);
+		final_idx = retries->rate[retry-1];
+		final_rate = zd_rates[final_idx].hw_value;
+
+		if (final_rate != tx_status->rate) {
+			continue;
+		}
+
+		found = 1;
+		break;
+	}
+
+	if (found) {
+		for (i=1; i<=position; i++) {
+			skb = __skb_dequeue(q);
+			zd_mac_tx_status(hw, skb,
+					 mac->ack_pending ? mac->ack_signal : 0,
+					 i == position ? tx_status : NULL);
+			mac->ack_pending = 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 /**
@@ -342,18 +467,27 @@
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hw *hw = info->rate_driver_data[0];
+	struct zd_mac *mac = zd_hw_mac(hw);
+
+	ieee80211_tx_info_clear_status(info);
 
 	skb_pull(skb, sizeof(struct zd_ctrlset));
 	if (unlikely(error ||
 	    (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
-		tx_status(hw, skb, 0, !error);
+		/*
+		 * FIXME: do we need to fill in anything?
+		 */
+		ieee80211_tx_status_irqsafe(hw, skb);
 	} else {
-		struct sk_buff_head *q =
-			&zd_hw_mac(hw)->ack_wait_queue;
+		struct sk_buff_head *q = &mac->ack_wait_queue;
 
 		skb_queue_tail(q, skb);
-		while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS)
-			zd_mac_tx_failed(hw);
+		while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
+			zd_mac_tx_status(hw, skb_dequeue(q),
+					 mac->ack_pending ? mac->ack_signal : 0,
+					 NULL);
+			mac->ack_pending = 0;
+		}
 	}
 }
 
@@ -606,27 +740,47 @@
 static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
 		      struct ieee80211_rx_status *stats)
 {
+	struct zd_mac *mac = zd_hw_mac(hw);
 	struct sk_buff *skb;
 	struct sk_buff_head *q;
 	unsigned long flags;
+	int found = 0;
+	int i, position = 0;
 
 	if (!ieee80211_is_ack(rx_hdr->frame_control))
 		return 0;
 
-	q = &zd_hw_mac(hw)->ack_wait_queue;
+	q = &mac->ack_wait_queue;
 	spin_lock_irqsave(&q->lock, flags);
 	skb_queue_walk(q, skb) {
 		struct ieee80211_hdr *tx_hdr;
 
+		position ++;
+
+		if (mac->ack_pending && skb_queue_is_first(q, skb))
+		    continue;
+
 		tx_hdr = (struct ieee80211_hdr *)skb->data;
 		if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
 		{
-			__skb_unlink(skb, q);
-			tx_status(hw, skb, stats->signal, 1);
-			goto out;
+			found = 1;
+			break;
 		}
 	}
-out:
+
+	if (found) {
+		for (i=1; i<position; i++) {
+			skb = __skb_dequeue(q);
+			zd_mac_tx_status(hw, skb,
+					 mac->ack_pending ? mac->ack_signal : 0,
+					 NULL);
+			mac->ack_pending = 0;
+		}
+
+		mac->ack_pending = 1;
+		mac->ack_signal = stats->signal;
+	}
+
 	spin_unlock_irqrestore(&q->lock, flags);
 	return 1;
 }
@@ -709,6 +863,7 @@
 		skb_reserve(skb, 2);
 	}
 
+	/* FIXME: could we avoid this big memcpy? */
 	memcpy(skb_put(skb, length), buffer, length);
 
 	memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
@@ -999,7 +1154,14 @@
 	hw->queues = 1;
 	hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
 
+	/*
+	 * Tell mac80211 that we support multi rate retries
+	 */
+	hw->max_rates = IEEE80211_TX_MAX_RATES;
+	hw->max_rate_tries = 18;	/* 9 rates * 2 retries/rate */
+
 	skb_queue_head_init(&mac->ack_wait_queue);
+	mac->ack_pending = 0;
 
 	zd_chip_init(&mac->chip, hw, intf);
 	housekeeping_init(mac);
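The driver matches a completed frame by the final rate actually used: the retry table row is selected by the first requested rate, and the reported retry count indexes into that row. A worked sketch with an invented helper: a frame started at 11 Mbits (first_idx == 3) that the device reports after 3 attempts ends on zd_retry_rates[3].rate[2] == 1, i.e. 2 Mbits.

	static int example_final_rate_idx(int first_idx, int retry)
	{
		const struct tx_retry_rate *r = &zd_retry_rates[first_idx];

		if (retry < 1 || retry > r->count)
			return r->rate[0];	/* no usable report, assume first rate */

		return r->rate[retry - 1];
	}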
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 7c27591..630c298 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -140,6 +140,21 @@
 #define ZD_RX_CRC16_ERROR		0x40
 #define ZD_RX_ERROR			0x80
 
+struct tx_retry_rate {
+	int count;	/* number of valid elements in rate[] array */
+	int rate[10];	/* retry rates, described by an index in zd_rates[] */
+};
+
+struct tx_status {
+	u8 type;	/* must always be 0x01: USB_INT_TYPE */
+	u8 id;		/* must always be 0xa0: USB_INT_ID_RETRY_FAILED */
+	u8 rate;
+	u8 pad;
+	u8 mac[ETH_ALEN];
+	u8 retry;
+	u8 failure;
+} __attribute__((packed));
+
 enum mac_flags {
 	MAC_FIXED_CHANNEL = 0x01,
 };
@@ -150,7 +165,7 @@
 
 #define ZD_MAC_STATS_BUFFER_SIZE 16
 
-#define ZD_MAC_MAX_ACK_WAITERS 10
+#define ZD_MAC_MAX_ACK_WAITERS 50
 
 struct zd_mac {
 	struct zd_chip chip;
@@ -184,6 +199,12 @@
 
 	/* whether to pass control frames to stack */
 	unsigned int pass_ctrl:1;
+
+	/* whether we have received an 802.11 ACK that is pending */
+	unsigned int ack_pending:1;
+
+	/* signal strength of the last 802.11 ACK received */
+	int ack_signal;
 };
 
 #define ZD_REGDOMAIN_FCC	0x10
@@ -279,7 +300,7 @@
 int zd_mac_init_hw(struct ieee80211_hw *hw);
 
 int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
-void zd_mac_tx_failed(struct ieee80211_hw *hw);
+void zd_mac_tx_failed(struct urb *urb);
 void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
 
 #ifdef DEBUG
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 23a6a6d..d46f20a 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -419,7 +419,7 @@
 		handle_regs_int(urb);
 		break;
 	case USB_INT_ID_RETRY_FAILED:
-		zd_mac_tx_failed(zd_usb_to_hw(urb->context));
+		zd_mac_tx_failed(urb);
 		break;
 	default:
 		dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb,
@@ -553,6 +553,8 @@
 
 	if (length < sizeof(struct rx_length_info)) {
 		/* It's not a complete packet anyhow. */
+		printk("%s: invalid, small RX packet : %d\n",
+		       __func__, length);
 		return;
 	}
 	length_info = (struct rx_length_info *)
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index ff4617e..7c7914f 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -879,10 +879,10 @@
 	PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"),
 	PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"),
 	PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"),
-	PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"),  /* Sierra Wireless AC850 3G Network Adapter R1 */
-	PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"),	/* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
-	PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"),  /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
-	PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"),  /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
+	PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */
+	PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"),  /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
+	PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"),  /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
+	PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"),  /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
 	PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 538c570..f1dcd79 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -551,13 +551,13 @@
 	might_sleep_if(pdev->id.coreid != SSB_DEV_PCI);
 
 	/* Enable interrupts for this device. */
-	if (bus->host_pci &&
-	    ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) {
+	if ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE)) {
 		u32 coremask;
 
 		/* Calculate the "coremask" for the device. */
 		coremask = (1 << dev->core_index);
 
+		SSB_WARN_ON(bus->bustype != SSB_BUSTYPE_PCI);
 		err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp);
 		if (err)
 			goto out;
diff --git a/firmware/Makefile b/firmware/Makefile
index 45c0466..6d5c3ab 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -69,11 +69,13 @@
 fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin
 fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis \
 				     cis/DP83903.cis cis/NE2K.cis \
-				     cis/tamarack.cis
+				     cis/tamarack.cis cis/PE-200.cis
 fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis
 fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis
 fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
-				       cis/COMpad2.cis cis/COMpad4.cis
+				       cis/COMpad2.cis cis/COMpad4.cis \
+				       cis/SW_555_SER.cis cis/SW_7xx_SER.cis \
+				       cis/SW_8xx_SER.cis
 fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
 fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
 				      advansys/3550.bin advansys/38C0800.bin
diff --git a/firmware/WHENCE b/firmware/WHENCE
index a07aede..34b5d0a 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -600,6 +600,7 @@
       cis/DP83903.cis
       cis/NE2K.cis
       cis/tamarack.cis
+      cis/PE-200.cis
 
 Licence: GPL
 
@@ -633,6 +634,9 @@
       cis/RS-COM-2P.cis
       cis/COMpad2.cis
       cis/COMpad4.cis
+      cis/SW_555_SER.cis
+      cis/SW_7xx_SER.cis
+      cis/SW_8xx_SER.cis
 
 Licence: GPL
 
diff --git a/firmware/cis/PE-200.cis.ihex b/firmware/cis/PE-200.cis.ihex
new file mode 100644
index 0000000..e6dbdab
--- /dev/null
+++ b/firmware/cis/PE-200.cis.ihex
@@ -0,0 +1,9 @@
+:1000000001030000FF151E0401504D582020200060
+:1000100050452D3230300045544845524E4554002D
+:1000200052303100FF210206031A050101000101CF
+:100030001B0EC181190155E051000F100F30FFFF59
+:040040001400FF00A9
+:00000001FF
+#
+# Replacement CIS for PE-200 ethernet card
+#
diff --git a/firmware/cis/SW_555_SER.cis.ihex b/firmware/cis/SW_555_SER.cis.ihex
new file mode 100644
index 0000000..9b9348ac
--- /dev/null
+++ b/firmware/cis/SW_555_SER.cis.ihex
@@ -0,0 +1,12 @@
+:100000000101FF17034100FF20043F0110072102F7
+:100010000200152A070053696572726120576972E0
+:10002000656C657373004169724361726420353594
+:1000300035004135353500526576203100FF1A050F
+:1000400001030007731B0BE00118A360F8030730DE
+:10005000BC3F1B08A10108A360F802071B08A2010E
+:1000600008A360E803071B08A30108A360E80207D0
+:0A0070001B04A40108231400FF0084
+:00000001FF
+#
+# Replacement CIS for AC555 provided by Sierra Wireless
+#
diff --git a/firmware/cis/SW_7xx_SER.cis.ihex b/firmware/cis/SW_7xx_SER.cis.ihex
new file mode 100644
index 0000000..11e44ad
--- /dev/null
+++ b/firmware/cis/SW_7xx_SER.cis.ihex
@@ -0,0 +1,13 @@
+:100000000101FF17034100FF2004920110072102A4
+:1000100002001537070053696572726120576972D3
+:10002000656C6573730041433731302F4143373579
+:10003000300047505253204E6574776F726B2041E9
+:1000400064617074657200523100FF1A050103008B
+:1000500007731B10E00119784D555D25A360F80367
+:100060000730BC861B08A10108A360F802071B0823
+:10007000A20108A360E803071B08A30108A360E826
+:0C00800002071B04A40108231400FF0069
+:00000001FF
+#
+# Replacement CIS for AC7xx provided by Sierra Wireless
+#
diff --git a/firmware/cis/SW_8xx_SER.cis.ihex b/firmware/cis/SW_8xx_SER.cis.ihex
new file mode 100644
index 0000000..bbcfe63
--- /dev/null
+++ b/firmware/cis/SW_8xx_SER.cis.ihex
@@ -0,0 +1,13 @@
+:100000000101FF17034100FF2004920110072102A4
+:100010000200152F070053696572726120576972DB
+:10002000656C657373004143383530003347204EAB
+:100030006574776F726B20416461707465720052F1
+:100040003100FF1A0501030007731B10E001197846
+:100050004D555D25A360F8480730BC861B08A101FB
+:1000600008A360F847071B08A20108A360E8480737
+:100070001B08A30108A360E847071B04A401082389
+:040080001400FF0069
+:00000001FF
+#
+# Replacement CIS for AC8xx provided by Sierra Wireless
+#
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 9b64b6d..2b31b91 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
-#define PHY_BRCM_WIRESPEED_ENABLE	0x00000001
-#define PHY_BRCM_AUTO_PWRDWN_ENABLE	0x00000002
-#define PHY_BRCM_APD_CLK125_ENABLE	0x00000004
-#define PHY_BRCM_STD_IBND_DISABLE	0x00000008
-#define PHY_BRCM_EXT_IBND_RX_ENABLE	0x00000010
-#define PHY_BRCM_EXT_IBND_TX_ENABLE	0x00000020
+#define PHY_BCM_FLAGS_MODE_COPPER	0x00000001
+#define PHY_BCM_FLAGS_MODE_1000BX	0x00000002
+#define PHY_BCM_FLAGS_INTF_SGMII	0x00000010
+#define PHY_BCM_FLAGS_INTF_XAUI		0x00000020
+#define PHY_BRCM_WIRESPEED_ENABLE	0x00000100
+#define PHY_BRCM_AUTO_PWRDWN_ENABLE	0x00000200
+#define PHY_BRCM_RX_REFCLK_UNUSED	0x00000400
+#define PHY_BRCM_STD_IBND_DISABLE	0x00000800
+#define PHY_BRCM_EXT_IBND_RX_ENABLE	0x00001000
+#define PHY_BRCM_EXT_IBND_TX_ENABLE	0x00002000
+#define PHY_BRCM_CLEAR_RGMII_MODE	0x00004000
+#define PHY_BRCM_DIS_TXCRXC_NOENRGY	0x00008000
+#define PHY_BCM_FLAGS_VALID		0x80000000
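
The renumbered flags above are hints that a MAC driver hands to the Broadcom PHY driver through phy_device::dev_flags; the new layout keeps the PHY_BCM_FLAGS_* mode/interface bits and the PHY_BRCM_* behaviour bits in disjoint ranges so they can be OR-ed together. A minimal sketch of that pattern, assuming the usual phylib setup (the helper name and the chosen flags are illustrative, not part of this change):

#include <linux/phy.h>
#include <linux/brcmphy.h>

/* hypothetical helper called from a MAC driver's PHY probe path */
static void example_setup_brcm_phy(struct phy_device *phydev)
{
	/* request wirespeed and automatic power-down from the PHY driver;
	 * these bits no longer collide with the PHY_BCM_FLAGS_* values */
	phydev->dev_flags |= PHY_BRCM_WIRESPEED_ENABLE |
			     PHY_BRCM_AUTO_PWRDWN_ENABLE;
}
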
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 25085cb..6c507be 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -32,14 +32,12 @@
  * struct can_proto - CAN protocol structure
  * @type:       type argument in socket() syscall, e.g. SOCK_DGRAM.
  * @protocol:   protocol number in socket() syscall.
- * @capability: capability needed to open the socket, or -1 for no restriction.
  * @ops:        pointer to struct proto_ops for sock->ops.
  * @prot:       pointer to struct proto structure.
  */
 struct can_proto {
 	int              type;
 	int              protocol;
-	int              capability;
 	struct proto_ops *ops;
 	struct proto     *prot;
 };
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 1d3f7f0..1ed2a5c 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -68,4 +68,8 @@
 void can_get_echo_skb(struct net_device *dev, unsigned int idx);
 void can_free_echo_skb(struct net_device *dev, unsigned int idx);
 
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+				  struct can_frame **cf);
+
 #endif /* CAN_DEV_H */
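
alloc_can_skb() and alloc_can_err_skb() factor out the skb allocation and setup that every CAN driver's receive path used to open-code. A minimal sketch of the intended use, assuming an otherwise ordinary rx handler (the function and its arguments are hypothetical):

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* hypothetical rx path: hand one received frame (dlc <= 8) to the stack */
static void example_rx_frame(struct net_device *dev,
			     canid_t id, u8 dlc, const u8 *data)
{
	struct can_frame *cf;
	struct sk_buff *skb = alloc_can_skb(dev, &cf);

	if (!skb)
		return;				/* out of memory: drop the frame */

	cf->can_id  = id;
	cf->can_dlc = dlc;
	memcpy(cf->data, data, dlc);

	netif_rx(skb);
}
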
diff --git a/include/linux/dn.h b/include/linux/dn.h
index fe99908..9c50445 100644
--- a/include/linux/dn.h
+++ b/include/linux/dn.h
@@ -71,14 +71,12 @@
 /* Structures */
 
 
-struct dn_naddr 
-{
+struct dn_naddr {
 	__le16		a_len;
 	__u8 a_addr[DN_MAXADDL]; /* Two bytes little endian */
 };
 
-struct sockaddr_dn
-{
+struct sockaddr_dn {
 	__u16		sdn_family;
 	__u8		sdn_flags;
 	__u8		sdn_objnum;
@@ -101,8 +99,7 @@
         __u8   opt_data[16];   /* User data              */
 };
 
-struct accessdata_dn
-{
+struct accessdata_dn {
 	__u8		acc_accl;
 	__u8		acc_acc[DN_MAXACCL];
 	__u8 		acc_passl;
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index ec12cc743..034072c 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -3,8 +3,7 @@
 
 #include <linux/types.h>
 
-struct sock_extended_err
-{
+struct sock_extended_err {
 	__u32	ee_errno;	
 	__u8	ee_origin;
 	__u8	ee_type;
@@ -31,8 +30,7 @@
 
 #define SKB_EXT_ERR(skb) ((struct sock_exterr_skb *) ((skb)->cb))
 
-struct sock_exterr_skb
-{
+struct sock_exterr_skb {
 	union {
 		struct inet_skb_parm	h4;
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index eb1a48d..edd03b7 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -674,6 +674,8 @@
 #define	AH_V6_FLOW	0x0b
 #define	ESP_V6_FLOW	0x0c
 #define	IP_USER_FLOW	0x0d
+#define IPV4_FLOW       0x10
+#define IPV6_FLOW       0x11
 
 /* L3-L4 network traffic flow hash options */
 #define	RXH_L2DA	(1 << 1)
diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h
index 87b606b..c7e5b70 100644
--- a/include/linux/fib_rules.h
+++ b/include/linux/fib_rules.h
@@ -13,8 +13,7 @@
 /* try to find source address in routing lookups */
 #define FIB_RULE_FIND_SADDR	0x00010000
 
-struct fib_rule_hdr
-{
+struct fib_rule_hdr {
 	__u8		family;
 	__u8		dst_len;
 	__u8		src_len;
@@ -28,8 +27,7 @@
 	__u32		flags;
 };
 
-enum
-{
+enum {
 	FRA_UNSPEC,
 	FRA_DST,	/* destination address */
 	FRA_SRC,	/* source address */
@@ -52,8 +50,7 @@
 
 #define FRA_MAX (__FRA_MAX - 1)
 
-enum
-{
+enum {
 	FR_ACT_UNSPEC,
 	FR_ACT_TO_TBL,		/* Pass to fixed table */
 	FR_ACT_GOTO,		/* Jump to another rule */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1354aaf..29a0e3d 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -23,16 +23,14 @@
  *	the BPF code definitions which need to match so you can share filters
  */
  
-struct sock_filter	/* Filter block */
-{
+struct sock_filter {	/* Filter block */
 	__u16	code;   /* Actual filter code */
 	__u8	jt;	/* Jump true */
 	__u8	jf;	/* Jump false */
 	__u32	k;      /* Generic multiuse field */
 };
 
-struct sock_fprog	/* Required for SO_ATTACH_FILTER. */
-{
+struct sock_fprog {	/* Required for SO_ATTACH_FILTER. */
 	unsigned short		len;	/* Number of filter blocks */
 	struct sock_filter __user *filter;
 };
@@ -123,7 +121,9 @@
 #define SKF_AD_IFINDEX 	8
 #define SKF_AD_NLATTR	12
 #define SKF_AD_NLATTR_NEST	16
-#define SKF_AD_MAX	20
+#define SKF_AD_MARK 	20
+#define SKF_AD_QUEUE	24
+#define SKF_AD_MAX	28
 #define SKF_NET_OFF   (-0x100000)
 #define SKF_LL_OFF    (-0x200000)
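
SKF_AD_MARK and SKF_AD_QUEUE extend the ancillary data area that classic BPF programs reach through negative offsets. A userspace sketch of a socket filter using the new mark offset (the mark value 42 and the attachment snippet are illustrative only):

#include <linux/filter.h>
#include <sys/socket.h>

/* accept only packets whose skb->mark equals 42, drop the rest */
static struct sock_filter mark_filter[] = {
	BPF_STMT(BPF_LD  | BPF_W   | BPF_ABS, SKF_AD_OFF + SKF_AD_MARK),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 42, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* match: pass up to 0xffff bytes */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* no match: drop */
};

static struct sock_fprog mark_prog = {
	.len	= sizeof(mark_filter) / sizeof(mark_filter[0]),
	.filter	= mark_filter,
};

/* attached in the usual way:
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &mark_prog, sizeof(mark_prog));
 */
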
 
diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h
index 710e901..552c8a0 100644
--- a/include/linux/gen_stats.h
+++ b/include/linux/gen_stats.h
@@ -18,13 +18,11 @@
  * @bytes: number of seen bytes
  * @packets: number of seen packets
  */
-struct gnet_stats_basic
-{
+struct gnet_stats_basic {
 	__u64	bytes;
 	__u32	packets;
 };
-struct gnet_stats_basic_packed
-{
+struct gnet_stats_basic_packed {
 	__u64	bytes;
 	__u32	packets;
 } __attribute__ ((packed));
@@ -34,8 +32,7 @@
  * @bps: current byte rate
  * @pps: current packet rate
  */
-struct gnet_stats_rate_est
-{
+struct gnet_stats_rate_est {
 	__u32	bps;
 	__u32	pps;
 };
@@ -48,8 +45,7 @@
  * @requeues: number of requeues
  * @overlimits: number of enqueues over the limit
  */
-struct gnet_stats_queue
-{
+struct gnet_stats_queue {
 	__u32	qlen;
 	__u32	backlog;
 	__u32	drops;
@@ -62,8 +58,7 @@
  * @interval: sampling period
  * @ewma_log: the log of measurement window weight
  */
-struct gnet_estimator
-{
+struct gnet_estimator {
 	signed char	interval;
 	unsigned char	ewma_log;
 };
diff --git a/include/linux/if.h b/include/linux/if.h
index b9a6229..3b2a46b 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -125,8 +125,7 @@
  *	being very small might be worth keeping for clean configuration.
  */
 
-struct ifmap 
-{
+struct ifmap {
 	unsigned long mem_start;
 	unsigned long mem_end;
 	unsigned short base_addr; 
@@ -136,8 +135,7 @@
 	/* 3 bytes spare */
 };
 
-struct if_settings
-{
+struct if_settings {
 	unsigned int type;	/* Type of physical device or protocol */
 	unsigned int size;	/* Size of the data allocated by the caller */
 	union {
@@ -161,8 +159,7 @@
  * remainder may be interface specific.
  */
 
-struct ifreq 
-{
+struct ifreq {
 #define IFHWADDRLEN	6
 	union
 	{
@@ -211,11 +208,9 @@
  * must know all networks accessible).
  */
 
-struct ifconf 
-{
+struct ifconf {
 	int	ifc_len;			/* size of buffer	*/
-	union 
-	{
+	union {
 		char __user *ifcu_buf;
 		struct ifreq __user *ifcu_req;
 	} ifc_ifcu;
diff --git a/include/linux/if_addr.h b/include/linux/if_addr.h
index fd97404..23357ab 100644
--- a/include/linux/if_addr.h
+++ b/include/linux/if_addr.h
@@ -4,8 +4,7 @@
 #include <linux/types.h>
 #include <linux/netlink.h>
 
-struct ifaddrmsg
-{
+struct ifaddrmsg {
 	__u8		ifa_family;
 	__u8		ifa_prefixlen;	/* The prefix length		*/
 	__u8		ifa_flags;	/* Flags			*/
@@ -20,8 +19,7 @@
  * but for point-to-point IFA_ADDRESS is DESTINATION address,
  * local address is supplied in IFA_LOCAL attribute.
  */
-enum
-{
+enum {
 	IFA_UNSPEC,
 	IFA_ADDRESS,
 	IFA_LOCAL,
@@ -47,8 +45,7 @@
 #define IFA_F_TENTATIVE		0x40
 #define IFA_F_PERMANENT		0x80
 
-struct ifa_cacheinfo
-{
+struct ifa_cacheinfo {
 	__u32	ifa_prefered;
 	__u32	ifa_valid;
 	__u32	cstamp; /* created timestamp, hundredths of seconds */
diff --git a/include/linux/if_addrlabel.h b/include/linux/if_addrlabel.h
index 89571f6..54580c2 100644
--- a/include/linux/if_addrlabel.h
+++ b/include/linux/if_addrlabel.h
@@ -12,8 +12,7 @@
 
 #include <linux/types.h>
 
-struct ifaddrlblmsg
-{
+struct ifaddrlblmsg {
 	__u8		ifal_family;		/* Address family */
 	__u8		__ifal_reserved;	/* Reserved */
 	__u8		ifal_prefixlen;		/* Prefix length */
@@ -22,8 +21,7 @@
 	__u32		ifal_seq;		/* sequence number */
 };
 
-enum
-{
+enum {
 	IFAL_ADDRESS = 1,
 	IFAL_LABEL = 2,
 	__IFAL_MAX
diff --git a/include/linux/if_arcnet.h b/include/linux/if_arcnet.h
index 0835deb..46e34bd 100644
--- a/include/linux/if_arcnet.h
+++ b/include/linux/if_arcnet.h
@@ -56,8 +56,7 @@
 /*
  * The RFC1201-specific components of an arcnet packet header.
  */
-struct arc_rfc1201
-{
+struct arc_rfc1201 {
     __u8  proto;		/* protocol ID field - varies		*/
     __u8  split_flag;	/* for use with split packets		*/
     __be16   sequence;		/* sequence number			*/
@@ -69,8 +68,7 @@
 /*
  * The RFC1051-specific components.
  */
-struct arc_rfc1051
-{
+struct arc_rfc1051 {
     __u8 proto;		/* ARC_P_RFC1051_ARP/RFC1051_IP	*/
     __u8 payload[0];		/* 507 bytes			*/
 };
@@ -81,8 +79,7 @@
  * The ethernet-encap-specific components.  We have a real ethernet header
  * and some data.
  */
-struct arc_eth_encap
-{
+struct arc_eth_encap {
     __u8 proto;		/* Always ARC_P_ETHER			*/
     struct ethhdr eth;		/* standard ethernet header (yuck!)	*/
     __u8 payload[0];		/* 493 bytes				*/
@@ -90,8 +87,7 @@
 #define ETH_ENCAP_HDR_SIZE 14
 
 
-struct arc_cap
-{
+struct arc_cap {
 	__u8 proto;
 	__u8 cookie[sizeof(int)];   /* Actually NOT sent over the network */
 	union {
@@ -108,8 +104,7 @@
  * the _end_ of the 512-byte buffer.  We hide this complexity inside the
  * driver.
  */
-struct arc_hardware
-{
+struct arc_hardware {
     __u8  source,		/* source ARCnet - filled in automagically */
              dest,		/* destination ARCnet - 0 for broadcast    */
     	     offset[2];		/* offset bytes (some weird semantics)     */
@@ -120,8 +115,7 @@
  * This is an ARCnet frame header, as seen by the kernel (and userspace,
  * when you do a raw packet capture).
  */
-struct archdr
-{
+struct archdr {
     /* hardware requirements */
     struct arc_hardware hard;
      
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 282eb37..e80b7f8 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -133,8 +133,7 @@
  *	This structure defines an ethernet arp header.
  */
 
-struct arphdr
-{
+struct arphdr {
 	__be16		ar_hrd;		/* format of hardware address	*/
 	__be16		ar_pro;		/* format of protocol address	*/
 	unsigned char	ar_hln;		/* length of hardware address	*/
diff --git a/include/linux/if_bonding.h b/include/linux/if_bonding.h
index 65c2d24..cd525fa 100644
--- a/include/linux/if_bonding.h
+++ b/include/linux/if_bonding.h
@@ -94,8 +94,7 @@
 	__s32 miimon;
 } ifbond;
 
-typedef struct ifslave
-{
+typedef struct ifslave {
 	__s32 slave_id; /* Used as an IN param to the BOND_SLAVE_INFO_QUERY ioctl */
 	char slave_name[IFNAMSIZ];
 	__s8 link;
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 6badb3e..938b7e8 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -49,8 +49,7 @@
 #define BR_STATE_FORWARDING 3
 #define BR_STATE_BLOCKING 4
 
-struct __bridge_info
-{
+struct __bridge_info {
 	__u64 designated_root;
 	__u64 bridge_id;
 	__u32 root_path_cost;
@@ -72,8 +71,7 @@
 	__u32 gc_timer_value;
 };
 
-struct __port_info
-{
+struct __port_info {
 	__u64 designated_root;
 	__u64 designated_bridge;
 	__u16 port_id;
@@ -89,8 +87,7 @@
 	__u32 hold_timer_value;
 };
 
-struct __fdb_entry
-{
+struct __fdb_entry {
 	__u8 mac_addr[6];
 	__u8 port_no;
 	__u8 is_local;
diff --git a/include/linux/if_ec.h b/include/linux/if_ec.h
index e7499aa..d85f9f4 100644
--- a/include/linux/if_ec.h
+++ b/include/linux/if_ec.h
@@ -5,14 +5,12 @@
 
 /* User visible stuff. Glibc provides its own but libc5 folk will use these */
 
-struct ec_addr
-{
+struct ec_addr {
   unsigned char station;		/* Station number.  */
   unsigned char net;			/* Network number.  */
 };
 
-struct sockaddr_ec
-{
+struct sockaddr_ec {
   unsigned short sec_family;
   unsigned char port;			/* Port number.  */
   unsigned char cb;			/* Control/flag byte.  */
@@ -37,8 +35,7 @@
 #define EC_HLEN				6
 
 /* This is what an Econet frame looks like on the wire. */
-struct ec_framehdr 
-{
+struct ec_framehdr {
   unsigned char dst_stn;
   unsigned char dst_net;
   unsigned char src_stn;
@@ -62,8 +59,7 @@
 	return (struct econet_sock *)sk;
 }
 
-struct ec_device
-{
+struct ec_device {
   unsigned char station, net;		/* Econet protocol address */
 };
 
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h
index 45de104..5459c5c 100644
--- a/include/linux/if_fddi.h
+++ b/include/linux/if_fddi.h
@@ -63,36 +63,32 @@
 #define FDDI_UI_CMD			0x03
 
 /* Define 802.2 Type 1 header */
-struct fddi_8022_1_hdr
-	{
+struct fddi_8022_1_hdr {
 	__u8	dsap;					/* destination service access point */
 	__u8	ssap;					/* source service access point */
 	__u8	ctrl;					/* control byte #1 */
-	} __attribute__ ((packed));
+} __attribute__ ((packed));
 
 /* Define 802.2 Type 2 header */
-struct fddi_8022_2_hdr
-	{
+struct fddi_8022_2_hdr {
 	__u8	dsap;					/* destination service access point */
 	__u8	ssap;					/* source service access point */
 	__u8	ctrl_1;					/* control byte #1 */
 	__u8	ctrl_2;					/* control byte #2 */
-	} __attribute__ ((packed));
+} __attribute__ ((packed));
 
 /* Define 802.2 SNAP header */
 #define FDDI_K_OUI_LEN	3
-struct fddi_snap_hdr
-	{
+struct fddi_snap_hdr {
 	__u8	dsap;					/* always 0xAA */
 	__u8	ssap;					/* always 0xAA */
 	__u8	ctrl;					/* always 0x03 */
 	__u8	oui[FDDI_K_OUI_LEN];	/* organizational universal id */
 	__be16	ethertype;				/* packet type ID field */
-	} __attribute__ ((packed));
+} __attribute__ ((packed));
 
 /* Define FDDI LLC frame header */
-struct fddihdr
-	{
+struct fddihdr {
 	__u8	fc;						/* frame control */
 	__u8	daddr[FDDI_K_ALEN];		/* destination address */
 	__u8	saddr[FDDI_K_ALEN];		/* source address */
@@ -102,7 +98,7 @@
 		struct fddi_8022_2_hdr		llc_8022_2;
 		struct fddi_snap_hdr		llc_snap;
 		} hdr;
-	} __attribute__ ((packed));
+} __attribute__ ((packed));
 
 #ifdef __KERNEL__
 #include <linux/netdevice.h>
@@ -197,7 +193,7 @@
 	__u32	port_pc_withhold[2];
 	__u32	port_ler_flag[2];
 	__u32	port_hardware_present[2];
-	};
+};
 #endif /* __KERNEL__ */
 
 #endif	/* _LINUX_IF_FDDI_H */
diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h
index 4a7c994..8d038eb 100644
--- a/include/linux/if_hippi.h
+++ b/include/linux/if_hippi.h
@@ -51,8 +51,7 @@
  *	HIPPI statistics collection data. 
  */
  
-struct hipnet_statistics
-{
+struct hipnet_statistics {
 	int	rx_packets;		/* total packets received	*/
 	int	tx_packets;		/* total packets transmitted	*/
 	int	rx_errors;		/* bad packets received		*/
@@ -77,8 +76,7 @@
 };
 
 
-struct hippi_fp_hdr
-{
+struct hippi_fp_hdr {
 #if 0
 	__u8		ulp;				/* must contain 4 */
 #if defined (__BIG_ENDIAN_BITFIELD)
@@ -108,8 +106,7 @@
 	__be32		d2_size;
 } __attribute__ ((packed));
 
-struct hippi_le_hdr
-{
+struct hippi_le_hdr {
 #if defined (__BIG_ENDIAN_BITFIELD)
 	__u8		fc:3;
 	__u8		double_wide:1;
@@ -139,8 +136,7 @@
  * Looks like the dsap and ssap fields have been swapped by mistake in
  * RFC 2067 "IP over HIPPI".
  */
-struct hippi_snap_hdr
-{
+struct hippi_snap_hdr {
 	__u8	dsap;			/* always 0xAA */
 	__u8	ssap;			/* always 0xAA */
 	__u8	ctrl;			/* always 0x03 */
@@ -148,8 +144,7 @@
 	__be16	ethertype;		/* packet type ID field */
 } __attribute__ ((packed));
 
-struct hippi_hdr
-{
+struct hippi_hdr {
 	struct hippi_fp_hdr	fp;
 	struct hippi_le_hdr	le;
 	struct hippi_snap_hdr	snap;
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 176c518..1d3b242 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -5,8 +5,7 @@
 #include <linux/netlink.h>
 
 /* The struct should be in sync with struct net_device_stats */
-struct rtnl_link_stats
-{
+struct rtnl_link_stats {
 	__u32	rx_packets;		/* total packets received	*/
 	__u32	tx_packets;		/* total packets transmitted	*/
 	__u32	rx_bytes;		/* total bytes received 	*/
@@ -39,8 +38,7 @@
 };
 
 /* The struct should be in sync with struct ifmap */
-struct rtnl_link_ifmap
-{
+struct rtnl_link_ifmap {
 	__u64	mem_start;
 	__u64	mem_end;
 	__u64	base_addr;
@@ -49,8 +47,7 @@
 	__u8	port;
 };
 
-enum
-{
+enum {
 	IFLA_UNSPEC,
 	IFLA_ADDRESS,
 	IFLA_BROADCAST,
@@ -123,8 +120,7 @@
  */
 
 /* Subtype attributes for IFLA_PROTINFO */
-enum
-{
+enum {
 	IFLA_INET6_UNSPEC,
 	IFLA_INET6_FLAGS,	/* link flags			*/
 	IFLA_INET6_CONF,	/* sysctl parameters		*/
@@ -137,16 +133,14 @@
 
 #define IFLA_INET6_MAX	(__IFLA_INET6_MAX - 1)
 
-struct ifla_cacheinfo
-{
+struct ifla_cacheinfo {
 	__u32	max_reasm_len;
 	__u32	tstamp;		/* ipv6InterfaceTable updated timestamp */
 	__u32	reachable_time;
 	__u32	retrans_time;
 };
 
-enum
-{
+enum {
 	IFLA_INFO_UNSPEC,
 	IFLA_INFO_KIND,
 	IFLA_INFO_DATA,
@@ -158,8 +152,7 @@
 
 /* VLAN section */
 
-enum
-{
+enum {
 	IFLA_VLAN_UNSPEC,
 	IFLA_VLAN_ID,
 	IFLA_VLAN_FLAGS,
@@ -175,8 +168,7 @@
 	__u32	mask;
 };
 
-enum
-{
+enum {
 	IFLA_VLAN_QOS_UNSPEC,
 	IFLA_VLAN_QOS_MAPPING,
 	__IFLA_VLAN_QOS_MAX
@@ -184,8 +176,7 @@
 
 #define IFLA_VLAN_QOS_MAX	(__IFLA_VLAN_QOS_MAX - 1)
 
-struct ifla_vlan_qos_mapping
-{
+struct ifla_vlan_qos_mapping {
 	__u32 from;
 	__u32 to;
 };
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index dea7d6b..4021d47 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -3,15 +3,13 @@
 
 #include <linux/types.h>
 
-struct sockaddr_pkt
-{
+struct sockaddr_pkt {
 	unsigned short spkt_family;
 	unsigned char spkt_device[14];
 	__be16 spkt_protocol;
 };
 
-struct sockaddr_ll
-{
+struct sockaddr_ll {
 	unsigned short	sll_family;
 	__be16		sll_protocol;
 	int		sll_ifindex;
@@ -49,14 +47,12 @@
 #define PACKET_TX_RING			13
 #define PACKET_LOSS			14
 
-struct tpacket_stats
-{
+struct tpacket_stats {
 	unsigned int	tp_packets;
 	unsigned int	tp_drops;
 };
 
-struct tpacket_auxdata
-{
+struct tpacket_auxdata {
 	__u32		tp_status;
 	__u32		tp_len;
 	__u32		tp_snaplen;
@@ -78,8 +74,7 @@
 #define TP_STATUS_SENDING	0x2
 #define TP_STATUS_WRONG_FORMAT	0x4
 
-struct tpacket_hdr
-{
+struct tpacket_hdr {
 	unsigned long	tp_status;
 	unsigned int	tp_len;
 	unsigned int	tp_snaplen;
@@ -93,8 +88,7 @@
 #define TPACKET_ALIGN(x)	(((x)+TPACKET_ALIGNMENT-1)&~(TPACKET_ALIGNMENT-1))
 #define TPACKET_HDRLEN		(TPACKET_ALIGN(sizeof(struct tpacket_hdr)) + sizeof(struct sockaddr_ll))
 
-struct tpacket2_hdr
-{
+struct tpacket2_hdr {
 	__u32		tp_status;
 	__u32		tp_len;
 	__u32		tp_snaplen;
@@ -107,8 +101,7 @@
 
 #define TPACKET2_HDRLEN		(TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
 
-enum tpacket_versions
-{
+enum tpacket_versions {
 	TPACKET_V1,
 	TPACKET_V2,
 };
@@ -126,16 +119,14 @@
    - Pad to align to TPACKET_ALIGNMENT=16
  */
 
-struct tpacket_req
-{
+struct tpacket_req {
 	unsigned int	tp_block_size;	/* Minimal size of contiguous block */
 	unsigned int	tp_block_nr;	/* Number of blocks */
 	unsigned int	tp_frame_size;	/* Size of frame */
 	unsigned int	tp_frame_nr;	/* Total number of frames */
 };
 
-struct packet_mreq
-{
+struct packet_mreq {
 	int		mr_ifindex;
 	unsigned short	mr_type;
 	unsigned short	mr_alen;
diff --git a/include/linux/if_plip.h b/include/linux/if_plip.h
index 153a649..6298c7e 100644
--- a/include/linux/if_plip.h
+++ b/include/linux/if_plip.h
@@ -15,8 +15,7 @@
 
 #define	SIOCDEVPLIP	SIOCDEVPRIVATE
 
-struct plipconf
-{
+struct plipconf {
 	unsigned short pcmd;
 	unsigned long  nibble;
 	unsigned long  trigger;
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index 3a14b08..c58baea 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -24,8 +24,7 @@
 /* Structure used to connect() the socket to a particular tunnel UDP
  * socket.
  */
-struct pppol2tp_addr
-{
+struct pppol2tp_addr {
 	__kernel_pid_t	pid;		/* pid that owns the fd.
 					 * 0 => current */
 	int	fd;			/* FD of UDP socket to use */
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 8d76cb4..1822d63 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -30,8 +30,7 @@
 #define GRE_FLAGS	__cpu_to_be16(0x00F8)
 #define GRE_VERSION	__cpu_to_be16(0x0007)
 
-struct ip_tunnel_parm
-{
+struct ip_tunnel_parm {
 	char			name[IFNAMSIZ];
 	int			link;
 	__be16			i_flags;
@@ -63,8 +62,7 @@
 	__u16			relay_prefixlen;
 };
 
-enum
-{
+enum {
 	IFLA_GRE_UNSPEC,
 	IFLA_GRE_LINK,
 	IFLA_GRE_IFLAGS,
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 7ff9af1..153f6b9 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -63,7 +63,11 @@
 	return (struct vlan_ethhdr *)skb_mac_header(skb);
 }
 
-#define VLAN_VID_MASK	0xfff
+#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
+#define VLAN_PRIO_SHIFT		13
+#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator */
+#define VLAN_TAG_PRESENT	VLAN_CFI_MASK
+#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
 
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
@@ -81,6 +85,7 @@
 					    * the vlan is attached to.
 					    */
 	unsigned int		nr_vlans;
+	int			killall;
 	struct hlist_node	hlist;	/* linked list */
 	struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
 	struct rcu_head		rcu;
@@ -105,8 +110,8 @@
 	array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
 }
 
-#define vlan_tx_tag_present(__skb)	((__skb)->vlan_tci)
-#define vlan_tx_tag_get(__skb)		((__skb)->vlan_tci)
+#define vlan_tx_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define vlan_tx_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
@@ -115,10 +120,12 @@
 extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 			     u16 vlan_tci, int polling);
 extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
-extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
-			    unsigned int vlan_tci, struct sk_buff *skb);
-extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
-			  unsigned int vlan_tci);
+extern gro_result_t
+vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+		 unsigned int vlan_tci, struct sk_buff *skb);
+extern gro_result_t
+vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+	       unsigned int vlan_tci);
 
 #else
 static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -145,17 +152,18 @@
 	return 0;
 }
 
-static inline int vlan_gro_receive(struct napi_struct *napi,
-				   struct vlan_group *grp,
-				   unsigned int vlan_tci, struct sk_buff *skb)
+static inline gro_result_t
+vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+		 unsigned int vlan_tci, struct sk_buff *skb)
 {
-	return NET_RX_DROP;
+	return GRO_DROP;
 }
 
-static inline int vlan_gro_frags(struct napi_struct *napi,
-				 struct vlan_group *grp, unsigned int vlan_tci)
+static inline gro_result_t
+vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+	       unsigned int vlan_tci)
 {
-	return NET_RX_DROP;
+	return GRO_DROP;
 }
 #endif
 
@@ -231,7 +239,7 @@
 static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
 						     u16 vlan_tci)
 {
-	skb->vlan_tci = vlan_tci;
+	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
 	return skb;
 }
 
@@ -284,7 +292,7 @@
 					 u16 *vlan_tci)
 {
 	if (vlan_tx_tag_present(skb)) {
-		*vlan_tci = skb->vlan_tci;
+		*vlan_tci = vlan_tx_tag_get(skb);
 		return 0;
 	} else {
 		*vlan_tci = 0;
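
Because the CFI bit now doubles as VLAN_TAG_PRESENT inside skb->vlan_tci, a hardware-accelerated tag with VID 0 and priority 0 is distinguishable from "no tag at all". A small sketch of how a TCI decomposes under the new masks (the helper is hypothetical):

#include <linux/if_vlan.h>

static inline u16 example_build_tci(u16 prio, u16 vid)
{
	return ((prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK) |
	       (vid & VLAN_VID_MASK);
}

/* __vlan_hwaccel_put_tag(skb, example_build_tci(5, 100)) also sets
 * VLAN_TAG_PRESENT, so a later
 *	if (vlan_tx_tag_present(skb))
 *		vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;
 * keeps working even when both the priority and the VID are zero.
 */
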
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index fe158e0..724c27e 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -27,8 +27,7 @@
  *	Header in on cable format
  */
 
-struct igmphdr
-{
+struct igmphdr {
 	__u8 type;
 	__u8 code;		/* For newer IGMP */
 	__sum16 csum;
@@ -151,8 +150,7 @@
 extern int sysctl_igmp_max_memberships;
 extern int sysctl_igmp_max_msf;
 
-struct ip_sf_socklist
-{
+struct ip_sf_socklist {
 	unsigned int		sl_max;
 	unsigned int		sl_count;
 	__be32			sl_addr[0];
@@ -167,16 +165,14 @@
    this list never used in fast path code
  */
 
-struct ip_mc_socklist
-{
+struct ip_mc_socklist {
 	struct ip_mc_socklist	*next;
 	struct ip_mreqn		multi;
 	unsigned int		sfmode;		/* MCAST_{INCLUDE,EXCLUDE} */
 	struct ip_sf_socklist	*sflist;
 };
 
-struct ip_sf_list
-{
+struct ip_sf_list {
 	struct ip_sf_list	*sf_next;
 	__be32			sf_inaddr;
 	unsigned long		sf_count[2];	/* include/exclude counts */
@@ -185,8 +181,7 @@
 	unsigned char		sf_crcount;	/* retrans. left to send */
 };
 
-struct ip_mc_list
-{
+struct ip_mc_list {
 	struct in_device	*interface;
 	__be32			multiaddr;
 	struct ip_sf_list	*sources;
diff --git a/include/linux/in.h b/include/linux/in.h
index cf196da..b615649 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -118,14 +118,12 @@
 
 /* Request struct for multicast socket ops */
 
-struct ip_mreq 
-{
+struct ip_mreq {
 	struct in_addr imr_multiaddr;	/* IP multicast address of group */
 	struct in_addr imr_interface;	/* local IP address of interface */
 };
 
-struct ip_mreqn
-{
+struct ip_mreqn {
 	struct in_addr	imr_multiaddr;		/* IP multicast address of group */
 	struct in_addr	imr_address;		/* local IP address of interface */
 	int		imr_ifindex;		/* Interface index */
@@ -149,21 +147,18 @@
 	(sizeof(struct ip_msfilter) - sizeof(__u32) \
 	+ (numsrc) * sizeof(__u32))
 
-struct group_req
-{
+struct group_req {
 	__u32				 gr_interface;	/* interface index */
 	struct __kernel_sockaddr_storage gr_group;	/* group address */
 };
 
-struct group_source_req
-{
+struct group_source_req {
 	__u32				 gsr_interface;	/* interface index */
 	struct __kernel_sockaddr_storage gsr_group;	/* group address */
 	struct __kernel_sockaddr_storage gsr_source;	/* source address */
 };
 
-struct group_filter
-{
+struct group_filter {
 	__u32				 gf_interface;	/* interface index */
 	struct __kernel_sockaddr_storage gf_group;	/* multicast address */
 	__u32				 gf_fmode;	/* filter mode */
@@ -175,8 +170,7 @@
 	(sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \
 	+ (numsrc) * sizeof(struct __kernel_sockaddr_storage))
 
-struct in_pktinfo
-{
+struct in_pktinfo {
 	int		ipi_ifindex;
 	struct in_addr	ipi_spec_dst;
 	struct in_addr	ipi_addr;
diff --git a/include/linux/in6.h b/include/linux/in6.h
index 718bf21..dfa2916 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -27,10 +27,8 @@
  *	IPv6 address structure
  */
 
-struct in6_addr
-{
-	union 
-	{
+struct in6_addr {
+	union {
 		__u8		u6_addr8[16];
 		__be16		u6_addr16[8];
 		__be32		u6_addr32[4];
@@ -75,8 +73,7 @@
 
 #define ipv6mr_acaddr	ipv6mr_multiaddr
 
-struct in6_flowlabel_req
-{
+struct in6_flowlabel_req {
 	struct in6_addr	flr_dst;
 	__be32	flr_label;
 	__u8	flr_action;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ad27c7d..eecfa55 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -10,15 +10,13 @@
 #include <linux/timer.h>
 #include <linux/sysctl.h>
 
-struct ipv4_devconf
-{
+struct ipv4_devconf {
 	void	*sysctl;
 	int	data[__NET_IPV4_CONF_MAX - 1];
 	DECLARE_BITMAP(state, __NET_IPV4_CONF_MAX - 1);
 };
 
-struct in_device
-{
+struct in_device {
 	struct net_device	*dev;
 	atomic_t		refcnt;
 	int			dead;
@@ -110,8 +108,7 @@
 #define IN_DEV_ARP_IGNORE(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
 #define IN_DEV_ARP_NOTIFY(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
 
-struct in_ifaddr
-{
+struct in_ifaddr {
 	struct in_ifaddr	*ifa_next;
 	struct in_device	*ifa_dev;
 	struct rcu_head		rcu_head;
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index 148265e..dfc1703 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -127,8 +127,7 @@
 /*
  *	IPVS statistics object (for user space)
  */
-struct ip_vs_stats_user
-{
+struct ip_vs_stats_user {
 	__u32                   conns;          /* connections scheduled */
 	__u32                   inpkts;         /* incoming packets */
 	__u32                   outpkts;        /* outgoing packets */
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index d5f6915..c5f3d53 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -76,8 +76,7 @@
  *	Cache manipulation structures for mrouted and PIMd
  */
  
-struct mfcctl
-{
+struct mfcctl {
 	struct in_addr mfcc_origin;		/* Origin of mcast	*/
 	struct in_addr mfcc_mcastgrp;		/* Group in question	*/
 	vifi_t	mfcc_parent;			/* Where it arrived	*/
@@ -92,8 +91,7 @@
  *	Group count retrieval for mrouted
  */
  
-struct sioc_sg_req
-{
+struct sioc_sg_req {
 	struct in_addr src;
 	struct in_addr grp;
 	unsigned long pktcnt;
@@ -105,8 +103,7 @@
  *	To get vif packet counts
  */
 
-struct sioc_vif_req
-{
+struct sioc_vif_req {
 	vifi_t	vifi;		/* Which iface */
 	unsigned long icount;	/* In packets */
 	unsigned long ocount;	/* Out packets */
@@ -119,8 +116,7 @@
  *	data. Magically happens to be like an IP packet as per the original
  */
  
-struct igmpmsg
-{
+struct igmpmsg {
 	__u32 unused1,unused2;
 	unsigned char im_msgtype;		/* What is this */
 	unsigned char im_mbz;			/* Must be zero */
@@ -181,8 +177,7 @@
 }
 #endif
 
-struct vif_device
-{
+struct vif_device {
 	struct net_device 	*dev;			/* Device we are using */
 	unsigned long	bytes_in,bytes_out;
 	unsigned long	pkt_in,pkt_out;		/* Statistics 			*/
@@ -195,8 +190,7 @@
 
 #define VIFF_STATIC 0x8000
 
-struct mfc_cache 
-{
+struct mfc_cache {
 	struct mfc_cache *next;			/* Next entry on cache line 	*/
 #ifdef CONFIG_NET_NS
 	struct net *mfc_net;
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index b191865..2caa1a8 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -75,8 +75,7 @@
  *	Cache manipulation structures for mrouted and PIMd
  */
 
-struct mf6cctl
-{
+struct mf6cctl {
 	struct sockaddr_in6 mf6cc_origin;		/* Origin of mcast	*/
 	struct sockaddr_in6 mf6cc_mcastgrp;		/* Group in question	*/
 	mifi_t	mf6cc_parent;			/* Where it arrived	*/
@@ -87,8 +86,7 @@
  *	Group count retrieval for pim6sd
  */
 
-struct sioc_sg_req6
-{
+struct sioc_sg_req6 {
 	struct sockaddr_in6 src;
 	struct sockaddr_in6 grp;
 	unsigned long pktcnt;
@@ -100,8 +98,7 @@
  *	To get vif packet counts
  */
 
-struct sioc_mif_req6
-{
+struct sioc_mif_req6 {
 	mifi_t	mifi;		/* Which iface */
 	unsigned long icount;	/* In packets */
 	unsigned long ocount;	/* Out packets */
@@ -172,8 +169,7 @@
 }
 #endif
 
-struct mif_device
-{
+struct mif_device {
 	struct net_device 	*dev;			/* Device we are using */
 	unsigned long	bytes_in,bytes_out;
 	unsigned long	pkt_in,pkt_out;		/* Statistics 			*/
@@ -185,8 +181,7 @@
 
 #define VIFF_STATIC 0x8000
 
-struct mfc6_cache
-{
+struct mfc6_cache {
 	struct mfc6_cache *next;		/* Next entry on cache line 	*/
 #ifdef CONFIG_NET_NS
 	struct net *mfc6_net;
diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h
index 12c9de1..a7003b7a 100644
--- a/include/linux/neighbour.h
+++ b/include/linux/neighbour.h
@@ -4,8 +4,7 @@
 #include <linux/types.h>
 #include <linux/netlink.h>
 
-struct ndmsg
-{
+struct ndmsg {
 	__u8		ndm_family;
 	__u8		ndm_pad1;
 	__u16		ndm_pad2;
@@ -15,8 +14,7 @@
 	__u8		ndm_type;
 };
 
-enum
-{
+enum {
 	NDA_UNSPEC,
 	NDA_DST,
 	NDA_LLADDR,
@@ -56,8 +54,7 @@
    NUD_PERMANENT is also cannot be deleted by garbage collectors.
  */
 
-struct nda_cacheinfo
-{
+struct nda_cacheinfo {
 	__u32		ndm_confirmed;
 	__u32		ndm_used;
 	__u32		ndm_updated;
@@ -89,8 +86,7 @@
  * device.
  ****/
 
-struct ndt_stats
-{
+struct ndt_stats {
 	__u64		ndts_allocs;
 	__u64		ndts_destroys;
 	__u64		ndts_hash_grows;
@@ -124,15 +120,13 @@
 };
 #define NDTPA_MAX (__NDTPA_MAX - 1)
 
-struct ndtmsg
-{
+struct ndtmsg {
 	__u8		ndtm_family;
 	__u8		ndtm_pad1;
 	__u16		ndtm_pad2;
 };
 
-struct ndt_config
-{
+struct ndt_config {
 	__u16		ndtc_key_len;
 	__u16		ndtc_entry_size;
 	__u32		ndtc_entries;
diff --git a/include/linux/net.h b/include/linux/net.h
index b42bb60..70ee3c3 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -199,9 +199,13 @@
 				       struct pipe_inode_info *pipe, size_t len, unsigned int flags);
 };
 
+#define DECLARE_SOCKADDR(type, dst, src)	\
+	type dst = ({ __sockaddr_check_size(sizeof(*dst)); (type) src; })
+
 struct net_proto_family {
 	int		family;
-	int		(*create)(struct net *net, struct socket *sock, int protocol);
+	int		(*create)(struct net *net, struct socket *sock,
+				  int protocol, int kern);
 	struct module	*owner;
 };
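
The extra "kern" argument lets an address family distinguish sockets created from userspace via socket(2) from kernel-internal ones created with sock_create_kern(). A sketch of a protocol family adapted to the new signature (the family number and names are purely illustrative):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/net.h>

#define PF_EXAMPLE 27	/* hypothetical family number, for illustration only */

static int example_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	/* kern != 0 for sock_create_kern() callers, 0 for socket(2) */
	if (protocol != 0)
		return -EPROTONOSUPPORT;
	/* ... allocate the sock, set sock->ops, etc. ... */
	return 0;
}

static const struct net_proto_family example_family_ops = {
	.family	= PF_EXAMPLE,
	.create	= example_create,
	.owner	= THIS_MODULE,
};

/* registered as usual with sock_register(&example_family_ops) */
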
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 8380009..465add6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -125,8 +125,7 @@
  *	with byte counters.
  */
 
-struct net_device_stats
-{
+struct net_device_stats {
 	unsigned long	rx_packets;		/* total packets received	*/
 	unsigned long	tx_packets;		/* total packets transmitted	*/
 	unsigned long	rx_bytes;		/* total bytes received 	*/
@@ -179,8 +178,7 @@
 struct neigh_parms;
 struct sk_buff;
 
-struct netif_rx_stats
-{
+struct netif_rx_stats {
 	unsigned total;
 	unsigned dropped;
 	unsigned time_squeeze;
@@ -189,8 +187,7 @@
 
 DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
 
-struct dev_addr_list
-{
+struct dev_addr_list {
 	struct dev_addr_list	*next;
 	u8			da_addr[MAX_ADDR_LEN];
 	u8			da_addrlen;
@@ -227,8 +224,7 @@
 	int			count;
 };
 
-struct hh_cache
-{
+struct hh_cache {
 	struct hh_cache *hh_next;	/* Next entry			     */
 	atomic_t	hh_refcnt;	/* number of users                   */
 /*
@@ -291,8 +287,7 @@
  * code.
  */
 
-enum netdev_state_t
-{
+enum netdev_state_t {
 	__LINK_STATE_START,
 	__LINK_STATE_PRESENT,
 	__LINK_STATE_NOCARRIER,
@@ -341,20 +336,20 @@
 	struct sk_buff		*skb;
 };
 
-enum
-{
+enum {
 	NAPI_STATE_SCHED,	/* Poll is scheduled */
 	NAPI_STATE_DISABLE,	/* Disable pending */
 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
 };
 
-enum {
+enum gro_result {
 	GRO_MERGED,
 	GRO_MERGED_FREE,
 	GRO_HELD,
 	GRO_NORMAL,
 	GRO_DROP,
 };
+typedef enum gro_result gro_result_t;
 
 extern void __napi_schedule(struct napi_struct *n);
 
@@ -457,8 +452,7 @@
 # define napi_synchronize(n)	barrier()
 #endif
 
-enum netdev_queue_state_t
-{
+enum netdev_queue_state_t {
 	__QUEUE_STATE_XOFF,
 	__QUEUE_STATE_FROZEN,
 };
@@ -635,6 +629,10 @@
 						      unsigned int sgc);
 	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
 						     u16 xid);
+#define NETDEV_FCOE_WWNN 0
+#define NETDEV_FCOE_WWPN 1
+	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
+						    u64 *wwn, int type);
 #endif
 };
 
@@ -648,8 +646,7 @@
  *	moves out.
  */
 
-struct net_device
-{
+struct net_device {
 
 	/*
 	 * This is the first field of the "visible" part of this structure
@@ -683,6 +680,7 @@
 
 	struct list_head	dev_list;
 	struct list_head	napi_list;
+	struct list_head	unreg_list;
 
 	/* Net device features */
 	unsigned long		features;
@@ -894,8 +892,8 @@
 
 	/* class/net/name entry */
 	struct device		dev;
-	/* space for optional statistics and wireless sysfs groups */
-	const struct attribute_group *sysfs_groups[3];
+	/* space for optional device, statistics, and wireless sysfs groups */
+	const struct attribute_group *sysfs_groups[4];
 
 	/* rtnetlink link ops */
 	const struct rtnl_link_ops *rtnl_link_ops;
@@ -1075,6 +1073,8 @@
 
 #define for_each_netdev(net, d)		\
 		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_rcu(net, d)		\
+		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_safe(net, d, n)	\
 		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_continue(net, d)		\
@@ -1109,6 +1109,7 @@
 extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
 						  unsigned short mask);
 extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
+extern struct net_device	*dev_get_by_name_rcu(struct net *net, const char *name);
 extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
 extern int		dev_alloc_name(struct net_device *dev, const char *name);
 extern int		dev_open(struct net_device *dev);
@@ -1116,7 +1117,14 @@
 extern void		dev_disable_lro(struct net_device *dev);
 extern int		dev_queue_xmit(struct sk_buff *skb);
 extern int		register_netdevice(struct net_device *dev);
-extern void		unregister_netdevice(struct net_device *dev);
+extern void		unregister_netdevice_queue(struct net_device *dev,
+						   struct list_head *head);
+extern void		unregister_netdevice_many(struct list_head *head);
+static inline void unregister_netdevice(struct net_device *dev)
+{
+	unregister_netdevice_queue(dev, NULL);
+}
+
 extern void		free_netdev(struct net_device *dev);
 extern void		synchronize_net(void);
 extern int 		register_netdevice_notifier(struct notifier_block *nb);
@@ -1127,6 +1135,7 @@
 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
+extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex);
 extern int		dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
 extern int		netpoll_trap(void);
@@ -1212,8 +1221,7 @@
  * Incoming packets are placed on per-cpu queues so that
  * no locking is needed.
  */
-struct softnet_data
-{
+struct softnet_data {
 	struct Qdisc		*output_queue;
 	struct sk_buff_head	input_pkt_queue;
 	struct list_head	poll_list;
@@ -1467,18 +1475,19 @@
 #define HAVE_NETIF_RECEIVE_SKB 1
 extern int		netif_receive_skb(struct sk_buff *skb);
 extern void		napi_gro_flush(struct napi_struct *napi);
-extern int		dev_gro_receive(struct napi_struct *napi,
+extern gro_result_t	dev_gro_receive(struct napi_struct *napi,
 					struct sk_buff *skb);
-extern int		napi_skb_finish(int ret, struct sk_buff *skb);
-extern int		napi_gro_receive(struct napi_struct *napi,
+extern gro_result_t	napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
+extern gro_result_t	napi_gro_receive(struct napi_struct *napi,
 					 struct sk_buff *skb);
 extern void		napi_reuse_skb(struct napi_struct *napi,
 				       struct sk_buff *skb);
 extern struct sk_buff *	napi_get_frags(struct napi_struct *napi);
-extern int		napi_frags_finish(struct napi_struct *napi,
-					  struct sk_buff *skb, int ret);
+extern gro_result_t	napi_frags_finish(struct napi_struct *napi,
+					  struct sk_buff *skb,
+					  gro_result_t ret);
 extern struct sk_buff *	napi_frags_skb(struct napi_struct *napi);
-extern int		napi_gro_frags(struct napi_struct *napi);
+extern gro_result_t	napi_gro_frags(struct napi_struct *napi);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
@@ -1609,7 +1618,8 @@
  *
  * Check if carrier is operational
  */
-static inline int netif_oper_up(const struct net_device *dev) {
+static inline int netif_oper_up(const struct net_device *dev)
+{
 	return (dev->operstate == IF_OPER_UP ||
 		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
 }
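
unregister_netdevice_queue() and unregister_netdevice_many(), together with the new net_device::unreg_list member, let callers batch device teardown so that the expensive rollback and synchronization work in the core runs once per batch instead of once per device. A sketch of the calling pattern, assuming a driver that owns an array of devices (the helper is hypothetical):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* hypothetical bulk teardown for a driver-private set of devices */
static void example_unregister_all(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	/* one pass over the whole batch instead of n separate rollbacks */
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
	/* the usual free_netdev() (or destructor) per device still follows */
}
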
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 6132b5e..48c5496 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -93,8 +93,7 @@
 			       const struct net_device *out,
 			       int (*okfn)(struct sk_buff *));
 
-struct nf_hook_ops
-{
+struct nf_hook_ops {
 	struct list_head list;
 
 	/* User fills in from here down. */
@@ -106,8 +105,7 @@
 	int priority;
 };
 
-struct nf_sockopt_ops
-{
+struct nf_sockopt_ops {
 	struct list_head list;
 
 	u_int8_t pf;
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index a8248ee..a374787 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -3,8 +3,7 @@
 /* Connection state tracking for netfilter.  This is separated from,
    but required by, the NAT layer; it can also be used by an iptables
    extension. */
-enum ip_conntrack_info
-{
+enum ip_conntrack_info {
 	/* Part of an established connection (either direction). */
 	IP_CT_ESTABLISHED,
 
@@ -76,8 +75,7 @@
 };
 
 #ifdef __KERNEL__
-struct ip_conntrack_stat
-{
+struct ip_conntrack_stat {
 	unsigned int searched;
 	unsigned int found;
 	unsigned int new;
diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h
index 47727d7..3e3aa08 100644
--- a/include/linux/netfilter/nf_conntrack_ftp.h
+++ b/include/linux/netfilter/nf_conntrack_ftp.h
@@ -3,8 +3,7 @@
 /* FTP tracking. */
 
 /* This enum is exposed to userspace */
-enum nf_ct_ftp_type
-{
+enum nf_ct_ftp_type {
 	/* PORT command from client */
 	NF_CT_FTP_PORT,
 	/* PASV response from server */
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 768f78c..ceeefe6 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -16,8 +16,7 @@
 	SCTP_CONNTRACK_MAX
 };
 
-struct ip_ct_sctp
-{
+struct ip_ct_sctp {
 	enum sctp_conntrack state;
 
 	__be32 vtag[IP_CT_DIR_MAX];
diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h
index 4352fee..f6d97f6 100644
--- a/include/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/linux/netfilter/nf_conntrack_tcp.h
@@ -55,8 +55,7 @@
 	u_int8_t	flags;		/* per direction options */
 };
 
-struct ip_ct_tcp
-{
+struct ip_ct_tcp {
 	struct ip_ct_tcp_state seen[2];	/* connection parameters per direction */
 	u_int8_t	state;		/* state of the connection (enum tcp_conntrack) */
 	/* For detecting stale connections */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 9f00da2..49d321f 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -55,8 +55,7 @@
 #include <linux/capability.h>
 #include <net/netlink.h>
 
-struct nfnl_callback
-{
+struct nfnl_callback {
 	int (*call)(struct sock *nl, struct sk_buff *skb, 
 		    const struct nlmsghdr *nlh,
 		    const struct nlattr * const cda[]);
@@ -64,8 +63,7 @@
 	const u_int16_t attr_count;		/* number of nlattr's */
 };
 
-struct nfnetlink_subsystem
-{
+struct nfnetlink_subsystem {
 	const char *name;
 	__u8 subsys_id;			/* nfnetlink subsystem ID */
 	__u8 cb_count;			/* number of callbacks */
diff --git a/include/linux/netfilter/nfnetlink_compat.h b/include/linux/netfilter/nfnetlink_compat.h
index eda55ca..ffb9503 100644
--- a/include/linux/netfilter/nfnetlink_compat.h
+++ b/include/linux/netfilter/nfnetlink_compat.h
@@ -21,8 +21,7 @@
  * ! nfnetlink use the same attributes methods. - J. Schulist.
  */
 
-struct nfattr
-{
+struct nfattr {
 	__u16 nfa_len;
 	__u16 nfa_type;	/* we use 15 bits for the type, and the highest
 				 * bit to indicate whether the payload is nested */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 812cb15..378f27a 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -6,8 +6,7 @@
 #define XT_FUNCTION_MAXNAMELEN 30
 #define XT_TABLE_MAXNAMELEN 32
 
-struct xt_entry_match
-{
+struct xt_entry_match {
 	union {
 		struct {
 			__u16 match_size;
@@ -31,8 +30,7 @@
 	unsigned char data[0];
 };
 
-struct xt_entry_target
-{
+struct xt_entry_target {
 	union {
 		struct {
 			__u16 target_size;
@@ -64,16 +62,14 @@
 	},								       \
 }
 
-struct xt_standard_target
-{
+struct xt_standard_target {
 	struct xt_entry_target target;
 	int verdict;
 };
 
 /* The argument to IPT_SO_GET_REVISION_*.  Returns highest revision
  * kernel supports, if >= revision. */
-struct xt_get_revision
-{
+struct xt_get_revision {
 	char name[XT_FUNCTION_MAXNAMELEN-1];
 
 	__u8 revision;
@@ -90,8 +86,7 @@
  * ip6t_entry and arpt_entry.  This sucks, and it is a hack.  It will be my
  * personal pleasure to remove it -HW
  */
-struct _xt_align
-{
+struct _xt_align {
 	__u8 u8;
 	__u16 u16;
 	__u32 u32;
@@ -109,14 +104,12 @@
 #define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
 #define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
 
-struct xt_counters
-{
+struct xt_counters {
 	__u64 pcnt, bcnt;			/* Packet and byte counters */
 };
 
 /* The argument to IPT_SO_ADD_COUNTERS. */
-struct xt_counters_info
-{
+struct xt_counters_info {
 	/* Which table. */
 	char name[XT_TABLE_MAXNAMELEN];
 
@@ -269,8 +262,7 @@
 	u_int8_t family;
 };
 
-struct xt_match
-{
+struct xt_match {
 	struct list_head list;
 
 	const char name[XT_FUNCTION_MAXNAMELEN-1];
@@ -310,8 +302,7 @@
 };
 
 /* Registration hooks for targets. */
-struct xt_target
-{
+struct xt_target {
 	struct list_head list;
 
 	const char name[XT_FUNCTION_MAXNAMELEN-1];
@@ -349,8 +340,7 @@
 };
 
 /* Furniture shopping... */
-struct xt_table
-{
+struct xt_table {
 	struct list_head list;
 
 	/* What hooks you will enter on */
@@ -371,8 +361,7 @@
 #include <linux/netfilter_ipv4.h>
 
 /* The table itself */
-struct xt_table_info
-{
+struct xt_table_info {
 	/* Size per table */
 	unsigned int size;
 	/* Number of entries: FIXME. --RR */
@@ -528,8 +517,7 @@
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
 
-struct compat_xt_entry_match
-{
+struct compat_xt_entry_match {
 	union {
 		struct {
 			u_int16_t match_size;
@@ -545,8 +533,7 @@
 	unsigned char data[0];
 };
 
-struct compat_xt_entry_target
-{
+struct compat_xt_entry_target {
 	union {
 		struct {
 			u_int16_t target_size;
@@ -566,8 +553,7 @@
  * need to change whole approach in order to calculate align as function of
  * current task alignment */
 
-struct compat_xt_counters
-{
+struct compat_xt_counters {
 #if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
 	u_int32_t cnt[4];
 #else
@@ -575,8 +561,7 @@
 #endif
 };
 
-struct compat_xt_counters_info
-{
+struct compat_xt_counters_info {
 	char name[XT_TABLE_MAXNAMELEN];
 	compat_uint_t num_counters;
 	struct compat_xt_counters counters[0];
diff --git a/include/linux/netfilter/xt_connbytes.h b/include/linux/netfilter/xt_connbytes.h
index 52bd615..92fcbb0 100644
--- a/include/linux/netfilter/xt_connbytes.h
+++ b/include/linux/netfilter/xt_connbytes.h
@@ -15,8 +15,7 @@
 	XT_CONNBYTES_DIR_BOTH,
 };
 
-struct xt_connbytes_info
-{
+struct xt_connbytes_info {
 	struct {
 		aligned_u64 from;	/* count to be matched */
 		aligned_u64 to;		/* count to be matched */
diff --git a/include/linux/netfilter/xt_esp.h b/include/linux/netfilter/xt_esp.h
index ef6fa47..ee68824 100644
--- a/include/linux/netfilter/xt_esp.h
+++ b/include/linux/netfilter/xt_esp.h
@@ -3,8 +3,7 @@
 
 #include <linux/types.h>
 
-struct xt_esp
-{
+struct xt_esp {
 	__u32 spis[2];	/* Security Parameter Index */
 	__u8  invflags;	/* Inverse flags */
 };
diff --git a/include/linux/netfilter/xt_multiport.h b/include/linux/netfilter/xt_multiport.h
index 185db49..5b7e72d 100644
--- a/include/linux/netfilter/xt_multiport.h
+++ b/include/linux/netfilter/xt_multiport.h
@@ -3,8 +3,7 @@
 
 #include <linux/types.h>
 
-enum xt_multiport_flags
-{
+enum xt_multiport_flags {
 	XT_MULTIPORT_SOURCE,
 	XT_MULTIPORT_DESTINATION,
 	XT_MULTIPORT_EITHER
@@ -13,15 +12,13 @@
 #define XT_MULTI_PORTS	15
 
 /* Must fit inside union xt_matchinfo: 16 bytes */
-struct xt_multiport
-{
+struct xt_multiport {
 	__u8 flags;				/* Type of comparison */
 	__u8 count;				/* Number of ports */
 	__u16 ports[XT_MULTI_PORTS];	/* Ports */
 };
 
-struct xt_multiport_v1
-{
+struct xt_multiport_v1 {
 	__u8 flags;				/* Type of comparison */
 	__u8 count;				/* Number of ports */
 	__u16 ports[XT_MULTI_PORTS];	/* Ports */
diff --git a/include/linux/netfilter/xt_policy.h b/include/linux/netfilter/xt_policy.h
index 7bb64e7..be8ead0 100644
--- a/include/linux/netfilter/xt_policy.h
+++ b/include/linux/netfilter/xt_policy.h
@@ -5,22 +5,19 @@
 
 #define XT_POLICY_MAX_ELEM	4
 
-enum xt_policy_flags
-{
+enum xt_policy_flags {
 	XT_POLICY_MATCH_IN	= 0x1,
 	XT_POLICY_MATCH_OUT	= 0x2,
 	XT_POLICY_MATCH_NONE	= 0x4,
 	XT_POLICY_MATCH_STRICT	= 0x8,
 };
 
-enum xt_policy_modes
-{
+enum xt_policy_modes {
 	XT_POLICY_MODE_TRANSPORT,
 	XT_POLICY_MODE_TUNNEL
 };
 
-struct xt_policy_spec
-{
+struct xt_policy_spec {
 	__u8	saddr:1,
 			daddr:1,
 			proto:1,
@@ -30,15 +27,13 @@
 };
 
 #ifndef __KERNEL__
-union xt_policy_addr
-{
+union xt_policy_addr {
 	struct in_addr	a4;
 	struct in6_addr	a6;
 };
 #endif
 
-struct xt_policy_elem
-{
+struct xt_policy_elem {
 	union {
 #ifdef __KERNEL__
 		struct {
@@ -65,8 +60,7 @@
 	struct xt_policy_spec	invert;
 };
 
-struct xt_policy_info
-{
+struct xt_policy_info {
 	struct xt_policy_elem pol[XT_POLICY_MAX_ELEM];
 	__u16 flags;
 	__u16 len;
diff --git a/include/linux/netfilter/xt_state.h b/include/linux/netfilter/xt_state.h
index c06f32e..7b32de8 100644
--- a/include/linux/netfilter/xt_state.h
+++ b/include/linux/netfilter/xt_state.h
@@ -6,8 +6,7 @@
 
 #define XT_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 1))
 
-struct xt_state_info
-{
+struct xt_state_info {
 	unsigned int statemask;
 };
 #endif /*_XT_STATE_H*/
diff --git a/include/linux/netfilter/xt_string.h b/include/linux/netfilter/xt_string.h
index ecbb95f..235347c 100644
--- a/include/linux/netfilter/xt_string.h
+++ b/include/linux/netfilter/xt_string.h
@@ -11,8 +11,7 @@
 	XT_STRING_FLAG_IGNORECASE	= 0x02
 };
 
-struct xt_string_info
-{
+struct xt_string_info {
 	__u16 from_offset;
 	__u16 to_offset;
 	char	  algo[XT_STRING_MAX_ALGO_NAME_SIZE];
diff --git a/include/linux/netfilter/xt_tcpudp.h b/include/linux/netfilter/xt_tcpudp.h
index a490a0b..38aa7b3 100644
--- a/include/linux/netfilter/xt_tcpudp.h
+++ b/include/linux/netfilter/xt_tcpudp.h
@@ -4,8 +4,7 @@
 #include <linux/types.h>
 
 /* TCP matching stuff */
-struct xt_tcp
-{
+struct xt_tcp {
 	__u16 spts[2];			/* Source port range. */
 	__u16 dpts[2];			/* Destination port range. */
 	__u8 option;			/* TCP Option iff non-zero*/
@@ -22,8 +21,7 @@
 #define XT_TCP_INV_MASK		0x0F	/* All possible flags. */
 
 /* UDP matching stuff */
-struct xt_udp
-{
+struct xt_udp {
 	__u16 spts[2];			/* Source port range. */
 	__u16 dpts[2];			/* Destination port range. */
 	__u8 invflags;			/* Inverse flags */
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index 6fe3e6a..f233652 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -132,8 +132,7 @@
 #define ARPT_RETURN XT_RETURN
 
 /* The argument to ARPT_SO_GET_INFO */
-struct arpt_getinfo
-{
+struct arpt_getinfo {
 	/* Which table: caller fills this in. */
 	char name[ARPT_TABLE_MAXNAMELEN];
 
@@ -155,8 +154,7 @@
 };
 
 /* The argument to ARPT_SO_SET_REPLACE. */
-struct arpt_replace
-{
+struct arpt_replace {
 	/* Which table. */
 	char name[ARPT_TABLE_MAXNAMELEN];
 
@@ -191,8 +189,7 @@
 #define arpt_counters xt_counters
 
 /* The argument to ARPT_SO_GET_ENTRIES. */
-struct arpt_get_entries
-{
+struct arpt_get_entries {
 	/* Which table: user fills this in. */
 	char name[ARPT_TABLE_MAXNAMELEN];
 
@@ -224,20 +221,17 @@
 #ifdef __KERNEL__
 
 /* Standard entry. */
-struct arpt_standard
-{
+struct arpt_standard {
 	struct arpt_entry entry;
 	struct arpt_standard_target target;
 };
 
-struct arpt_error_target
-{
+struct arpt_error_target {
 	struct arpt_entry_target target;
 	char errorname[ARPT_FUNCTION_MAXNAMELEN];
 };
 
-struct arpt_error
-{
+struct arpt_error {
 	struct arpt_entry entry;
 	struct arpt_error_target target;
 };
@@ -279,8 +273,7 @@
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
 
-struct compat_arpt_entry
-{
+struct compat_arpt_entry {
 	struct arpt_arp arp;
 	u_int16_t target_offset;
 	u_int16_t next_offset;
diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h
index a11b0c2..c73ef0b 100644
--- a/include/linux/netfilter_bridge/ebt_802_3.h
+++ b/include/linux/netfilter_bridge/ebt_802_3.h
@@ -58,8 +58,7 @@
 }
 #endif
 
-struct ebt_802_3_info 
-{
+struct ebt_802_3_info {
 	uint8_t  sap;
 	__be16 type;
 	uint8_t  bitmask;
diff --git a/include/linux/netfilter_bridge/ebt_among.h b/include/linux/netfilter_bridge/ebt_among.h
index 7654069..0009558 100644
--- a/include/linux/netfilter_bridge/ebt_among.h
+++ b/include/linux/netfilter_bridge/ebt_among.h
@@ -29,14 +29,12 @@
  * Yes, it is a memory overhead, but in 2003 AD, who cares?
  */
 
-struct ebt_mac_wormhash_tuple
-{
+struct ebt_mac_wormhash_tuple {
 	uint32_t cmp[2];
 	__be32 ip;
 };
 
-struct ebt_mac_wormhash
-{
+struct ebt_mac_wormhash {
 	int table[257];
 	int poolsize;
 	struct ebt_mac_wormhash_tuple pool[0];
@@ -45,8 +43,7 @@
 #define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \
 		+ (x)->poolsize * sizeof(struct ebt_mac_wormhash_tuple) : 0)
 
-struct ebt_among_info
-{
+struct ebt_among_info {
 	int wh_dst_ofs;
 	int wh_src_ofs;
 	int bitmask;
diff --git a/include/linux/netfilter_bridge/ebt_arpreply.h b/include/linux/netfilter_bridge/ebt_arpreply.h
index 96a8339..7e77896 100644
--- a/include/linux/netfilter_bridge/ebt_arpreply.h
+++ b/include/linux/netfilter_bridge/ebt_arpreply.h
@@ -1,8 +1,7 @@
 #ifndef __LINUX_BRIDGE_EBT_ARPREPLY_H
 #define __LINUX_BRIDGE_EBT_ARPREPLY_H
 
-struct ebt_arpreply_info
-{
+struct ebt_arpreply_info {
 	unsigned char mac[ETH_ALEN];
 	int target;
 };
diff --git a/include/linux/netfilter_bridge/ebt_ip.h b/include/linux/netfilter_bridge/ebt_ip.h
index d684747..6a708fb 100644
--- a/include/linux/netfilter_bridge/ebt_ip.h
+++ b/include/linux/netfilter_bridge/ebt_ip.h
@@ -26,8 +26,7 @@
 #define EBT_IP_MATCH "ip"
 
 /* the same values are used for the invflags */
-struct ebt_ip_info
-{
+struct ebt_ip_info {
 	__be32 saddr;
 	__be32 daddr;
 	__be32 smsk;
diff --git a/include/linux/netfilter_bridge/ebt_ip6.h b/include/linux/netfilter_bridge/ebt_ip6.h
index 2273c3a..e5de987 100644
--- a/include/linux/netfilter_bridge/ebt_ip6.h
+++ b/include/linux/netfilter_bridge/ebt_ip6.h
@@ -23,8 +23,7 @@
 #define EBT_IP6_MATCH "ip6"
 
 /* the same values are used for the invflags */
-struct ebt_ip6_info
-{
+struct ebt_ip6_info {
 	struct in6_addr saddr;
 	struct in6_addr daddr;
 	struct in6_addr smsk;
diff --git a/include/linux/netfilter_bridge/ebt_limit.h b/include/linux/netfilter_bridge/ebt_limit.h
index d8b6500..4bf76b7 100644
--- a/include/linux/netfilter_bridge/ebt_limit.h
+++ b/include/linux/netfilter_bridge/ebt_limit.h
@@ -9,8 +9,7 @@
 /* 1/10,000 sec period => max of 10,000/sec.  Min rate is then 429490
    seconds, or one every 59 hours. */
 
-struct ebt_limit_info
-{
+struct ebt_limit_info {
 	u_int32_t avg;    /* Average secs between packets * scale */
 	u_int32_t burst;  /* Period multiplier for upper limit. */
 
diff --git a/include/linux/netfilter_bridge/ebt_log.h b/include/linux/netfilter_bridge/ebt_log.h
index b76e653..cc2cdfb 100644
--- a/include/linux/netfilter_bridge/ebt_log.h
+++ b/include/linux/netfilter_bridge/ebt_log.h
@@ -9,8 +9,7 @@
 #define EBT_LOG_PREFIX_SIZE 30
 #define EBT_LOG_WATCHER "log"
 
-struct ebt_log_info
-{
+struct ebt_log_info {
 	uint8_t loglevel;
 	uint8_t prefix[EBT_LOG_PREFIX_SIZE];
 	uint32_t bitmask;
diff --git a/include/linux/netfilter_bridge/ebt_mark_m.h b/include/linux/netfilter_bridge/ebt_mark_m.h
index 301524f..9ceb10e 100644
--- a/include/linux/netfilter_bridge/ebt_mark_m.h
+++ b/include/linux/netfilter_bridge/ebt_mark_m.h
@@ -4,8 +4,7 @@
 #define EBT_MARK_AND 0x01
 #define EBT_MARK_OR 0x02
 #define EBT_MARK_MASK (EBT_MARK_AND | EBT_MARK_OR)
-struct ebt_mark_m_info
-{
+struct ebt_mark_m_info {
 	unsigned long mark, mask;
 	uint8_t invert;
 	uint8_t bitmask;
diff --git a/include/linux/netfilter_bridge/ebt_mark_t.h b/include/linux/netfilter_bridge/ebt_mark_t.h
index 6270f6f..7d5a268 100644
--- a/include/linux/netfilter_bridge/ebt_mark_t.h
+++ b/include/linux/netfilter_bridge/ebt_mark_t.h
@@ -13,8 +13,7 @@
 #define MARK_AND_VALUE (0xffffffd0)
 #define MARK_XOR_VALUE (0xffffffc0)
 
-struct ebt_mark_t_info
-{
+struct ebt_mark_t_info {
 	unsigned long mark;
 	/* EBT_ACCEPT, EBT_DROP, EBT_CONTINUE or EBT_RETURN */
 	int target;
diff --git a/include/linux/netfilter_bridge/ebt_nat.h b/include/linux/netfilter_bridge/ebt_nat.h
index 435b886..5e74e3b 100644
--- a/include/linux/netfilter_bridge/ebt_nat.h
+++ b/include/linux/netfilter_bridge/ebt_nat.h
@@ -2,8 +2,7 @@
 #define __LINUX_BRIDGE_EBT_NAT_H
 
 #define NAT_ARP_BIT  (0x00000010)
-struct ebt_nat_info
-{
+struct ebt_nat_info {
 	unsigned char mac[ETH_ALEN];
 	/* EBT_ACCEPT, EBT_DROP, EBT_CONTINUE or EBT_RETURN */
 	int target;
diff --git a/include/linux/netfilter_bridge/ebt_pkttype.h b/include/linux/netfilter_bridge/ebt_pkttype.h
index 0d64bbb..51a7998 100644
--- a/include/linux/netfilter_bridge/ebt_pkttype.h
+++ b/include/linux/netfilter_bridge/ebt_pkttype.h
@@ -1,8 +1,7 @@
 #ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H
 #define __LINUX_BRIDGE_EBT_PKTTYPE_H
 
-struct ebt_pkttype_info
-{
+struct ebt_pkttype_info {
 	uint8_t pkt_type;
 	uint8_t invert;
 };
diff --git a/include/linux/netfilter_bridge/ebt_redirect.h b/include/linux/netfilter_bridge/ebt_redirect.h
index 5c67990..dd9622c 100644
--- a/include/linux/netfilter_bridge/ebt_redirect.h
+++ b/include/linux/netfilter_bridge/ebt_redirect.h
@@ -1,8 +1,7 @@
 #ifndef __LINUX_BRIDGE_EBT_REDIRECT_H
 #define __LINUX_BRIDGE_EBT_REDIRECT_H
 
-struct ebt_redirect_info
-{
+struct ebt_redirect_info {
 	/* EBT_ACCEPT, EBT_DROP, EBT_CONTINUE or EBT_RETURN */
 	int target;
 };
diff --git a/include/linux/netfilter_bridge/ebt_stp.h b/include/linux/netfilter_bridge/ebt_stp.h
index e5fd678..e503a0a 100644
--- a/include/linux/netfilter_bridge/ebt_stp.h
+++ b/include/linux/netfilter_bridge/ebt_stp.h
@@ -20,8 +20,7 @@
 
 #define EBT_STP_MATCH "stp"
 
-struct ebt_stp_config_info
-{
+struct ebt_stp_config_info {
 	uint8_t flags;
 	uint16_t root_priol, root_priou;
 	char root_addr[6], root_addrmsk[6];
@@ -35,8 +34,7 @@
 	uint16_t forward_delayl, forward_delayu;
 };
 
-struct ebt_stp_info
-{
+struct ebt_stp_info {
 	uint8_t type;
 	struct ebt_stp_config_info config;
 	uint16_t bitmask;
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index ea281e6..3cc40c1 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -34,14 +34,12 @@
 struct xt_match;
 struct xt_target;
 
-struct ebt_counter
-{
+struct ebt_counter {
 	uint64_t pcnt;
 	uint64_t bcnt;
 };
 
-struct ebt_replace
-{
+struct ebt_replace {
 	char name[EBT_TABLE_MAXNAMELEN];
 	unsigned int valid_hooks;
 	/* nr of rules in the table */
@@ -57,8 +55,7 @@
 	char __user *entries;
 };
 
-struct ebt_replace_kernel
-{
+struct ebt_replace_kernel {
 	char name[EBT_TABLE_MAXNAMELEN];
 	unsigned int valid_hooks;
 	/* nr of rules in the table */
@@ -120,8 +117,7 @@
 #define EBT_INV_MASK (EBT_IPROTO | EBT_IIN | EBT_IOUT | EBT_ILOGICALIN \
    | EBT_ILOGICALOUT | EBT_ISOURCE | EBT_IDEST)
 
-struct ebt_entry_match
-{
+struct ebt_entry_match {
 	union {
 		char name[EBT_FUNCTION_MAXNAMELEN];
 		struct xt_match *match;
@@ -131,8 +127,7 @@
 	unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
 };
 
-struct ebt_entry_watcher
-{
+struct ebt_entry_watcher {
 	union {
 		char name[EBT_FUNCTION_MAXNAMELEN];
 		struct xt_target *watcher;
@@ -142,8 +137,7 @@
 	unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
 };
 
-struct ebt_entry_target
-{
+struct ebt_entry_target {
 	union {
 		char name[EBT_FUNCTION_MAXNAMELEN];
 		struct xt_target *target;
@@ -154,8 +148,7 @@
 };
 
 #define EBT_STANDARD_TARGET "standard"
-struct ebt_standard_target
-{
+struct ebt_standard_target {
 	struct ebt_entry_target target;
 	int verdict;
 };
@@ -206,8 +199,7 @@
 #define EBT_MATCH 0
 #define EBT_NOMATCH 1
 
-struct ebt_match
-{
+struct ebt_match {
 	struct list_head list;
 	const char name[EBT_FUNCTION_MAXNAMELEN];
 	bool (*match)(const struct sk_buff *skb, const struct net_device *in,
@@ -224,8 +216,7 @@
 	struct module *me;
 };
 
-struct ebt_watcher
-{
+struct ebt_watcher {
 	struct list_head list;
 	const char name[EBT_FUNCTION_MAXNAMELEN];
 	unsigned int (*target)(struct sk_buff *skb,
@@ -242,8 +233,7 @@
 	struct module *me;
 };
 
-struct ebt_target
-{
+struct ebt_target {
 	struct list_head list;
 	const char name[EBT_FUNCTION_MAXNAMELEN];
 	/* returns one of the standard EBT_* verdicts */
@@ -262,15 +252,13 @@
 };
 
 /* used for jumping from and into user defined chains (udc) */
-struct ebt_chainstack
-{
+struct ebt_chainstack {
 	struct ebt_entries *chaininfo; /* pointer to chain data */
 	struct ebt_entry *e; /* pointer to entry data */
 	unsigned int n; /* n'th entry */
 };
 
-struct ebt_table_info
-{
+struct ebt_table_info {
 	/* total size of the entries */
 	unsigned int entries_size;
 	unsigned int nentries;
@@ -282,8 +270,7 @@
 	struct ebt_counter counters[0] ____cacheline_aligned;
 };
 
-struct ebt_table
-{
+struct ebt_table {
 	struct list_head list;
 	char name[EBT_TABLE_MAXNAMELEN];
 	struct ebt_replace_kernel *table;
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 61fafc8..27b3f58 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -76,8 +76,7 @@
 /* This structure defines each of the firewall rules.  Consists of 3
    parts which are 1) general IP header stuff 2) match specific
    stuff 3) the target to perform if the rule matches */
-struct ipt_entry
-{
+struct ipt_entry {
 	struct ipt_ip ip;
 
 	/* Mark with fields that we care about. */
@@ -135,8 +134,7 @@
 #define IPT_UDP_INV_MASK	XT_UDP_INV_MASK
 
 /* ICMP matching stuff */
-struct ipt_icmp
-{
+struct ipt_icmp {
 	u_int8_t type;				/* type to match */
 	u_int8_t code[2];			/* range of code */
 	u_int8_t invflags;			/* Inverse flags */
@@ -146,8 +144,7 @@
 #define IPT_ICMP_INV	0x01	/* Invert the sense of type/code test */
 
 /* The argument to IPT_SO_GET_INFO */
-struct ipt_getinfo
-{
+struct ipt_getinfo {
 	/* Which table: caller fills this in. */
 	char name[IPT_TABLE_MAXNAMELEN];
 
@@ -169,8 +166,7 @@
 };
 
 /* The argument to IPT_SO_SET_REPLACE. */
-struct ipt_replace
-{
+struct ipt_replace {
 	/* Which table. */
 	char name[IPT_TABLE_MAXNAMELEN];
 
@@ -204,8 +200,7 @@
 #define ipt_counters_info xt_counters_info
 
 /* The argument to IPT_SO_GET_ENTRIES. */
-struct ipt_get_entries
-{
+struct ipt_get_entries {
 	/* Which table: user fills this in. */
 	char name[IPT_TABLE_MAXNAMELEN];
 
@@ -250,20 +245,17 @@
 extern void ipt_unregister_table(struct xt_table *table);
 
 /* Standard entry. */
-struct ipt_standard
-{
+struct ipt_standard {
 	struct ipt_entry entry;
 	struct ipt_standard_target target;
 };
 
-struct ipt_error_target
-{
+struct ipt_error_target {
 	struct ipt_entry_target target;
 	char errorname[IPT_FUNCTION_MAXNAMELEN];
 };
 
-struct ipt_error
-{
+struct ipt_error {
 	struct ipt_entry entry;
 	struct ipt_error_target target;
 };
@@ -301,8 +293,7 @@
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
 
-struct compat_ipt_entry
-{
+struct compat_ipt_entry {
 	struct ipt_ip ip;
 	compat_uint_t nfcache;
 	u_int16_t target_offset;
diff --git a/include/linux/netfilter_ipv4/ipt_SAME.h b/include/linux/netfilter_ipv4/ipt_SAME.h
index be6e682..2529660 100644
--- a/include/linux/netfilter_ipv4/ipt_SAME.h
+++ b/include/linux/netfilter_ipv4/ipt_SAME.h
@@ -5,8 +5,7 @@
 
 #define IPT_SAME_NODST		0x01
 
-struct ipt_same_info
-{
+struct ipt_same_info {
 	unsigned char info;
 	u_int32_t rangesize;
 	u_int32_t ipnum;
diff --git a/include/linux/netfilter_ipv4/ipt_ah.h b/include/linux/netfilter_ipv4/ipt_ah.h
index 7b9a2ac..2e555b4 100644
--- a/include/linux/netfilter_ipv4/ipt_ah.h
+++ b/include/linux/netfilter_ipv4/ipt_ah.h
@@ -1,8 +1,7 @@
 #ifndef _IPT_AH_H
 #define _IPT_AH_H
 
-struct ipt_ah
-{
+struct ipt_ah {
 	u_int32_t spis[2];			/* Security Parameter Index */
 	u_int8_t  invflags;			/* Inverse flags */
 };
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index a64e145..b31050d 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -88,8 +88,7 @@
 /* This structure defines each of the firewall rules.  Consists of 3
    parts which are 1) general IP header stuff 2) match specific
    stuff 3) the target to perform if the rule matches */
-struct ip6t_entry
-{
+struct ip6t_entry {
 	struct ip6t_ip6 ipv6;
 
 	/* Mark with fields that we care about. */
@@ -111,20 +110,17 @@
 };
 
 /* Standard entry */
-struct ip6t_standard
-{
+struct ip6t_standard {
 	struct ip6t_entry entry;
 	struct ip6t_standard_target target;
 };
 
-struct ip6t_error_target
-{
+struct ip6t_error_target {
 	struct ip6t_entry_target target;
 	char errorname[IP6T_FUNCTION_MAXNAMELEN];
 };
 
-struct ip6t_error
-{
+struct ip6t_error {
 	struct ip6t_entry entry;
 	struct ip6t_error_target target;
 };
@@ -195,8 +191,7 @@
 #define IP6T_UDP_INV_MASK	XT_UDP_INV_MASK
 
 /* ICMP matching stuff */
-struct ip6t_icmp
-{
+struct ip6t_icmp {
 	u_int8_t type;				/* type to match */
 	u_int8_t code[2];			/* range of code */
 	u_int8_t invflags;			/* Inverse flags */
@@ -206,8 +201,7 @@
 #define IP6T_ICMP_INV	0x01	/* Invert the sense of type/code test */
 
 /* The argument to IP6T_SO_GET_INFO */
-struct ip6t_getinfo
-{
+struct ip6t_getinfo {
 	/* Which table: caller fills this in. */
 	char name[IP6T_TABLE_MAXNAMELEN];
 
@@ -229,8 +223,7 @@
 };
 
 /* The argument to IP6T_SO_SET_REPLACE. */
-struct ip6t_replace
-{
+struct ip6t_replace {
 	/* Which table. */
 	char name[IP6T_TABLE_MAXNAMELEN];
 
@@ -264,8 +257,7 @@
 #define ip6t_counters_info xt_counters_info
 
 /* The argument to IP6T_SO_GET_ENTRIES. */
-struct ip6t_get_entries
-{
+struct ip6t_get_entries {
 	/* Which table: user fills this in. */
 	char name[IP6T_TABLE_MAXNAMELEN];
 
@@ -330,8 +322,7 @@
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
 
-struct compat_ip6t_entry
-{
+struct compat_ip6t_entry {
 	struct ip6t_ip6 ipv6;
 	compat_uint_t nfcache;
 	u_int16_t target_offset;
diff --git a/include/linux/netfilter_ipv6/ip6t_ah.h b/include/linux/netfilter_ipv6/ip6t_ah.h
index 8531879..17a745c 100644
--- a/include/linux/netfilter_ipv6/ip6t_ah.h
+++ b/include/linux/netfilter_ipv6/ip6t_ah.h
@@ -1,8 +1,7 @@
 #ifndef _IP6T_AH_H
 #define _IP6T_AH_H
 
-struct ip6t_ah
-{
+struct ip6t_ah {
 	u_int32_t spis[2];			/* Security Parameter Index */
 	u_int32_t hdrlen;			/* Header Length */
 	u_int8_t  hdrres;			/* Test of the Reserved Field */
diff --git a/include/linux/netfilter_ipv6/ip6t_frag.h b/include/linux/netfilter_ipv6/ip6t_frag.h
index 66070a0..3724d08 100644
--- a/include/linux/netfilter_ipv6/ip6t_frag.h
+++ b/include/linux/netfilter_ipv6/ip6t_frag.h
@@ -1,8 +1,7 @@
 #ifndef _IP6T_FRAG_H
 #define _IP6T_FRAG_H
 
-struct ip6t_frag
-{
+struct ip6t_frag {
 	u_int32_t ids[2];			/* Fragment ID range */
 	u_int32_t hdrlen;			/* Header Length */
 	u_int8_t  flags;			/*  */
diff --git a/include/linux/netfilter_ipv6/ip6t_ipv6header.h b/include/linux/netfilter_ipv6/ip6t_ipv6header.h
index 51c53fc..01dfd44 100644
--- a/include/linux/netfilter_ipv6/ip6t_ipv6header.h
+++ b/include/linux/netfilter_ipv6/ip6t_ipv6header.h
@@ -8,8 +8,7 @@
 #ifndef __IPV6HEADER_H
 #define __IPV6HEADER_H
 
-struct ip6t_ipv6header_info
-{
+struct ip6t_ipv6header_info {
 	u_int8_t matchflags;
 	u_int8_t invflags;
 	u_int8_t modeflag;
diff --git a/include/linux/netfilter_ipv6/ip6t_mh.h b/include/linux/netfilter_ipv6/ip6t_mh.h
index b9ca9a5..18549bc 100644
--- a/include/linux/netfilter_ipv6/ip6t_mh.h
+++ b/include/linux/netfilter_ipv6/ip6t_mh.h
@@ -2,8 +2,7 @@
 #define _IP6T_MH_H
 
 /* MH matching stuff */
-struct ip6t_mh
-{
+struct ip6t_mh {
 	u_int8_t types[2];	/* MH type range */
 	u_int8_t invflags;	/* Inverse flags */
 };
diff --git a/include/linux/netfilter_ipv6/ip6t_opts.h b/include/linux/netfilter_ipv6/ip6t_opts.h
index a07e363..62d89bc 100644
--- a/include/linux/netfilter_ipv6/ip6t_opts.h
+++ b/include/linux/netfilter_ipv6/ip6t_opts.h
@@ -3,8 +3,7 @@
 
 #define IP6T_OPTS_OPTSNR 16
 
-struct ip6t_opts
-{
+struct ip6t_opts {
 	u_int32_t hdrlen;			/* Header Length */
 	u_int8_t flags;				/*  */
 	u_int8_t invflags;			/* Inverse flags */
diff --git a/include/linux/netfilter_ipv6/ip6t_rt.h b/include/linux/netfilter_ipv6/ip6t_rt.h
index 5215602..ab91bfd 100644
--- a/include/linux/netfilter_ipv6/ip6t_rt.h
+++ b/include/linux/netfilter_ipv6/ip6t_rt.h
@@ -5,8 +5,7 @@
 
 #define IP6T_RT_HOPS 16
 
-struct ip6t_rt
-{
+struct ip6t_rt {
 	u_int32_t rt_type;			/* Routing Type */
 	u_int32_t segsleft[2];			/* Segments Left */
 	u_int32_t hdrlen;			/* Header Length */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index ab5d312..fde27c0 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -29,16 +29,14 @@
 
 struct net;
 
-struct sockaddr_nl
-{
+struct sockaddr_nl {
 	sa_family_t	nl_family;	/* AF_NETLINK	*/
 	unsigned short	nl_pad;		/* zero		*/
 	__u32		nl_pid;		/* port ID	*/
        	__u32		nl_groups;	/* multicast groups mask */
 };
 
-struct nlmsghdr
-{
+struct nlmsghdr {
 	__u32		nlmsg_len;	/* Length of message including header */
 	__u16		nlmsg_type;	/* Message content */
 	__u16		nlmsg_flags;	/* Additional flags */
@@ -94,8 +92,7 @@
 
 #define NLMSG_MIN_TYPE		0x10	/* < 0x10: reserved control messages */
 
-struct nlmsgerr
-{
+struct nlmsgerr {
 	int		error;
 	struct nlmsghdr msg;
 };
@@ -106,8 +103,7 @@
 #define NETLINK_BROADCAST_ERROR	4
 #define NETLINK_NO_ENOBUFS	5
 
-struct nl_pktinfo
-{
+struct nl_pktinfo {
 	__u32	group;
 };
 
@@ -127,8 +123,7 @@
  *  <-------------- nlattr->nla_len -------------->
  */
 
-struct nlattr
-{
+struct nlattr {
 	__u16           nla_len;
 	__u16           nla_type;
 };
@@ -161,8 +156,7 @@
 	return (struct nlmsghdr *)skb->data;
 }
 
-struct netlink_skb_parms
-{
+struct netlink_skb_parms {
 	struct ucred		creds;		/* Skb credentials	*/
 	__u32			pid;
 	__u32			dst_group;
@@ -220,8 +214,7 @@
 #define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
 
 
-struct netlink_callback
-{
+struct netlink_callback {
 	struct sk_buff		*skb;
 	const struct nlmsghdr	*nlh;
 	int			(*dump)(struct sk_buff * skb,
@@ -231,8 +224,7 @@
 	long			args[6];
 };
 
-struct netlink_notify
-{
+struct netlink_notify {
 	struct net *net;
 	int pid;
 	int protocol;
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index 3c842ed..7f6ba86 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -75,8 +75,7 @@
 #define SET_TC_AT(v,n)   ((V_TC_AT(n)) | (v & ~M_TC_AT))
 
 /* Action attributes */
-enum
-{
+enum {
 	TCA_ACT_UNSPEC,
 	TCA_ACT_KIND,
 	TCA_ACT_OPTIONS,
@@ -108,8 +107,7 @@
 #define TC_ACT_JUMP		0x10000000
 
 /* Action type identifiers*/
-enum
-{
+enum {
 	TCA_ID_UNSPEC=0,
 	TCA_ID_POLICE=1,
 	/* other actions go here */
@@ -118,8 +116,7 @@
 
 #define TCA_ID_MAX __TCA_ID_MAX
 
-struct tc_police
-{
+struct tc_police {
 	__u32			index;
 	int			action;
 #define TC_POLICE_UNSPEC	TC_ACT_UNSPEC
@@ -138,15 +135,13 @@
 	__u32			capab;
 };
 
-struct tcf_t
-{
+struct tcf_t {
 	__u64   install;
 	__u64   lastuse;
 	__u64   expires;
 };
 
-struct tc_cnt
-{
+struct tc_cnt {
 	int                   refcnt; 
 	int                   bindcnt;
 };
@@ -158,8 +153,7 @@
 	int                   refcnt; \
 	int                   bindcnt
 
-enum
-{
+enum {
 	TCA_POLICE_UNSPEC,
 	TCA_POLICE_TBF,
 	TCA_POLICE_RATE,
@@ -182,8 +176,7 @@
 #define TC_U32_UNSPEC	0
 #define TC_U32_ROOT	(0xFFF00000)
 
-enum
-{
+enum {
 	TCA_U32_UNSPEC,
 	TCA_U32_CLASSID,
 	TCA_U32_HASH,
@@ -200,16 +193,14 @@
 
 #define TCA_U32_MAX (__TCA_U32_MAX - 1)
 
-struct tc_u32_key
-{
+struct tc_u32_key {
 	__be32		mask;
 	__be32		val;
 	int		off;
 	int		offmask;
 };
 
-struct tc_u32_sel
-{
+struct tc_u32_sel {
 	unsigned char		flags;
 	unsigned char		offshift;
 	unsigned char		nkeys;
@@ -223,15 +214,13 @@
 	struct tc_u32_key	keys[0];
 };
 
-struct tc_u32_mark
-{
+struct tc_u32_mark {
 	__u32		val;
 	__u32		mask;
 	__u32		success;
 };
 
-struct tc_u32_pcnt
-{
+struct tc_u32_pcnt {
 	__u64 rcnt;
 	__u64 rhit;
 	__u64 kcnts[0];
@@ -249,8 +238,7 @@
 
 /* RSVP filter */
 
-enum
-{
+enum {
 	TCA_RSVP_UNSPEC,
 	TCA_RSVP_CLASSID,
 	TCA_RSVP_DST,
@@ -263,15 +251,13 @@
 
 #define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 )
 
-struct tc_rsvp_gpi
-{
+struct tc_rsvp_gpi {
 	__u32	key;
 	__u32	mask;
 	int	offset;
 };
 
-struct tc_rsvp_pinfo
-{
+struct tc_rsvp_pinfo {
 	struct tc_rsvp_gpi dpi;
 	struct tc_rsvp_gpi spi;
 	__u8	protocol;
@@ -282,8 +268,7 @@
 
 /* ROUTE filter */
 
-enum
-{
+enum {
 	TCA_ROUTE4_UNSPEC,
 	TCA_ROUTE4_CLASSID,
 	TCA_ROUTE4_TO,
@@ -299,8 +284,7 @@
 
 /* FW filter */
 
-enum
-{
+enum {
 	TCA_FW_UNSPEC,
 	TCA_FW_CLASSID,
 	TCA_FW_POLICE,
@@ -314,8 +298,7 @@
 
 /* TC index filter */
 
-enum
-{
+enum {
 	TCA_TCINDEX_UNSPEC,
 	TCA_TCINDEX_HASH,
 	TCA_TCINDEX_MASK,
@@ -331,8 +314,7 @@
 
 /* Flow filter */
 
-enum
-{
+enum {
 	FLOW_KEY_SRC,
 	FLOW_KEY_DST,
 	FLOW_KEY_PROTO,
@@ -355,14 +337,12 @@
 
 #define FLOW_KEY_MAX	(__FLOW_KEY_MAX - 1)
 
-enum
-{
+enum {
 	FLOW_MODE_MAP,
 	FLOW_MODE_HASH,
 };
 
-enum
-{
+enum {
 	TCA_FLOW_UNSPEC,
 	TCA_FLOW_KEYS,
 	TCA_FLOW_MODE,
@@ -383,8 +363,7 @@
 
 /* Basic filter */
 
-enum
-{
+enum {
 	TCA_BASIC_UNSPEC,
 	TCA_BASIC_CLASSID,
 	TCA_BASIC_EMATCHES,
@@ -398,8 +377,7 @@
 
 /* Cgroup classifier */
 
-enum
-{
+enum {
 	TCA_CGROUP_UNSPEC,
 	TCA_CGROUP_ACT,
 	TCA_CGROUP_POLICE,
@@ -411,14 +389,12 @@
 
 /* Extended Matches */
 
-struct tcf_ematch_tree_hdr
-{
+struct tcf_ematch_tree_hdr {
 	__u16		nmatches;
 	__u16		progid;
 };
 
-enum
-{
+enum {
 	TCA_EMATCH_TREE_UNSPEC,
 	TCA_EMATCH_TREE_HDR,
 	TCA_EMATCH_TREE_LIST,
@@ -426,8 +402,7 @@
 };
 #define TCA_EMATCH_TREE_MAX (__TCA_EMATCH_TREE_MAX - 1)
 
-struct tcf_ematch_hdr
-{
+struct tcf_ematch_hdr {
 	__u16		matchid;
 	__u16		kind;
 	__u16		flags;
@@ -457,8 +432,7 @@
 #define TCF_EM_REL_MASK	3
 #define TCF_EM_REL_VALID(v) (((v) & TCF_EM_REL_MASK) != TCF_EM_REL_MASK)
 
-enum
-{
+enum {
 	TCF_LAYER_LINK,
 	TCF_LAYER_NETWORK,
 	TCF_LAYER_TRANSPORT,
@@ -479,13 +453,11 @@
 #define        TCF_EM_VLAN		6
 #define	TCF_EM_MAX		6
 
-enum
-{
+enum {
 	TCF_EM_PROG_TC
 };
 
-enum
-{
+enum {
 	TCF_EM_OPND_EQ,
 	TCF_EM_OPND_GT,
 	TCF_EM_OPND_LT
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index d51a2b3..2cfa4bc 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -29,8 +29,7 @@
    Particular schedulers may have also their private records.
  */
 
-struct tc_stats
-{
+struct tc_stats {
 	__u64	bytes;			/* Number of enqueued bytes */
 	__u32	packets;		/* Number of enqueued packets	*/
 	__u32	drops;			/* Packets dropped because of lack of resources */
@@ -42,8 +41,7 @@
 	__u32	backlog;
 };
 
-struct tc_estimator
-{
+struct tc_estimator {
 	signed char	interval;
 	unsigned char	ewma_log;
 };
@@ -75,8 +73,7 @@
 #define TC_H_ROOT	(0xFFFFFFFFU)
 #define TC_H_INGRESS    (0xFFFFFFF1U)
 
-struct tc_ratespec
-{
+struct tc_ratespec {
 	unsigned char	cell_log;
 	unsigned char	__reserved;
 	unsigned short	overhead;
@@ -109,8 +106,7 @@
 
 /* FIFO section */
 
-struct tc_fifo_qopt
-{
+struct tc_fifo_qopt {
 	__u32	limit;	/* Queue length: bytes for bfifo, packets for pfifo */
 };
 
@@ -119,8 +115,7 @@
 #define TCQ_PRIO_BANDS	16
 #define TCQ_MIN_PRIO_BANDS 2
 
-struct tc_prio_qopt
-{
+struct tc_prio_qopt {
 	int	bands;			/* Number of bands */
 	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
 };
@@ -134,8 +129,7 @@
 
 /* TBF section */
 
-struct tc_tbf_qopt
-{
+struct tc_tbf_qopt {
 	struct tc_ratespec rate;
 	struct tc_ratespec peakrate;
 	__u32		limit;
@@ -143,8 +137,7 @@
 	__u32		mtu;
 };
 
-enum
-{
+enum {
 	TCA_TBF_UNSPEC,
 	TCA_TBF_PARMS,
 	TCA_TBF_RTAB,
@@ -161,8 +154,7 @@
 
 /* SFQ section */
 
-struct tc_sfq_qopt
-{
+struct tc_sfq_qopt {
 	unsigned	quantum;	/* Bytes per round allocated to flow */
 	int		perturb_period;	/* Period of hash perturbation */
 	__u32		limit;		/* Maximal packets in queue */
@@ -170,8 +162,7 @@
 	unsigned	flows;		/* Maximal number of flows  */
 };
 
-struct tc_sfq_xstats
-{
+struct tc_sfq_xstats {
 	__s32		allot;
 };
 
@@ -186,8 +177,7 @@
 
 /* RED section */
 
-enum
-{
+enum {
 	TCA_RED_UNSPEC,
 	TCA_RED_PARMS,
 	TCA_RED_STAB,
@@ -196,8 +186,7 @@
 
 #define TCA_RED_MAX (__TCA_RED_MAX - 1)
 
-struct tc_red_qopt
-{
+struct tc_red_qopt {
 	__u32		limit;		/* HARD maximal queue length (bytes)	*/
 	__u32		qth_min;	/* Min average length threshold (bytes) */
 	__u32		qth_max;	/* Max average length threshold (bytes) */
@@ -209,8 +198,7 @@
 #define TC_RED_HARDDROP	2
 };
 
-struct tc_red_xstats
-{
+struct tc_red_xstats {
 	__u32           early;          /* Early drops */
 	__u32           pdrop;          /* Drops due to queue limits */
 	__u32           other;          /* Drops due to drop() calls */
@@ -221,8 +209,7 @@
 
 #define MAX_DPs 16
 
-enum
-{
+enum {
        TCA_GRED_UNSPEC,
        TCA_GRED_PARMS,
        TCA_GRED_STAB,
@@ -232,8 +219,7 @@
 
 #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
 
-struct tc_gred_qopt
-{
+struct tc_gred_qopt {
 	__u32		limit;        /* HARD maximal queue length (bytes)    */
 	__u32		qth_min;      /* Min average length threshold (bytes) */
 	__u32		qth_max;      /* Max average length threshold (bytes) */
@@ -253,8 +239,7 @@
 };
 
 /* gred setup */
-struct tc_gred_sopt
-{
+struct tc_gred_sopt {
 	__u32		DPs;
 	__u32		def_DP;
 	__u8		grio;
@@ -267,8 +252,7 @@
 #define TC_HTB_MAXDEPTH		8
 #define TC_HTB_PROTOVER		3 /* the same as HTB and TC's major */
 
-struct tc_htb_opt
-{
+struct tc_htb_opt {
 	struct tc_ratespec 	rate;
 	struct tc_ratespec 	ceil;
 	__u32	buffer;
@@ -277,8 +261,7 @@
 	__u32	level;		/* out only */
 	__u32	prio;
 };
-struct tc_htb_glob
-{
+struct tc_htb_glob {
 	__u32 version;		/* to match HTB/TC */
     	__u32 rate2quantum;	/* bps->quantum divisor */
     	__u32 defcls;		/* default class number */
@@ -287,8 +270,7 @@
 	/* stats */
 	__u32 direct_pkts; /* count of non-shaped packets */
 };
-enum
-{
+enum {
 	TCA_HTB_UNSPEC,
 	TCA_HTB_PARMS,
 	TCA_HTB_INIT,
@@ -299,8 +281,7 @@
 
 #define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
 
-struct tc_htb_xstats
-{
+struct tc_htb_xstats {
 	__u32 lends;
 	__u32 borrows;
 	__u32 giants;	/* too big packets (rate will not be accurate) */
@@ -310,28 +291,24 @@
 
 /* HFSC section */
 
-struct tc_hfsc_qopt
-{
+struct tc_hfsc_qopt {
 	__u16	defcls;		/* default class */
 };
 
-struct tc_service_curve
-{
+struct tc_service_curve {
 	__u32	m1;		/* slope of the first segment in bps */
 	__u32	d;		/* x-projection of the first segment in us */
 	__u32	m2;		/* slope of the second segment in bps */
 };
 
-struct tc_hfsc_stats
-{
+struct tc_hfsc_stats {
 	__u64	work;		/* total work done */
 	__u64	rtwork;		/* work done by real-time criteria */
 	__u32	period;		/* current period */
 	__u32	level;		/* class level in hierarchy */
 };
 
-enum
-{
+enum {
 	TCA_HFSC_UNSPEC,
 	TCA_HFSC_RSC,
 	TCA_HFSC_FSC,
@@ -348,8 +325,7 @@
 #define TC_CBQ_MAXLEVEL		8
 #define TC_CBQ_DEF_EWMA		5
 
-struct tc_cbq_lssopt
-{
+struct tc_cbq_lssopt {
 	unsigned char	change;
 	unsigned char	flags;
 #define TCF_CBQ_LSS_BOUNDED	1
@@ -368,8 +344,7 @@
 	__u32		avpkt;
 };
 
-struct tc_cbq_wrropt
-{
+struct tc_cbq_wrropt {
 	unsigned char	flags;
 	unsigned char	priority;
 	unsigned char	cpriority;
@@ -378,8 +353,7 @@
 	__u32		weight;
 };
 
-struct tc_cbq_ovl
-{
+struct tc_cbq_ovl {
 	unsigned char	strategy;
 #define	TC_CBQ_OVL_CLASSIC	0
 #define	TC_CBQ_OVL_DELAY	1
@@ -391,30 +365,26 @@
 	__u32		penalty;
 };
 
-struct tc_cbq_police
-{
+struct tc_cbq_police {
 	unsigned char	police;
 	unsigned char	__res1;
 	unsigned short	__res2;
 };
 
-struct tc_cbq_fopt
-{
+struct tc_cbq_fopt {
 	__u32		split;
 	__u32		defmap;
 	__u32		defchange;
 };
 
-struct tc_cbq_xstats
-{
+struct tc_cbq_xstats {
 	__u32		borrows;
 	__u32		overactions;
 	__s32		avgidle;
 	__s32		undertime;
 };
 
-enum
-{
+enum {
 	TCA_CBQ_UNSPEC,
 	TCA_CBQ_LSSOPT,
 	TCA_CBQ_WRROPT,
@@ -459,8 +429,7 @@
 
 /* Network emulator */
 
-enum
-{
+enum {
 	TCA_NETEM_UNSPEC,
 	TCA_NETEM_CORR,
 	TCA_NETEM_DELAY_DIST,
@@ -471,8 +440,7 @@
 
 #define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
 
-struct tc_netem_qopt
-{
+struct tc_netem_qopt {
 	__u32	latency;	/* added delay (us) */
 	__u32   limit;		/* fifo limit (packets) */
 	__u32	loss;		/* random packet loss (0=none ~0=100%) */
@@ -481,21 +449,18 @@
 	__u32	jitter;		/* random jitter in latency (us) */
 };
 
-struct tc_netem_corr
-{
+struct tc_netem_corr {
 	__u32	delay_corr;	/* delay correlation */
 	__u32	loss_corr;	/* packet loss correlation */
 	__u32	dup_corr;	/* duplicate correlation  */
 };
 
-struct tc_netem_reorder
-{
+struct tc_netem_reorder {
 	__u32	probability;
 	__u32	correlation;
 };
 
-struct tc_netem_corrupt
-{
+struct tc_netem_corrupt {
 	__u32	probability;
 	__u32	correlation;
 };
@@ -504,8 +469,7 @@
 
 /* DRR */
 
-enum
-{
+enum {
 	TCA_DRR_UNSPEC,
 	TCA_DRR_QUANTUM,
 	__TCA_DRR_MAX
@@ -513,8 +477,7 @@
 
 #define TCA_DRR_MAX	(__TCA_DRR_MAX - 1)
 
-struct tc_drr_stats
-{
+struct tc_drr_stats {
 	__u32	deficit;
 };
 
diff --git a/include/linux/rds.h b/include/linux/rds.h
index 89d46e1a..cab4994 100644
--- a/include/linux/rds.h
+++ b/include/linux/rds.h
@@ -56,6 +56,7 @@
 /* deprecated: RDS_BARRIER 4 */
 #define RDS_RECVERR			5
 #define RDS_CONG_MONITOR		6
+#define RDS_GET_MR_FOR_DEST		7
 
 /*
  * Control message types for SOL_RDS.
@@ -224,6 +225,13 @@
 	uint64_t	flags;
 };
 
+struct rds_get_mr_for_dest_args {
+	struct sockaddr_storage	dest_addr;
+	struct rds_iovec 	vec;
+	u_int64_t		cookie_addr;
+	uint64_t		flags;
+};
+
 struct rds_free_mr_args {
 	rds_rdma_cookie_t cookie;
 	u_int64_t	flags;
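A minimal user-space sketch of how the new RDS_GET_MR_FOR_DEST option and struct rds_get_mr_for_dest_args might be used, assuming an RDS socket and that SOL_RDS is available to the program; the helper name get_mr_for_dest mirrors the existing RDS_GET_MR setsockopt() path and is illustrative, not taken from this patch.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rds.h>

/* Hypothetical helper: register an MR sized for 'buf' against 'dest'.
 * The RDMA cookie is written to *cookie by the kernel. */
static int get_mr_for_dest(int fd, const struct sockaddr *dest, socklen_t dlen,
			   void *buf, size_t buflen, uint64_t *cookie)
{
	struct rds_get_mr_for_dest_args args;

	memset(&args, 0, sizeof(args));
	memcpy(&args.dest_addr, dest, dlen);
	args.vec.addr = (uint64_t)(unsigned long)buf;
	args.vec.bytes = buflen;
	args.cookie_addr = (uint64_t)(unsigned long)cookie;

	return setsockopt(fd, SOL_RDS, RDS_GET_MR_FOR_DEST,
			  &args, sizeof(args));
}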
diff --git a/include/linux/route.h b/include/linux/route.h
index f7ed35d..6600708 100644
--- a/include/linux/route.h
+++ b/include/linux/route.h
@@ -27,8 +27,7 @@
 #include <linux/compiler.h>
 
 /* This structure gets passed by the SIOCADDRT and SIOCDELRT calls. */
-struct rtentry 
-{
+struct rtentry {
 	unsigned long	rt_pad1;
 	struct sockaddr	rt_dst;		/* target address		*/
 	struct sockaddr	rt_gateway;	/* gateway addr (RTF_GATEWAY)	*/
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index adf2068..14fc906 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -127,8 +127,7 @@
    with attribute type.
  */
 
-struct rtattr
-{
+struct rtattr {
 	unsigned short	rta_len;
 	unsigned short	rta_type;
 };
@@ -154,8 +153,7 @@
  *		Definitions used in routing table administration.
  ****/
 
-struct rtmsg
-{
+struct rtmsg {
 	unsigned char		rtm_family;
 	unsigned char		rtm_dst_len;
 	unsigned char		rtm_src_len;
@@ -171,8 +169,7 @@
 
 /* rtm_type */
 
-enum
-{
+enum {
 	RTN_UNSPEC,
 	RTN_UNICAST,		/* Gateway or direct route	*/
 	RTN_LOCAL,		/* Accept locally		*/
@@ -230,8 +227,7 @@
    could be assigned a value between UNIVERSE and LINK.
 */
 
-enum rt_scope_t
-{
+enum rt_scope_t {
 	RT_SCOPE_UNIVERSE=0,
 /* User defined values  */
 	RT_SCOPE_SITE=200,
@@ -249,8 +245,7 @@
 
 /* Reserved table identifiers */
 
-enum rt_class_t
-{
+enum rt_class_t {
 	RT_TABLE_UNSPEC=0,
 /* User defined values */
 	RT_TABLE_COMPAT=252,
@@ -263,8 +258,7 @@
 
 /* Routing message attributes */
 
-enum rtattr_type_t
-{
+enum rtattr_type_t {
 	RTA_UNSPEC,
 	RTA_DST,
 	RTA_SRC,
@@ -298,8 +292,7 @@
  * and rtt for different paths from multipath.
  */
 
-struct rtnexthop
-{
+struct rtnexthop {
 	unsigned short		rtnh_len;
 	unsigned char		rtnh_flags;
 	unsigned char		rtnh_hops;
@@ -325,8 +318,7 @@
 
 /* RTM_CACHEINFO */
 
-struct rta_cacheinfo
-{
+struct rta_cacheinfo {
 	__u32	rta_clntref;
 	__u32	rta_lastuse;
 	__s32	rta_expires;
@@ -341,8 +333,7 @@
 
 /* RTM_METRICS --- array of struct rtattr with types of RTAX_* */
 
-enum
-{
+enum {
 	RTAX_UNSPEC,
 #define RTAX_UNSPEC RTAX_UNSPEC
 	RTAX_LOCK,
@@ -377,12 +368,13 @@
 #define RTAX_MAX (__RTAX_MAX - 1)
 
 #define RTAX_FEATURE_ECN	0x00000001
-#define RTAX_FEATURE_SACK	0x00000002
-#define RTAX_FEATURE_TIMESTAMP	0x00000004
+#define RTAX_FEATURE_NO_SACK	0x00000002
+#define RTAX_FEATURE_NO_TSTAMP	0x00000004
 #define RTAX_FEATURE_ALLFRAG	0x00000008
+#define RTAX_FEATURE_NO_WSCALE	0x00000010
+#define RTAX_FEATURE_NO_DSACK	0x00000020
 
-struct rta_session
-{
+struct rta_session {
 	__u8	proto;
 	__u8	pad1;
 	__u16	pad2;
@@ -407,8 +399,7 @@
  *		General form of address family dependent message.
  ****/
 
-struct rtgenmsg
-{
+struct rtgenmsg {
 	unsigned char		rtgen_family;
 };
 
@@ -421,8 +412,7 @@
  * on network protocol.
  */
 
-struct ifinfomsg
-{
+struct ifinfomsg {
 	unsigned char	ifi_family;
 	unsigned char	__ifi_pad;
 	unsigned short	ifi_type;		/* ARPHRD_* */
@@ -435,8 +425,7 @@
  *		prefix information 
  ****/
 
-struct prefixmsg
-{
+struct prefixmsg {
 	unsigned char	prefix_family;
 	unsigned char	prefix_pad1;
 	unsigned short	prefix_pad2;
@@ -457,8 +446,7 @@
 
 #define PREFIX_MAX	(__PREFIX_MAX - 1)
 
-struct prefix_cacheinfo
-{
+struct prefix_cacheinfo {
 	__u32	preferred_time;
 	__u32	valid_time;
 };
@@ -468,8 +456,7 @@
  *		Traffic control messages.
  ****/
 
-struct tcmsg
-{
+struct tcmsg {
 	unsigned char	tcm_family;
 	unsigned char	tcm__pad1;
 	unsigned short	tcm__pad2;
@@ -479,8 +466,7 @@
 	__u32		tcm_info;
 };
 
-enum
-{
+enum {
 	TCA_UNSPEC,
 	TCA_KIND,
 	TCA_OPTIONS,
@@ -502,8 +488,7 @@
  *		Neighbor Discovery userland options
  ****/
 
-struct nduseroptmsg
-{
+struct nduseroptmsg {
 	unsigned char	nduseropt_family;
 	unsigned char	nduseropt_pad1;
 	unsigned short	nduseropt_opts_len;	/* Total length of options */
@@ -515,8 +500,7 @@
 	/* Followed by one or more ND options */
 };
 
-enum
-{
+enum {
 	NDUSEROPT_UNSPEC,
 	NDUSEROPT_SRCADDR,
 	__NDUSEROPT_MAX
@@ -598,8 +582,7 @@
 #define RTNLGRP_MAX	(__RTNLGRP_MAX - 1)
 
 /* TC action piece */
-struct tcamsg
-{
+struct tcamsg {
 	unsigned char	tca_family;
 	unsigned char	tca__pad1;
 	unsigned short	tca__pad2;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0c68fbd..63f4742 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -354,8 +354,8 @@
 				ipvs_property:1,
 				peeked:1,
 				nf_trace:1;
+	__be16			protocol:16;
 	kmemcheck_bitfield_end(flags1);
-	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -367,7 +367,6 @@
 #endif
 
 	int			iif;
-	__u16			queue_mapping;
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
 #ifdef CONFIG_NET_CLS_ACT
@@ -376,6 +375,7 @@
 #endif
 
 	kmemcheck_bitfield_begin(flags2);
+	__u16			queue_mapping:16;
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	__u8			ndisc_nodetype:2;
 #endif
@@ -491,8 +491,7 @@
 			int len,int odd, struct sk_buff *skb),
 			void *from, int length);
 
-struct skb_seq_state
-{
+struct skb_seq_state {
 	__u32		lower_offset;
 	__u32		upper_offset;
 	__u32		frag_idx;
@@ -1769,6 +1768,8 @@
 						     int to_offset,
 						     int size);
 extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+extern void	       skb_free_datagram_locked(struct sock *sk,
+						struct sk_buff *skb);
 extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
 					 unsigned int flags);
 extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
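The new skb_free_datagram_locked() declaration suggests a datagram free that happens under the socket lock; a hedged sketch of the intended call site, where my_proto_recvmsg is a hypothetical recvmsg handler that runs without the socket lock held.

static int my_proto_recvmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err, copied;

	/* Block until a datagram arrives. */
	skb = skb_recv_datagram(sk, 0, 0, &err);
	if (!skb)
		return err;

	copied = min_t(int, skb->len, len);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	/* Assumed semantics: free the skb with the socket lock taken so
	 * the socket's memory accounting stays consistent. */
	skb_free_datagram_locked(sk, skb);
	return err ? err : copied;
}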
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 59966f1..7b3aae2 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -24,6 +24,9 @@
 #include <linux/types.h>		/* pid_t			*/
 #include <linux/compiler.h>		/* __user			*/
 
+#define __sockaddr_check_size(size)	\
+	BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
+
 #ifdef __KERNEL__
 # ifdef CONFIG_PROC_FS
 struct seq_file;
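A small sketch of what the new __sockaddr_check_size() guard could look like in use; since it expands to BUILD_BUG_ON(), it must sit inside a function. The init function below is made up for illustration and assumes struct sockaddr_in6 from linux/in6.h.

static int __init my_af_init(void)
{
	/* Compile-time assertion: this address family's sockaddr must fit
	 * into struct __kernel_sockaddr_storage. */
	__sockaddr_check_size(sizeof(struct sockaddr_in6));
	return 0;
}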
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 3d0a9ff..24f9885 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -269,7 +269,8 @@
 
 	const struct ssb_bus_ops *ops;
 
-	/* The core in the basic address register window. (PCI bus only) */
+	/* The core currently mapped into the MMIO window.
+	 * Not valid on all host-buses. So don't use outside of SSB. */
 	struct ssb_device *mapped_device;
 	union {
 		/* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */
@@ -281,14 +282,17 @@
 	 * On PCMCIA-host busses this is used to protect the whole MMIO access. */
 	spinlock_t bar_lock;
 
-	/* The bus this backplane is running on. */
+	/* The host-bus this backplane is running on. */
 	enum ssb_bustype bustype;
-	/* Pointer to the PCI bus (only valid if bustype == SSB_BUSTYPE_PCI). */
-	struct pci_dev *host_pci;
-	/* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */
-	struct pcmcia_device *host_pcmcia;
-	/* Pointer to the SDIO device (only if bustype == SSB_BUSTYPE_SDIO). */
-	struct sdio_func *host_sdio;
+	/* Pointers to the host-bus. Check bustype before using any of these pointers. */
+	union {
+		/* Pointer to the PCI bus (only valid if bustype == SSB_BUSTYPE_PCI). */
+		struct pci_dev *host_pci;
+		/* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */
+		struct pcmcia_device *host_pcmcia;
+		/* Pointer to the SDIO device (only if bustype == SSB_BUSTYPE_SDIO). */
+		struct sdio_func *host_sdio;
+	};
 
 	/* See enum ssb_quirks */
 	unsigned int quirks;
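Because the three host pointers now share storage in an anonymous union, callers have to check bustype before dereferencing any of them, as the new comment says. A hypothetical sketch:

static void my_ssb_print_host(struct ssb_bus *bus)
{
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		dev_info(&bus->host_pci->dev, "SSB on a PCI host\n");
		break;
	case SSB_BUSTYPE_PCMCIA:
		dev_info(&bus->host_pcmcia->dev, "SSB on a PCMCIA host\n");
		break;
	case SSB_BUSTYPE_SDIO:
		dev_info(&bus->host_sdio->dev, "SSB on an SDIO host\n");
		break;
	default:
		break;	/* e.g. SSB_BUSTYPE_SSB: no host pointer to print */
	}
}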
diff --git a/include/linux/tc_act/tc_defact.h b/include/linux/tc_act/tc_defact.h
index 964f473..6f65d07 100644
--- a/include/linux/tc_act/tc_defact.h
+++ b/include/linux/tc_act/tc_defact.h
@@ -3,13 +3,11 @@
 
 #include <linux/pkt_cls.h>
 
-struct tc_defact
-{
+struct tc_defact {
 	tc_gen;
 };
                                                                                 
-enum
-{
+enum {
 	TCA_DEF_UNSPEC,
 	TCA_DEF_TM,
 	TCA_DEF_PARMS,
diff --git a/include/linux/tc_act/tc_gact.h b/include/linux/tc_act/tc_gact.h
index e895c0a..f7bf94e 100644
--- a/include/linux/tc_act/tc_gact.h
+++ b/include/linux/tc_act/tc_gact.h
@@ -5,14 +5,12 @@
 #include <linux/pkt_cls.h>
 
 #define TCA_ACT_GACT 5
-struct tc_gact
-{
+struct tc_gact {
 	tc_gen;
 
 };
 
-struct tc_gact_p
-{
+struct tc_gact_p {
 #define PGACT_NONE              0
 #define PGACT_NETRAND           1
 #define PGACT_DETERM            2
@@ -22,8 +20,7 @@
 	int                   paction;
 };
  
-enum
-{
+enum {
 	TCA_GACT_UNSPEC,
 	TCA_GACT_TM,
 	TCA_GACT_PARMS,
diff --git a/include/linux/tc_act/tc_ipt.h b/include/linux/tc_act/tc_ipt.h
index 4b6f7b6..a233556 100644
--- a/include/linux/tc_act/tc_ipt.h
+++ b/include/linux/tc_act/tc_ipt.h
@@ -5,8 +5,7 @@
 
 #define TCA_ACT_IPT 6
 
-enum
-{
+enum {
 	TCA_IPT_UNSPEC,
 	TCA_IPT_TABLE,
 	TCA_IPT_HOOK,
diff --git a/include/linux/tc_act/tc_mirred.h b/include/linux/tc_act/tc_mirred.h
index 0a99ab6..7561750 100644
--- a/include/linux/tc_act/tc_mirred.h
+++ b/include/linux/tc_act/tc_mirred.h
@@ -10,15 +10,13 @@
 #define TCA_INGRESS_REDIR 3  /* packet redirect to INGRESS*/
 #define TCA_INGRESS_MIRROR 4 /* mirror packet to INGRESS */
                                                                                 
-struct tc_mirred
-{
+struct tc_mirred {
 	tc_gen;
 	int                     eaction;   /* one of IN/EGRESS_MIRROR/REDIR */
 	__u32                   ifindex;  /* ifindex of egress port */
 };
                                                                                 
-enum
-{
+enum {
 	TCA_MIRRED_UNSPEC,
 	TCA_MIRRED_TM,
 	TCA_MIRRED_PARMS,
diff --git a/include/linux/tc_act/tc_nat.h b/include/linux/tc_act/tc_nat.h
index e7cf31e..6663aeb 100644
--- a/include/linux/tc_act/tc_nat.h
+++ b/include/linux/tc_act/tc_nat.h
@@ -6,8 +6,7 @@
 
 #define TCA_ACT_NAT 9
 
-enum
-{
+enum {
 	TCA_NAT_UNSPEC,
 	TCA_NAT_PARMS,
 	TCA_NAT_TM,
@@ -17,8 +16,7 @@
 
 #define TCA_NAT_FLAG_EGRESS 1
 
-struct tc_nat
-{
+struct tc_nat {
 	tc_gen;
 	__be32 old_addr;
 	__be32 new_addr;
diff --git a/include/linux/tc_act/tc_pedit.h b/include/linux/tc_act/tc_pedit.h
index 54ce906..716cfab 100644
--- a/include/linux/tc_act/tc_pedit.h
+++ b/include/linux/tc_act/tc_pedit.h
@@ -6,8 +6,7 @@
 
 #define TCA_ACT_PEDIT 7
 
-enum
-{
+enum {
 	TCA_PEDIT_UNSPEC,
 	TCA_PEDIT_TM,
 	TCA_PEDIT_PARMS,
@@ -15,8 +14,7 @@
 };
 #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
                                                                                 
-struct tc_pedit_key
-{
+struct tc_pedit_key {
 	__u32           mask;  /* AND */
 	__u32           val;   /*XOR */
 	__u32           off;  /*offset */
@@ -25,8 +23,7 @@
 	__u32           shift;
 };
                                                                                 
-struct tc_pedit_sel
-{
+struct tc_pedit_sel {
 	tc_gen;
 	unsigned char           nkeys;
 	unsigned char           flags;
diff --git a/include/linux/tc_act/tc_skbedit.h b/include/linux/tc_act/tc_skbedit.h
index a14e461..7a2e910 100644
--- a/include/linux/tc_act/tc_skbedit.h
+++ b/include/linux/tc_act/tc_skbedit.h
@@ -26,6 +26,7 @@
 
 #define SKBEDIT_F_PRIORITY		0x1
 #define SKBEDIT_F_QUEUE_MAPPING		0x2
+#define SKBEDIT_F_MARK			0x4
 
 struct tc_skbedit {
 	tc_gen;
@@ -37,6 +38,7 @@
 	TCA_SKBEDIT_PARMS,
 	TCA_SKBEDIT_PRIORITY,
 	TCA_SKBEDIT_QUEUE_MAPPING,
+	TCA_SKBEDIT_MARK,
 	__TCA_SKBEDIT_MAX
 };
 #define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/linux/tc_ematch/tc_em_cmp.h b/include/linux/tc_ematch/tc_em_cmp.h
index 38e7f7b..f34bb1b 100644
--- a/include/linux/tc_ematch/tc_em_cmp.h
+++ b/include/linux/tc_ematch/tc_em_cmp.h
@@ -4,8 +4,7 @@
 #include <linux/types.h>
 #include <linux/pkt_cls.h>
 
-struct tcf_em_cmp
-{
+struct tcf_em_cmp {
 	__u32		val;
 	__u32		mask;
 	__u16		off;
@@ -15,8 +14,7 @@
 	__u8		opnd:4;
 };
 
-enum
-{
+enum {
 	TCF_EM_ALIGN_U8  = 1,
 	TCF_EM_ALIGN_U16 = 2,
 	TCF_EM_ALIGN_U32 = 4
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h
index dcfb733..0864206 100644
--- a/include/linux/tc_ematch/tc_em_meta.h
+++ b/include/linux/tc_ematch/tc_em_meta.h
@@ -4,8 +4,7 @@
 #include <linux/types.h>
 #include <linux/pkt_cls.h>
 
-enum
-{
+enum {
 	TCA_EM_META_UNSPEC,
 	TCA_EM_META_HDR,
 	TCA_EM_META_LVALUE,
@@ -14,8 +13,7 @@
 };
 #define TCA_EM_META_MAX (__TCA_EM_META_MAX - 1)
 
-struct tcf_meta_val
-{
+struct tcf_meta_val {
 	__u16			kind;
 	__u8			shift;
 	__u8			op;
@@ -26,16 +24,14 @@
 #define TCF_META_ID_MASK	0x7ff
 #define TCF_META_ID(kind)	((kind) & TCF_META_ID_MASK)
 
-enum
-{
+enum {
 	TCF_META_TYPE_VAR,
 	TCF_META_TYPE_INT,
 	__TCF_META_TYPE_MAX
 };
 #define TCF_META_TYPE_MAX (__TCF_META_TYPE_MAX - 1)
 
-enum
-{
+enum {
 	TCF_META_ID_VALUE,
 	TCF_META_ID_RANDOM,
 	TCF_META_ID_LOADAVG_0,
@@ -87,8 +83,7 @@
 };
 #define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1)
 
-struct tcf_meta_hdr
-{
+struct tcf_meta_hdr {
 	struct tcf_meta_val	left;
 	struct tcf_meta_val	right;
 };
diff --git a/include/linux/tc_ematch/tc_em_nbyte.h b/include/linux/tc_ematch/tc_em_nbyte.h
index 9ed8c2e..7172cfb 100644
--- a/include/linux/tc_ematch/tc_em_nbyte.h
+++ b/include/linux/tc_ematch/tc_em_nbyte.h
@@ -4,8 +4,7 @@
 #include <linux/types.h>
 #include <linux/pkt_cls.h>
 
-struct tcf_em_nbyte
-{
+struct tcf_em_nbyte {
 	__u16		off;
 	__u16		len:12;
 	__u8		layer:4;
diff --git a/include/linux/tc_ematch/tc_em_text.h b/include/linux/tc_ematch/tc_em_text.h
index d12a73a..5aac404 100644
--- a/include/linux/tc_ematch/tc_em_text.h
+++ b/include/linux/tc_ematch/tc_em_text.h
@@ -6,8 +6,7 @@
 
 #define TC_EM_TEXT_ALGOSIZ	16
 
-struct tcf_em_text
-{
+struct tcf_em_text {
 	char		algo[TC_EM_TEXT_ALGOSIZ];
 	__u16		from_offset;
 	__u16		to_offset;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 61723a7..eeecb85 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -102,8 +102,7 @@
 #define TCPI_OPT_WSCALE		4
 #define TCPI_OPT_ECN		8
 
-enum tcp_ca_state
-{
+enum tcp_ca_state {
 	TCP_CA_Open = 0,
 #define TCPF_CA_Open	(1<<TCP_CA_Open)
 	TCP_CA_Disorder = 1,
@@ -116,8 +115,7 @@
 #define TCPF_CA_Loss	(1<<TCP_CA_Loss)
 };
 
-struct tcp_info
-{
+struct tcp_info {
 	__u8	tcpi_state;
 	__u8	tcpi_ca_state;
 	__u8	tcpi_retransmits;
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 2d4ec15..3246f0e 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -10,8 +10,7 @@
 /* Structure to encapsulate addresses. I do not want to use
  * "standard" structure. My apologies.
  */
-typedef union
-{
+typedef union {
 	__be32		a4;
 	__be32		a6[4];
 } xfrm_address_t;
@@ -20,8 +19,7 @@
  * the state by (spi,daddr,ah/esp) or to store information about
  * spi, protocol and tunnel address on output.
  */
-struct xfrm_id
-{
+struct xfrm_id {
 	xfrm_address_t	daddr;
 	__be32		spi;
 	__u8		proto;
@@ -45,8 +43,7 @@
 
 /* Selector, used as selector both on policy rules (SPD) and SAs. */
 
-struct xfrm_selector
-{
+struct xfrm_selector {
 	xfrm_address_t	daddr;
 	xfrm_address_t	saddr;
 	__be16	dport;
@@ -63,8 +60,7 @@
 
 #define XFRM_INF (~(__u64)0)
 
-struct xfrm_lifetime_cfg
-{
+struct xfrm_lifetime_cfg {
 	__u64	soft_byte_limit;
 	__u64	hard_byte_limit;
 	__u64	soft_packet_limit;
@@ -75,16 +71,14 @@
 	__u64	hard_use_expires_seconds;
 };
 
-struct xfrm_lifetime_cur
-{
+struct xfrm_lifetime_cur {
 	__u64	bytes;
 	__u64	packets;
 	__u64	add_time;
 	__u64	use_time;
 };
 
-struct xfrm_replay_state
-{
+struct xfrm_replay_state {
 	__u32	oseq;
 	__u32	seq;
 	__u32	bitmap;
@@ -109,16 +103,14 @@
 	__u32	integrity_failed;
 };
 
-enum
-{
+enum {
 	XFRM_POLICY_TYPE_MAIN	= 0,
 	XFRM_POLICY_TYPE_SUB	= 1,
 	XFRM_POLICY_TYPE_MAX	= 2,
 	XFRM_POLICY_TYPE_ANY	= 255
 };
 
-enum
-{
+enum {
 	XFRM_POLICY_IN	= 0,
 	XFRM_POLICY_OUT	= 1,
 	XFRM_POLICY_FWD	= 2,
@@ -126,8 +118,7 @@
 	XFRM_POLICY_MAX	= 3
 };
 
-enum
-{
+enum {
 	XFRM_SHARE_ANY,		/* No limitations */
 	XFRM_SHARE_SESSION,	/* For this session only */
 	XFRM_SHARE_USER,	/* For this user only */
diff --git a/include/net/ah.h b/include/net/ah.h
index 7573a71..f0129f7 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -8,8 +8,7 @@
 
 struct crypto_ahash;
 
-struct ah_data
-{
+struct ah_data {
 	int			icv_full_len;
 	int			icv_trunc_len;
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 6f4862b..ff67865 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1809,30 +1809,45 @@
  * @dev: network device
  * @buf: deauthentication frame (header + body)
  * @len: length of the frame data
- * @cookie: cookie from ->deauth if called within that callback,
- *	%NULL otherwise
  *
  * This function is called whenever deauthentication has been processed in
  * station mode. This includes both received deauthentication frames and
  * locally generated ones. This function may sleep.
  */
-void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len,
-			  void *cookie);
+void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len);
+
+/**
+ * __cfg80211_send_deauth - notification of processed deauthentication
+ * @dev: network device
+ * @buf: deauthentication frame (header + body)
+ * @len: length of the frame data
+ *
+ * Like cfg80211_send_deauth(), but doesn't take the wdev lock.
+ */
+void __cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len);
 
 /**
  * cfg80211_send_disassoc - notification of processed disassociation
  * @dev: network device
  * @buf: disassociation response frame (header + body)
  * @len: length of the frame data
- * @cookie: cookie from ->disassoc if called within that callback,
- *	%NULL otherwise
  *
  * This function is called whenever disassociation has been processed in
  * station mode. This includes both received disassociation frames and locally
  * generated ones. This function may sleep.
  */
-void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len,
-			    void *cookie);
+void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len);
+
+/**
+ * __cfg80211_send_disassoc - notification of processed disassociation
+ * @dev: network device
+ * @buf: disassociation response frame (header + body)
+ * @len: length of the frame data
+ *
+ * Like cfg80211_send_disassoc(), but doesn't take the wdev lock.
+ */
+void __cfg80211_send_disassoc(struct net_device *dev, const u8 *buf,
+	size_t len);
 
 /**
  * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP)
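A short sketch of a caller adapted to the new cookie-less signatures; the helper name is invented, and the double-underscore variants documented above would be used instead when the caller already holds the wdev lock.

static void my_report_mlme_frame(struct net_device *dev,
				 const struct ieee80211_mgmt *mgmt, size_t len)
{
	if (ieee80211_is_deauth(mgmt->frame_control))
		cfg80211_send_deauth(dev, (const u8 *)mgmt, len);
	else if (ieee80211_is_disassoc(mgmt->frame_control))
		cfg80211_send_disassoc(dev, (const u8 *)mgmt, len);
}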
diff --git a/include/net/compat.h b/include/net/compat.h
index 9679f05..3c7d4e3 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -33,7 +33,11 @@
 extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 
 #else /* defined(CONFIG_COMPAT) */
-#define compat_msghdr	msghdr		/* to avoid compiler warnings */
+/*
+ * To avoid compiler warnings:
+ */
+#define compat_msghdr	msghdr
+#define compat_mmsghdr	mmsghdr
 #endif /* defined(CONFIG_COMPAT) */
 
 extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index cee4682..28966ca 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -97,16 +97,14 @@
 	unsigned long uptime;     /* Time device went up in jiffies */
 };
 
-struct dn_short_packet
-{
+struct dn_short_packet {
 	__u8    msgflg;
 	__le16 dstnode;
 	__le16 srcnode;
 	__u8   forward;
 } __attribute__((packed));
 
-struct dn_long_packet
-{
+struct dn_long_packet {
 	__u8   msgflg;
 	__u8   d_area;
 	__u8   d_subarea;
@@ -122,8 +120,7 @@
 
 /*------------------------- DRP - Routing messages ---------------------*/
 
-struct endnode_hello_message
-{
+struct endnode_hello_message {
 	__u8   msgflg;
 	__u8   tiver[3];
 	__u8   id[6];
@@ -138,8 +135,7 @@
 	__u8   data[2];
 } __attribute__((packed));
 
-struct rtnode_hello_message
-{
+struct rtnode_hello_message {
 	__u8   msgflg;
 	__u8   tiver[3];
 	__u8   id[6];
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index c378be7..52da6c3 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -4,8 +4,7 @@
 /* WARNING: The ordering of these elements must match ordering
  *          of RTA_* rtnetlink attribute numbers.
  */
-struct dn_kern_rta
-{
+struct dn_kern_rta {
         void            *rta_dst;
         void            *rta_src;
         int             *rta_iif;
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index 96e816b..17d43d2 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -70,30 +70,26 @@
 
 /* Data Messages    (data segment/interrupt/link service)               */
 
-struct nsp_data_seg_msg
-{
+struct nsp_data_seg_msg {
 	__u8   msgflg;
 	__le16 dstaddr;
 	__le16 srcaddr;
 } __attribute__((packed));
 
-struct nsp_data_opt_msg
-{
+struct nsp_data_opt_msg {
 	__le16 acknum;
 	__le16 segnum;
 	__le16 lsflgs;
 } __attribute__((packed));
 
-struct nsp_data_opt_msg1
-{
+struct nsp_data_opt_msg1 {
 	__le16 acknum;
 	__le16 segnum;
 } __attribute__((packed));
 
 
 /* Acknowledgment Message (data/other data)                             */
-struct nsp_data_ack_msg
-{
+struct nsp_data_ack_msg {
 	__u8   msgflg;
 	__le16 dstaddr;
 	__le16 srcaddr;
@@ -101,16 +97,14 @@
 } __attribute__((packed));
 
 /* Connect Acknowledgment Message */
-struct  nsp_conn_ack_msg
-{
+struct  nsp_conn_ack_msg {
 	__u8 msgflg;
 	__le16 dstaddr;
 } __attribute__((packed));
 
 
 /* Connect Initiate/Retransmit Initiate/Connect Confirm */
-struct  nsp_conn_init_msg
-{
+struct  nsp_conn_init_msg {
 	__u8   msgflg;
 #define NSP_CI      0x18            /* Connect Initiate     */
 #define NSP_RCI     0x68            /* Retrans. Conn Init   */
@@ -126,8 +120,7 @@
 } __attribute__((packed));
 
 /* Disconnect Initiate/Disconnect Confirm */
-struct  nsp_disconn_init_msg
-{
+struct  nsp_disconn_init_msg {
 	__u8   msgflg;
 	__le16 dstaddr;
 	__le16 srcaddr;
@@ -136,8 +129,7 @@
 
 
 
-struct  srcobj_fmt
-{
+struct  srcobj_fmt {
 	__u8   format;
 	__u8   task;
 	__le16 grpcode;
diff --git a/include/net/dst.h b/include/net/dst.h
index 5a900dd..387cb3c 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -36,8 +36,7 @@
 
 struct sk_buff;
 
-struct dst_entry
-{
+struct dst_entry {
 	struct rcu_head		rcu_head;
 	struct dst_entry	*child;
 	struct net_device       *dev;
@@ -111,6 +110,12 @@
 	return dst->metrics[metric-1];
 }
 
+static inline u32
+dst_feature(const struct dst_entry *dst, u32 feature)
+{
+	return (dst ? dst_metric(dst, RTAX_FEATURES) & feature : 0);
+}
+
 static inline u32 dst_mtu(const struct dst_entry *dst)
 {
 	u32 mtu = dst_metric(dst, RTAX_MTU);
@@ -136,7 +141,7 @@
 static inline u32
 dst_allfrag(const struct dst_entry *dst)
 {
-	int ret = dst_metric(dst, RTAX_FEATURES) & RTAX_FEATURE_ALLFRAG;
+	int ret = dst_feature(dst,  RTAX_FEATURE_ALLFRAG);
 	/* Yes, _exactly_. This is paranoia. */
 	barrier();
 	return ret;
@@ -222,11 +227,19 @@
 		neigh_confirm(dst->neighbour);
 }
 
-static inline void dst_negative_advice(struct dst_entry **dst_p)
+static inline void dst_negative_advice(struct dst_entry **dst_p,
+				       struct sock *sk)
 {
 	struct dst_entry * dst = *dst_p;
-	if (dst && dst->ops->negative_advice)
+	if (dst && dst->ops->negative_advice) {
 		*dst_p = dst->ops->negative_advice(dst);
+
+		if (dst != *dst_p) {
+			extern void sk_reset_txq(struct sock *sk);
+
+			sk_reset_txq(sk);
+		}
+	}
 }
 
 static inline void dst_link_failure(struct sk_buff *skb)
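A sketch of the new dst_feature() helper together with the renamed, negated RTAX_FEATURE_NO_* bits from the rtnetlink.h hunk earlier: a route with no metric set now defaults to "feature allowed". The wrapper name is illustrative.

static inline bool my_route_allows_sack(const struct dst_entry *dst)
{
	/* Bit set on the route means "disable SACK towards this destination". */
	return !dst_feature(dst, RTAX_FEATURE_NO_SACK);
}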
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index ca4b2e8..2cd707b 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -7,8 +7,7 @@
 #include <net/flow.h>
 #include <net/rtnetlink.h>
 
-struct fib_rule
-{
+struct fib_rule {
 	struct list_head	list;
 	atomic_t		refcnt;
 	int			ifindex;
@@ -25,15 +24,13 @@
 	struct net *		fr_net;
 };
 
-struct fib_lookup_arg
-{
+struct fib_lookup_arg {
 	void			*lookup_ptr;
 	void			*result;
 	struct fib_rule		*rule;
 };
 
-struct fib_rules_ops
-{
+struct fib_rules_ops {
 	int			family;
 	struct list_head	list;
 	int			rule_size;
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index eb87a14..fa15771 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -6,8 +6,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/pkt_sched.h>
 
-struct gnet_dump
-{
+struct gnet_dump {
 	spinlock_t *      lock;
 	struct sk_buff *  skb;
 	struct nlattr *   tail;
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 2a1c0687..eb551ba 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -13,8 +13,7 @@
  * @list: list entry for linking
  * @family: pointer to family, need not be set before registering
  */
-struct genl_multicast_group
-{
+struct genl_multicast_group {
 	struct genl_family	*family;	/* private */
 	struct list_head	list;		/* private */
 	char			name[GENL_NAMSIZ];
@@ -35,8 +34,7 @@
  * @family_list: family list
  * @mcast_groups: multicast groups list
  */
-struct genl_family
-{
+struct genl_family {
 	unsigned int		id;
 	unsigned int		hdrsize;
 	char			name[GENL_NAMSIZ];
@@ -58,8 +56,7 @@
  * @userhdr: user specific header
  * @attrs: netlink attributes
  */
-struct genl_info
-{
+struct genl_info {
 	u32			snd_seq;
 	u32			snd_pid;
 	struct nlmsghdr *	nlhdr;
@@ -102,8 +99,7 @@
  * @done: completion callback for dumps
  * @ops_list: operations list
  */
-struct genl_ops
-{
+struct genl_ops {
 	u8			cmd;
 	unsigned int		flags;
 	const struct nla_policy	*policy;
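The kernel-doc above describes the generic netlink family and ops structures; a minimal registration sketch under the 2.6.33-era API, with made-up names and a single no-op command handler.

static int my_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;	/* handle the command here */
}

static struct genl_family my_genl_family = {
	.id	 = GENL_ID_GENERATE,	/* let genetlink pick an id */
	.name	 = "my_family",
	.version = 1,
	.maxattr = 3,
};

static struct genl_ops my_genl_ops = {
	.cmd  = 1,
	.doit = my_doit,
};

static int __init my_genl_init(void)
{
	int err = genl_register_family(&my_genl_family);

	if (err)
		return err;
	return genl_register_ops(&my_genl_family, &my_genl_ops);
}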
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 38b7813..e9d69d1 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -32,8 +32,7 @@
 
 #ifdef __KERNEL__
 
-struct inet6_ifaddr 
-{
+struct inet6_ifaddr {
 	struct in6_addr		addr;
 	__u32			prefix_len;
 	
@@ -67,8 +66,7 @@
 	int			dead;
 };
 
-struct ip6_sf_socklist
-{
+struct ip6_sf_socklist {
 	unsigned int		sl_max;
 	unsigned int		sl_count;
 	struct in6_addr		sl_addr[0];
@@ -79,8 +77,7 @@
 
 #define IP6_SFBLOCK	10	/* allocate this many at once */
 
-struct ipv6_mc_socklist
-{
+struct ipv6_mc_socklist {
 	struct in6_addr		addr;
 	int			ifindex;
 	struct ipv6_mc_socklist *next;
@@ -89,8 +86,7 @@
 	struct ip6_sf_socklist	*sflist;
 };
 
-struct ip6_sf_list
-{
+struct ip6_sf_list {
 	struct ip6_sf_list	*sf_next;
 	struct in6_addr		sf_addr;
 	unsigned long		sf_count[2];	/* include/exclude counts */
@@ -105,8 +101,7 @@
 #define MAF_NOREPORT		0x08
 #define MAF_GSQUERY		0x10
 
-struct ifmcaddr6
-{
+struct ifmcaddr6 {
 	struct in6_addr		mca_addr;
 	struct inet6_dev	*idev;
 	struct ifmcaddr6	*next;
@@ -126,15 +121,13 @@
 
 /* Anycast stuff */
 
-struct ipv6_ac_socklist
-{
+struct ipv6_ac_socklist {
 	struct in6_addr		acl_addr;
 	int			acl_ifindex;
 	struct ipv6_ac_socklist *acl_next;
 };
 
-struct ifacaddr6
-{
+struct ifacaddr6 {
 	struct in6_addr		aca_addr;
 	struct inet6_dev	*aca_idev;
 	struct rt6_info		*aca_rt;
@@ -157,8 +150,7 @@
 	DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg);
 };
 
-struct inet6_dev 
-{
+struct inet6_dev {
 	struct net_device		*dev;
 
 	struct inet6_ifaddr	*addr_list;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 37f3aea..773b10f 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -130,11 +130,11 @@
 	__u16			tw_num;
 	kmemcheck_bitfield_begin(flags);
 	/* And these are ours. */
-	__u8			tw_ipv6only:1,
-				tw_transparent:1;
-	/* 14 bits hole, try to pack */
+	unsigned int		tw_ipv6only     : 1,
+				tw_transparent  : 1,
+				tw_pad		: 14,	/* 14 bits hole */
+				tw_ipv6_offset  : 16;
 	kmemcheck_bitfield_end(flags);
-	__u16			tw_ipv6_offset;
 	unsigned long		tw_ttd;
 	struct inet_bind_bucket	*tw_tb;
 	struct hlist_node	tw_death_node;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 15e1f8fe..35ad7b9 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -13,8 +13,7 @@
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
 
-struct inet_peer
-{
+struct inet_peer {
 	/* group together avl_left,avl_right,v4daddr to speedup lookups */
 	struct inet_peer	*avl_left, *avl_right;
 	__be32			v4daddr;	/* peer's address */
diff --git a/include/net/ip.h b/include/net/ip.h
index 376adf4..e6b9d12 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -33,8 +33,7 @@
 
 struct sock;
 
-struct inet_skb_parm
-{
+struct inet_skb_parm {
 	struct ip_options	opt;		/* Compiled IP options		*/
 	unsigned char		flags;
 
@@ -50,8 +49,7 @@
 	return ip_hdr(skb)->ihl * 4;
 }
 
-struct ipcm_cookie
-{
+struct ipcm_cookie {
 	__be32			addr;
 	int			oif;
 	struct ip_options	*opt;
@@ -60,8 +58,7 @@
 
 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
 
-struct ip_ra_chain
-{
+struct ip_ra_chain {
 	struct ip_ra_chain	*next;
 	struct sock		*sk;
 	void			(*destructor)(struct sock *);
@@ -159,8 +156,7 @@
 void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
 		   unsigned int len); 
 
-struct ipv4_config
-{
+struct ipv4_config {
 	int	log_martians;
 	int	no_pmtu_disc;
 };
@@ -336,8 +332,7 @@
  *	Functions provided by ip_fragment.c
  */
 
-enum ip_defrag_users
-{
+enum ip_defrag_users {
 	IP_DEFRAG_LOCAL_DELIVER,
 	IP_DEFRAG_CALL_RA_CHAIN,
 	IP_DEFRAG_CONNTRACK_IN,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 15b492a..2578081 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -30,8 +30,7 @@
 
 struct rt6_info;
 
-struct fib6_config
-{
+struct fib6_config {
 	u32		fc_table;
 	u32		fc_metric;
 	int		fc_dst_len;
@@ -51,8 +50,7 @@
 	struct nl_info	fc_nlinfo;
 };
 
-struct fib6_node
-{
+struct fib6_node {
 	struct fib6_node	*parent;
 	struct fib6_node	*left;
 	struct fib6_node	*right;
@@ -78,16 +76,14 @@
  *
  */
 
-struct rt6key
-{
+struct rt6key {
 	struct in6_addr	addr;
 	int		plen;
 };
 
 struct fib6_table;
 
-struct rt6_info
-{
+struct rt6_info {
 	union {
 		struct dst_entry	dst;
 	} u;
@@ -127,8 +123,7 @@
 	return ((struct rt6_info *)dst)->rt6i_idev;
 }
 
-struct fib6_walker_t
-{
+struct fib6_walker_t {
 	struct fib6_walker_t *prev, *next;
 	struct fib6_node *root, *node;
 	struct rt6_info *leaf;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 0e1b8ae..4a808de 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -103,8 +103,7 @@
 
 struct netlink_callback;
 
-struct rt6_rtnl_dump_arg
-{
+struct rt6_rtnl_dump_arg {
 	struct sk_buff *skb;
 	struct netlink_callback *cb;
 	struct net *net;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 68fd5eb..c93f94e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -213,7 +213,8 @@
 extern const struct nla_policy rtm_ipv4_policy[];
 extern void		ip_fib_init(void);
 extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
-			       struct net_device *dev, __be32 *spec_dst, u32 *itag);
+			       struct net_device *dev, __be32 *spec_dst,
+			       u32 *itag, u32 mark);
 extern void fib_select_default(struct net *net, const struct flowi *flp,
 			       struct fib_result *res);
 
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 98978e7..8dc3296 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -251,8 +251,7 @@
 	u32			outbps;
 };
 
-struct ip_vs_stats
-{
+struct ip_vs_stats {
 	struct ip_vs_stats_user	ustats;         /* statistics */
 	struct ip_vs_estimator	est;		/* estimator */
 
@@ -518,8 +517,7 @@
 /*
  *	The application module object (a.k.a. app incarnation)
  */
-struct ip_vs_app
-{
+struct ip_vs_app {
 	struct list_head	a_list;		/* member in app list */
 	int			type;		/* IP_VS_APP_TYPE_xxx */
 	char			*name;		/* application module name */
diff --git a/include/net/ipip.h b/include/net/ipip.h
index 86f1c8b..11e8513 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -8,16 +8,14 @@
 #define IPTUNNEL_ERR_TIMEO	(30*HZ)
 
 /* 6rd prefix/relay information */
-struct ip_tunnel_6rd_parm
-{
+struct ip_tunnel_6rd_parm {
 	struct in6_addr		prefix;
 	__be32			relay_prefix;
 	u16			prefixlen;
 	u16			relay_prefixlen;
 };
 
-struct ip_tunnel
-{
+struct ip_tunnel {
 	struct ip_tunnel	*next;
 	struct net_device	*dev;
 
@@ -40,11 +38,11 @@
 	unsigned int			prl_count;	/* # of entries in PRL */
 };
 
-struct ip_tunnel_prl_entry
-{
+struct ip_tunnel_prl_entry {
 	struct ip_tunnel_prl_entry	*next;
 	__be32				addr;
 	u16				flags;
+	struct rcu_head			rcu_head;
 };
 
 #define IPTUNNEL_XMIT() do {						\
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 8c31d8a..92db861 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -160,8 +160,7 @@
 #define ICMP6MSGIN_INC_STATS_BH(net, idev, field)	\
 	_DEVINC(net, icmpv6msg, _BH, idev, field)
 
-struct ip6_ra_chain
-{
+struct ip6_ra_chain {
 	struct ip6_ra_chain	*next;
 	struct sock		*sk;
 	int			sel;
@@ -176,8 +175,7 @@
    ancillary data and passed to IPv6.
  */
 
-struct ipv6_txoptions
-{
+struct ipv6_txoptions {
 	/* Length of this structure */
 	int			tot_len;
 
@@ -194,8 +192,7 @@
 	/* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
 };
 
-struct ip6_flowlabel
-{
+struct ip6_flowlabel {
 	struct ip6_flowlabel	*next;
 	__be32			label;
 	atomic_t		users;
@@ -212,8 +209,7 @@
 #define IPV6_FLOWINFO_MASK	cpu_to_be32(0x0FFFFFFF)
 #define IPV6_FLOWLABEL_MASK	cpu_to_be32(0x000FFFFF)
 
-struct ipv6_fl_socklist
-{
+struct ipv6_fl_socklist {
 	struct ipv6_fl_socklist	*next;
 	struct ip6_flowlabel	*fl;
 };
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index d5d3371..b2b98f3 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -300,8 +300,7 @@
  * This struct is also my long term insurance. I can add new fields here
  * without breaking the prototype of iw_handler...
  */
-struct iw_request_info
-{
+struct iw_request_info {
 	__u16		cmd;		/* Wireless Extension command */
 	__u16		flags;		/* More to come ;-) */
 };
@@ -321,8 +320,7 @@
  * shared by all driver instances... Same for the members...
  * This will be linked from net_device in <linux/netdevice.h>
  */
-struct iw_handler_def
-{
+struct iw_handler_def {
 
 	/* Array of handlers for standard ioctls
 	 * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWCOMMIT]
@@ -372,8 +370,7 @@
 /*
  * Describe how a standard IOCTL looks like.
  */
-struct iw_ioctl_description
-{
+struct iw_ioctl_description {
 	__u8	header_type;		/* NULL, iw_point or other */
 	__u8	token_type;		/* Future */
 	__u16	token_size;		/* Granularity of payload */
@@ -395,8 +392,7 @@
 /*
  * Instance specific spy data, i.e. addresses spied and quality for them.
  */
-struct iw_spy_data
-{
+struct iw_spy_data {
 	/* --- Standard spy support --- */
 	int			spy_number;
 	u_char			spy_address[IW_MAX_SPY][ETH_ALEN];
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index c75b960..c42c4a8 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1665,11 +1665,11 @@
  * header if %RX_FLAG_RADIOTAP is set in the @status flags.
  *
  * This function may not be called in IRQ context. Calls to this function
- * for a single hardware must be synchronized against each other. Calls
- * to this function and ieee80211_rx_irqsafe() may not be mixed for a
- * single hardware.
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware.
  *
- * Note that right now, this function must be called with softirqs disabled.
+ * In process context use ieee80211_rx_ni() instead.
  *
  * @hw: the hardware this frame came in on
  * @skb: the buffer to receive, owned by mac80211 after this call
@@ -1682,8 +1682,8 @@
  * Like ieee80211_rx() but can be called in IRQ context
  * (internally defers to a tasklet.)
  *
- * Calls to this function and ieee80211_rx() may not be mixed for a
- * single hardware.
+ * Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not
+ * be mixed for a single hardware.
  *
  * @hw: the hardware this frame came in on
  * @skb: the buffer to receive, owned by mac80211 after this call
@@ -1691,6 +1691,26 @@
 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb);
 
 /**
+ * ieee80211_rx_ni - receive frame (in process context)
+ *
+ * Like ieee80211_rx() but can be called in process context
+ * (internally disables bottom halves).
+ *
+ * Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may
+ * not be mixed for a single hardware.
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ */
+static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
+				   struct sk_buff *skb)
+{
+	local_bh_disable();
+	ieee80211_rx(hw, skb);
+	local_bh_enable();
+}
+
+/**
  * ieee80211_tx_status - transmit status callback
  *
  * Call this function for all transmitted frames after they have been
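
For illustration only, not part of this patch: a driver whose receive completion
runs in process context (for instance from a workqueue) could hand frames to
mac80211 through the new wrapper roughly as below. Every mydrv_* name is
hypothetical.

/* Hedged sketch: the mydrv_* names are invented for illustration. */
struct mydrv_priv {
	struct ieee80211_hw	*hw;
	struct work_struct	rx_work;
	struct sk_buff_head	rx_queue;
};

static void mydrv_rx_work(struct work_struct *work)
{
	struct mydrv_priv *priv = container_of(work, struct mydrv_priv, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		/* the rx status in IEEE80211_SKB_RXCB(skb) is assumed to have
		 * been filled in when the frame was queued */
		ieee80211_rx_ni(priv->hw, skb);	/* disables BHs internally */
	}
}
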
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 3817fda..db8e96d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -37,8 +37,7 @@
 
 struct neighbour;
 
-struct neigh_parms
-{
+struct neigh_parms {
 #ifdef CONFIG_NET_NS
 	struct net *net;
 #endif
@@ -70,8 +69,7 @@
 	int	locktime;
 };
 
-struct neigh_statistics
-{
+struct neigh_statistics {
 	unsigned long allocs;		/* number of allocated neighs */
 	unsigned long destroys;		/* number of destroyed neighs */
 	unsigned long hash_grows;	/* number of hash resizes */
@@ -97,8 +95,7 @@
 		preempt_enable();					\
 	} while (0)
 
-struct neighbour
-{
+struct neighbour {
 	struct neighbour	*next;
 	struct neigh_table	*tbl;
 	struct neigh_parms	*parms;
@@ -122,8 +119,7 @@
 	u8			primary_key[0];
 };
 
-struct neigh_ops
-{
+struct neigh_ops {
 	int			family;
 	void			(*solicit)(struct neighbour *, struct sk_buff*);
 	void			(*error_report)(struct neighbour *, struct sk_buff*);
@@ -133,8 +129,7 @@
 	int			(*queue_xmit)(struct sk_buff*);
 };
 
-struct pneigh_entry
-{
+struct pneigh_entry {
 	struct pneigh_entry	*next;
 #ifdef CONFIG_NET_NS
 	struct net		*net;
@@ -149,8 +144,7 @@
  */
 
 
-struct neigh_table
-{
+struct neigh_table {
 	struct neigh_table	*next;
 	int			family;
 	int			entry_size;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 6994101..0addd45 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -28,6 +28,10 @@
 struct net_generic;
 struct sock;
 
+
+#define NETDEV_HASHBITS    8
+#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
+
 struct net {
 	atomic_t		count;		/* To decided when the network
 						 *  namespace should be freed.
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index cbdd628..5cf7270 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -255,11 +255,9 @@
 }
 
 /* These are for NAT.  Icky. */
-/* Update TCP window tracking data when NAT mangles the packet */
-extern void nf_conntrack_tcp_update(const struct sk_buff *skb,
-				    unsigned int dataoff,
-				    struct nf_conn *ct, int dir,
-				    s16 offset);
+extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+			       enum ip_conntrack_dir dir,
+			       u32 seq);
 
 /* Fake conntrack entry for untracked connections */
 extern struct nf_conn nf_conntrack_untracked;
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 4f20d58..475facc 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -13,8 +13,7 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 
 /* Connection tracking event types */
-enum ip_conntrack_events
-{
+enum ip_conntrack_events {
 	IPCT_NEW		= 0,	/* new conntrack */
 	IPCT_RELATED		= 1,	/* related conntrack */
 	IPCT_DESTROY		= 2,	/* destroyed conntrack */
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index a965280..9a2b9cb 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -9,8 +9,7 @@
 extern unsigned int nf_ct_expect_hsize;
 extern unsigned int nf_ct_expect_max;
 
-struct nf_conntrack_expect
-{
+struct nf_conntrack_expect {
 	/* Conntrack expectation list member */
 	struct hlist_node lnode;
 
@@ -64,8 +63,7 @@
 #endif
 }
 
-struct nf_conntrack_expect_policy
-{
+struct nf_conntrack_expect_policy {
 	unsigned int	max_expected;
 	unsigned int	timeout;
 };
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 7f8fc5d..e192dc1 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -3,8 +3,7 @@
 
 #include <net/netfilter/nf_conntrack.h>
 
-enum nf_ct_ext_id
-{
+enum nf_ct_ext_id {
 	NF_CT_EXT_HELPER,
 	NF_CT_EXT_NAT,
 	NF_CT_EXT_ACCT,
@@ -65,8 +64,7 @@
 
 #define NF_CT_EXT_F_PREALLOC	0x0001
 
-struct nf_ct_ext_type
-{
+struct nf_ct_ext_type {
 	/* Destroys relationships (can be NULL). */
 	void (*destroy)(struct nf_conn *ct);
 	/* Called when reallocated (can be NULL).
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 1b70680..d015de9 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -16,8 +16,7 @@
 
 #define NF_CT_HELPER_NAME_LEN	16
 
-struct nf_conntrack_helper
-{
+struct nf_conntrack_helper {
 	struct hlist_node hnode;	/* Internal use. */
 
 	const char *name;		/* name of the module */
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 9f99d36..a754761 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -16,8 +16,7 @@
 #include <linux/seq_file.h>
 #include <net/netfilter/nf_conntrack.h>
 
-struct nf_conntrack_l3proto
-{
+struct nf_conntrack_l3proto {
 	/* L3 Protocol Family number. ex) PF_INET */
 	u_int16_t l3proto;
 
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 3767fb4..ca6dcf3 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -15,8 +15,7 @@
 
 struct seq_file;
 
-struct nf_conntrack_l4proto
-{
+struct nf_conntrack_l4proto {
 	/* L3 Protocol number. */
 	u_int16_t l3proto;
 
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 2628c15..4ee44c8 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -26,8 +26,7 @@
 
 /* The protocol-specific manipulable parts of the tuple: always in
    network order! */
-union nf_conntrack_man_proto
-{
+union nf_conntrack_man_proto {
 	/* Add other protocols here. */
 	__be16 all;
 
@@ -52,8 +51,7 @@
 };
 
 /* The manipulable part of the tuple. */
-struct nf_conntrack_man
-{
+struct nf_conntrack_man {
 	union nf_inet_addr u3;
 	union nf_conntrack_man_proto u;
 	/* Layer 3 protocol */
@@ -61,8 +59,7 @@
 };
 
 /* This contains the information to distinguish a connection. */
-struct nf_conntrack_tuple
-{
+struct nf_conntrack_tuple {
 	struct nf_conntrack_man src;
 
 	/* These are the parts of the tuple which are fixed. */
@@ -100,8 +97,7 @@
 	} dst;
 };
 
-struct nf_conntrack_tuple_mask
-{
+struct nf_conntrack_tuple_mask {
 	struct {
 		union nf_inet_addr u3;
 		union nf_conntrack_man_proto u;
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 8df0b7f..f5f09f03 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -5,8 +5,7 @@
 
 #define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16
 
-enum nf_nat_manip_type
-{
+enum nf_nat_manip_type {
 	IP_NAT_MANIP_SRC,
 	IP_NAT_MANIP_DST
 };
@@ -30,8 +29,7 @@
 };
 
 /* Single range specification. */
-struct nf_nat_range
-{
+struct nf_nat_range {
 	/* Set to OR of flags above. */
 	unsigned int flags;
 
@@ -43,8 +41,7 @@
 };
 
 /* For backwards compat: don't use in modern code. */
-struct nf_nat_multi_range_compat
-{
+struct nf_nat_multi_range_compat {
 	unsigned int rangesize; /* Must be 1. */
 
 	/* hangs off end. */
@@ -57,8 +54,7 @@
 #include <net/netfilter/nf_conntrack_extend.h>
 
 /* per conntrack: nat application helper private data */
-union nf_conntrack_nat_help
-{
+union nf_conntrack_nat_help {
 	/* insert nat helper private data here */
 	struct nf_nat_pptp nat_pptp_info;
 };
@@ -66,8 +62,7 @@
 struct nf_conn;
 
 /* The structure embedded in the conntrack structure. */
-struct nf_conn_nat
-{
+struct nf_conn_nat {
 	struct hlist_node bysource;
 	struct nf_nat_seq seq[IP_CT_DIR_MAX];
 	struct nf_conn *ct;
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index 237a961..4222220 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -32,4 +32,8 @@
  * to port ct->master->saved_proto. */
 extern void nf_nat_follow_master(struct nf_conn *ct,
 				 struct nf_conntrack_expect *this);
+
+extern s16 nf_nat_get_offset(const struct nf_conn *ct,
+			     enum ip_conntrack_dir dir,
+			     u32 seq);
 #endif
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h
index f3662c4..c398017 100644
--- a/include/net/netfilter/nf_nat_protocol.h
+++ b/include/net/netfilter/nf_nat_protocol.h
@@ -6,8 +6,7 @@
 
 struct nf_nat_range;
 
-struct nf_nat_protocol
-{
+struct nf_nat_protocol {
 	/* Protocol number. */
 	unsigned int protonum;
 
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index d1ca314..3dd210d 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -7,8 +7,7 @@
 
 /* Basic packet classifier frontend definitions. */
 
-struct tcf_walker
-{
+struct tcf_walker {
 	int	stop;
 	int	skip;
 	int	count;
@@ -61,8 +60,7 @@
 		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
 }
 
-struct tcf_exts
-{
+struct tcf_exts {
 #ifdef CONFIG_NET_CLS_ACT
 	struct tc_action *action;
 #endif
@@ -71,8 +69,7 @@
 /* Map to export classifier specific extension TLV types to the
  * generic extensions API. Unsupported extensions must be set to 0.
  */
-struct tcf_ext_map
-{
+struct tcf_ext_map {
 	int action;
 	int police;
 };
@@ -143,8 +140,7 @@
 /**
  * struct tcf_pkt_info - packet information
  */
-struct tcf_pkt_info
-{
+struct tcf_pkt_info {
 	unsigned char *		ptr;
 	int			nexthdr;
 };
@@ -162,8 +158,7 @@
  * @datalen: length of the ematch specific configuration data
  * @data: ematch specific data
  */
-struct tcf_ematch
-{
+struct tcf_ematch {
 	struct tcf_ematch_ops * ops;
 	unsigned long		data;
 	unsigned int		datalen;
@@ -211,8 +206,7 @@
  * @hdr: ematch tree header supplied by userspace
  * @matches: array of ematches
  */
-struct tcf_ematch_tree
-{
+struct tcf_ematch_tree {
 	struct tcf_ematch_tree_hdr hdr;
 	struct tcf_ematch *	matches;
 	
@@ -230,8 +224,7 @@
  * @owner: owner, must be set to THIS_MODULE
  * @link: link to previous/next ematch module (internal use)
  */
-struct tcf_ematch_ops
-{
+struct tcf_ematch_ops {
 	int			kind;
 	int			datalen;
 	int			(*change)(struct tcf_proto *, void *,
@@ -302,8 +295,7 @@
 
 #else /* CONFIG_NET_EMATCH */
 
-struct tcf_ematch_tree
-{
+struct tcf_ematch_tree {
 };
 
 #define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index f911ec75..2d56726 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -5,8 +5,7 @@
 #include <linux/ktime.h>
 #include <net/sch_generic.h>
 
-struct qdisc_walker
-{
+struct qdisc_walker {
 	int	stop;
 	int	skip;
 	int	count;
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 60249e5..f1effdd3c 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -47,8 +47,7 @@
 };
 
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-struct inet6_protocol 
-{
+struct inet6_protocol {
 	int	(*handler)(struct sk_buff *skb);
 
 	void	(*err_handler)(struct sk_buff *skb,
@@ -83,10 +82,6 @@
 	struct proto	 *prot;
 	const struct proto_ops *ops;
   
-	int              capability; /* Which (if any) capability do
-				      * we need to use this socket
-				      * interface?
-                                      */
 	char             no_check;   /* checksum on rcv/xmit/none? */
 	unsigned char	 flags;      /* See INET_PROTOSW_* below.  */
 };
diff --git a/include/net/red.h b/include/net/red.h
index 3cf31d4..995108e 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -90,8 +90,7 @@
 #define RED_STAB_SIZE	256
 #define RED_STAB_MASK	(RED_STAB_SIZE - 1)
 
-struct red_stats
-{
+struct red_stats {
 	u32		prob_drop;	/* Early probability drops */
 	u32		prob_mark;	/* Early probability marks */
 	u32		forced_drop;	/* Forced drops, qavg > max_thresh */
@@ -101,8 +100,7 @@
 	u32		backlog;
 };
 
-struct red_parms
-{
+struct red_parms {
 	/* Parameters */
 	u32		qth_min;	/* Min avg length threshold: A scaled */
 	u32		qth_max;	/* Max avg length threshold: A scaled */
diff --git a/include/net/route.h b/include/net/route.h
index 40f6346..cfb4c07 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -49,10 +49,8 @@
 
 struct fib_nh;
 struct inet_peer;
-struct rtable
-{
-	union
-	{
+struct rtable {
+	union {
 		struct dst_entry	dst;
 	} u;
 
@@ -77,16 +75,14 @@
 	struct inet_peer	*peer; /* long-living peer info */
 };
 
-struct ip_rt_acct
-{
+struct ip_rt_acct {
 	__u32 	o_bytes;
 	__u32 	o_packets;
 	__u32 	i_bytes;
 	__u32 	i_packets;
 };
 
-struct rt_cache_stat 
-{
+struct rt_cache_stat {
         unsigned int in_hit;
         unsigned int in_slow_tot;
         unsigned int in_slow_mc;
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index c3aa044d..cd5af1f 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -61,7 +61,8 @@
 	int			(*changelink)(struct net_device *dev,
 					      struct nlattr *tb[],
 					      struct nlattr *data[]);
-	void			(*dellink)(struct net_device *dev);
+	void			(*dellink)(struct net_device *dev,
+					   struct list_head *head);
 
 	size_t			(*get_size)(const struct net_device *dev);
 	int			(*fill_info)(struct sk_buff *skb,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c33180d..dad558b 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -15,16 +15,14 @@
 struct tcf_walker;
 struct module;
 
-struct qdisc_rate_table
-{
+struct qdisc_rate_table {
 	struct tc_ratespec rate;
 	u32		data[256];
 	struct qdisc_rate_table *next;
 	int		refcnt;
 };
 
-enum qdisc_state_t
-{
+enum qdisc_state_t {
 	__QDISC_STATE_RUNNING,
 	__QDISC_STATE_SCHED,
 	__QDISC_STATE_DEACTIVATED,
@@ -37,8 +35,7 @@
 	u16			data[];
 };
 
-struct Qdisc
-{
+struct Qdisc {
 	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
 	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
 	unsigned		flags;
@@ -78,8 +75,7 @@
 	struct gnet_stats_queue	qstats;
 };
 
-struct Qdisc_class_ops
-{
+struct Qdisc_class_ops {
 	/* Child qdisc manipulation */
 	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
 	int			(*graft)(struct Qdisc *, unsigned long cl,
@@ -108,8 +104,7 @@
 					struct gnet_dump *);
 };
 
-struct Qdisc_ops
-{
+struct Qdisc_ops {
 	struct Qdisc_ops	*next;
 	const struct Qdisc_class_ops	*cl_ops;
 	char			id[IFNAMSIZ];
@@ -133,14 +128,12 @@
 };
 
 
-struct tcf_result
-{
+struct tcf_result {
 	unsigned long	class;
 	u32		classid;
 };
 
-struct tcf_proto_ops
-{
+struct tcf_proto_ops {
 	struct tcf_proto_ops	*next;
 	char			kind[IFNAMSIZ];
 
@@ -164,8 +157,7 @@
 	struct module		*owner;
 };
 
-struct tcf_proto
-{
+struct tcf_proto {
 	/* Fast access part */
 	struct tcf_proto	*next;
 	void			*root;
@@ -261,14 +253,12 @@
 extern struct Qdisc_ops pfifo_fast_ops;
 extern struct Qdisc_ops mq_qdisc_ops;
 
-struct Qdisc_class_common
-{
+struct Qdisc_class_common {
 	u32			classid;
 	struct hlist_node	hnode;
 };
 
-struct Qdisc_class_hash
-{
+struct Qdisc_class_hash {
 	struct hlist_head	*hash;
 	unsigned int		hashsize;
 	unsigned int		hashmask;
diff --git a/include/net/scm.h b/include/net/scm.h
index cf48c80..8360e47 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -12,15 +12,13 @@
  */
 #define SCM_MAX_FD	255
 
-struct scm_fp_list
-{
+struct scm_fp_list {
 	struct list_head	list;
 	int			count;
 	struct file		*fp[SCM_MAX_FD];
 };
 
-struct scm_cookie
-{
+struct scm_cookie {
 	struct ucred		creds;		/* Skb credentials	*/
 	struct scm_fp_list	*fp;		/* Passed files		*/
 #ifdef CONFIG_SECURITY_NETWORK
@@ -88,8 +86,7 @@
 static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
 				struct scm_cookie *scm, int flags)
 {
-	if (!msg->msg_control)
-	{
+	if (!msg->msg_control) {
 		if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp)
 			msg->msg_flags |= MSG_CTRUNC;
 		scm_destroy(scm);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 8a6d529..78740ec 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -227,8 +227,7 @@
 #endif /* !TEST_FRAME */
 
 /* sctp mib definitions */
-enum
-{
+enum {
 	SCTP_MIB_NUM = 0,
 	SCTP_MIB_CURRESTAB,			/* CurrEstab */
 	SCTP_MIB_ACTIVEESTABS,			/* ActiveEstabs */
diff --git a/include/net/sock.h b/include/net/sock.h
index 1364428..55de3bd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -107,6 +107,7 @@
  *	@skc_node: main hash linkage for various protocol lookup tables
  *	@skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
  *	@skc_refcnt: reference count
+ *	@skc_tx_queue_mapping: tx queue number for this connection
  *	@skc_hash: hash value used with various protocol lookup tables
  *	@skc_family: network address family
  *	@skc_state: Connection state
@@ -128,6 +129,7 @@
 		struct hlist_nulls_node skc_nulls_node;
 	};
 	atomic_t		skc_refcnt;
+	int			skc_tx_queue_mapping;
 
 	unsigned int		skc_hash;
 	unsigned short		skc_family;
@@ -215,6 +217,7 @@
 #define sk_node			__sk_common.skc_node
 #define sk_nulls_node		__sk_common.skc_nulls_node
 #define sk_refcnt		__sk_common.skc_refcnt
+#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
 
 #define sk_copy_start		__sk_common.skc_hash
 #define sk_hash			__sk_common.skc_hash
@@ -1094,8 +1097,29 @@
 extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 			  const int nested);
 
+static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+{
+	sk->sk_tx_queue_mapping = tx_queue;
+}
+
+static inline void sk_tx_queue_clear(struct sock *sk)
+{
+	sk->sk_tx_queue_mapping = -1;
+}
+
+static inline int sk_tx_queue_get(const struct sock *sk)
+{
+	return sk->sk_tx_queue_mapping;
+}
+
+static inline bool sk_tx_queue_recorded(const struct sock *sk)
+{
+	return (sk && sk->sk_tx_queue_mapping >= 0);
+}
+
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 {
+	sk_tx_queue_clear(sk);
 	sk->sk_socket = sock;
 }
 
@@ -1152,6 +1176,7 @@
 {
 	struct dst_entry *old_dst;
 
+	sk_tx_queue_clear(sk);
 	old_dst = sk->sk_dst_cache;
 	sk->sk_dst_cache = dst;
 	dst_release(old_dst);
@@ -1170,6 +1195,7 @@
 {
 	struct dst_entry *old_dst;
 
+	sk_tx_queue_clear(sk);
 	old_dst = sk->sk_dst_cache;
 	sk->sk_dst_cache = NULL;
 	dst_release(old_dst);
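
An aside on the new per-socket tx queue cache (illustration, not from the
patch): a mapping is only trusted once one has been recorded, and the two hunks
above clear it whenever the socket's cached route changes, so a stale queue is
never reused across a re-route. A transmit path built on these helpers might
look roughly like this, with skb_tx_hash() as the existing fallback:

/* Sketch of queue selection using the cached per-socket mapping. */
static u16 pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	u16 queue;

	if (sk_tx_queue_recorded(sk))	/* NULL-safe: also checks sk itself */
		return sk_tx_queue_get(sk);

	queue = skb_tx_hash(dev, skb);
	if (sk && sk->sk_dst_cache)	/* only cache for sockets with a route */
		sk_tx_queue_set(sk, queue);

	return queue;
}
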
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 6abb3ed..e103fe0 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -26,7 +26,9 @@
 	struct tcf_common	common;
 	u32			flags;
 	u32     		priority;
+	u32     		mark;
 	u16			queue_mapping;
+	/* XXX: 16-bit pad here? */
 };
 #define to_skbedit(pc) \
 	container_of(pc, struct tcf_skbedit, common)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 03a49c7..bf20f88 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -359,8 +359,7 @@
 		inet_rsk(req)->ecn_ok = 1;
 }
 
-enum tcp_tw_status
-{
+enum tcp_tw_status {
 	TCP_TW_SUCCESS = 0,
 	TCP_TW_RST = 1,
 	TCP_TW_ACK = 2,
@@ -409,7 +408,8 @@
 
 extern void			tcp_parse_options(struct sk_buff *skb,
 						  struct tcp_options_received *opt_rx,
-						  int estab);
+						  int estab,
+						  struct dst_entry *dst);
 
 extern u8			*tcp_parse_md5sig_option(struct tcphdr *th);
 
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d9c6dbb..7f38ef5 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -121,8 +121,7 @@
 };
 
 /* Full description of state of transformer. */
-struct xfrm_state
-{
+struct xfrm_state {
 #ifdef CONFIG_NET_NS
 	struct net		*xs_net;
 #endif
@@ -237,8 +236,7 @@
 };
 
 /* callback structure passed from either netlink or pfkey */
-struct km_event
-{
+struct km_event {
 	union {
 		u32 hard;
 		u32 proto;
@@ -313,8 +311,7 @@
 
 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
-struct xfrm_type
-{
+struct xfrm_type {
 	char			*description;
 	struct module		*owner;
 	__u8			proto;
@@ -420,8 +417,7 @@
 		return x->inner_mode_iaf;
 }
 
-struct xfrm_tmpl
-{
+struct xfrm_tmpl {
 /* id in template is interpreted as:
  * daddr - destination of tunnel, may be zero for transport mode.
  * spi   - zero to acquire spi. Not zero if spi is static, then
@@ -468,8 +464,7 @@
 	u32 seq;
 };
 
-struct xfrm_policy
-{
+struct xfrm_policy {
 #ifdef CONFIG_NET_NS
 	struct net		*xp_net;
 #endif
@@ -538,8 +533,7 @@
 /* default seq threshold size */
 #define XFRM_AE_SEQT_SIZE		2
 
-struct xfrm_mgr
-{
+struct xfrm_mgr {
 	struct list_head	list;
 	char			*id;
 	int			(*notify)(struct xfrm_state *x, struct km_event *c);
@@ -626,8 +620,7 @@
 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
 
 /* Audit Information */
-struct xfrm_audit
-{
+struct xfrm_audit {
 	u32	secid;
 	uid_t	loginuid;
 	u32	sessionid;
@@ -871,8 +864,7 @@
  * bundles differing by session id. All the bundles grow from a parent
  * policy rule.
  */
-struct xfrm_dst
-{
+struct xfrm_dst {
 	union {
 		struct dst_entry	dst;
 		struct rtable		rt;
@@ -907,8 +899,7 @@
 
 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
 
-struct sec_path
-{
+struct sec_path {
 	atomic_t		refcnt;
 	int			len;
 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8836575..39f8d01 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -140,7 +140,7 @@
 	vlan_group_free(container_of(rcu, struct vlan_group, rcu));
 }
 
-void unregister_vlan_dev(struct net_device *dev)
+void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 {
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	struct net_device *real_dev = vlan->real_dev;
@@ -159,12 +159,13 @@
 	if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
 		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
 
-	vlan_group_set_device(grp, vlan_id, NULL);
 	grp->nr_vlans--;
 
-	synchronize_net();
+	vlan_group_set_device(grp, vlan_id, NULL);
+	if (!grp->killall)
+		synchronize_net();
 
-	unregister_netdevice(dev);
+	unregister_netdevice_queue(dev, head);
 
 	/* If the group is now empty, kill off the group. */
 	if (grp->nr_vlans == 0) {
@@ -427,6 +428,7 @@
 	struct vlan_group *grp;
 	int i, flgs;
 	struct net_device *vlandev;
+	LIST_HEAD(list);
 
 	if (is_vlan_dev(dev))
 		__vlan_device_event(dev, event);
@@ -525,6 +527,8 @@
 
 	case NETDEV_UNREGISTER:
 		/* Delete all VLANs for this dev. */
+		grp->killall = 1;
+
 		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
 			vlandev = vlan_group_get_device(grp, i);
 			if (!vlandev)
@@ -535,8 +539,9 @@
 			if (grp->nr_vlans == 1)
 				i = VLAN_GROUP_ARRAY_LEN;
 
-			unregister_vlan_dev(vlandev);
+			unregister_vlan_dev(vlandev, &list);
 		}
+		unregister_netdevice_many(&list);
 		break;
 	}
 
@@ -642,7 +647,7 @@
 		err = -EPERM;
 		if (!capable(CAP_NET_ADMIN))
 			break;
-		unregister_vlan_dev(dev);
+		unregister_vlan_dev(dev, NULL);
 		err = 0;
 		break;
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 82570bc..68f9290 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -82,14 +82,14 @@
 int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id);
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
-void unregister_vlan_dev(struct net_device *dev);
+void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
 
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 					    u16 vlan_tci)
 {
 	struct vlan_dev_info *vip = vlan_dev_info(dev);
 
-	return vip->ingress_priority_map[(vlan_tci >> 13) & 0x7];
+	return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
 }
 
 #ifdef CONFIG_VLAN_8021Q_GVRP
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 7f7de1a..8d5ca2a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -74,8 +74,9 @@
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);
 
-static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
-			   unsigned int vlan_tci, struct sk_buff *skb)
+static gro_result_t
+vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+		unsigned int vlan_tci, struct sk_buff *skb)
 {
 	struct sk_buff *p;
 
@@ -101,11 +102,12 @@
 	return GRO_DROP;
 }
 
-int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
-		     unsigned int vlan_tci, struct sk_buff *skb)
+gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+			      unsigned int vlan_tci, struct sk_buff *skb)
 {
 	if (netpoll_rx_on(skb))
-		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
+		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
+			? GRO_DROP : GRO_NORMAL;
 
 	skb_gro_reset_offset(skb);
 
@@ -113,17 +115,18 @@
 }
 EXPORT_SYMBOL(vlan_gro_receive);
 
-int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
-		   unsigned int vlan_tci)
+gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+			    unsigned int vlan_tci)
 {
 	struct sk_buff *skb = napi_frags_skb(napi);
 
 	if (!skb)
-		return NET_RX_DROP;
+		return GRO_DROP;
 
 	if (netpoll_rx_on(skb)) {
 		skb->protocol = eth_type_trans(skb, skb->dev);
-		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
+		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
+			? GRO_DROP : GRO_NORMAL;
 	}
 
 	return napi_frags_finish(napi, skb,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4198ec5..790fd55 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -393,7 +393,7 @@
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	struct vlan_priority_tci_mapping *mp = NULL;
 	struct vlan_priority_tci_mapping *np;
-	u32 vlan_qos = (vlan_prio << 13) & 0xE000;
+	u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
 
 	/* See if a priority mapping exists.. */
 	mp = vlan->egress_priority_map[skb_prio & 0xF];
@@ -626,6 +626,17 @@
 		rc = ops->ndo_fcoe_disable(real_dev);
 	return rc;
 }
+
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_get_wwn)
+		rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
+	return rc;
+}
 #endif
 
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
@@ -791,6 +802,7 @@
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
 	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,
 	.ndo_fcoe_disable	= vlan_dev_fcoe_disable,
+	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
 #endif
 };
 
@@ -813,6 +825,7 @@
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
 	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,
 	.ndo_fcoe_disable	= vlan_dev_fcoe_disable,
+	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
 #endif
 };
 
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index abe3801..4b0ce2e 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1021,7 +1021,8 @@
  * Create a socket. Initialise the socket, blank the addresses
  * set the state.
  */
-static int atalk_create(struct net *net, struct socket *sock, int protocol)
+static int atalk_create(struct net *net, struct socket *sock, int protocol,
+			int kern)
 {
 	struct sock *sk;
 	int rc = -ESOCKTNOSUPPORT;
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index a6e1fdb..8d74e62 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -127,7 +127,8 @@
 };
 
 
-static int pvc_create(struct net *net, struct socket *sock,int protocol)
+static int pvc_create(struct net *net, struct socket *sock, int protocol,
+		      int kern)
 {
 	if (net != &init_net)
 		return -EAFNOSUPPORT;
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 8193542..c739507 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -25,7 +25,7 @@
 #include "signaling.h"
 #include "addr.h"
 
-static int svc_create(struct net *net, struct socket *sock,int protocol);
+static int svc_create(struct net *net, struct socket *sock, int protocol, int kern);
 
 /*
  * Note: since all this is still nicely synchronized with the signaling demon,
@@ -330,7 +330,7 @@
 
 	lock_sock(sk);
 
-	error = svc_create(sock_net(sk), newsock,0);
+	error = svc_create(sock_net(sk), newsock, 0, 0);
 	if (error)
 		goto out;
 
@@ -650,7 +650,8 @@
 };
 
 
-static int svc_create(struct net *net, struct socket *sock,int protocol)
+static int svc_create(struct net *net, struct socket *sock, int protocol,
+		      int kern)
 {
 	int error;
 
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index f05306f..d6ddfa4 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -369,6 +369,9 @@
 	if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
 		return -EINVAL;
 
+	if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
+		return -EINVAL;
+
 	digi.ndigi = ax25_ctl.digi_count;
 	for (k = 0; k < digi.ndigi; k++)
 		digi.calls[k] = ax25_ctl.digi_addr[k];
@@ -418,14 +421,10 @@
 		break;
 
 	case AX25_T3:
-		if (ax25_ctl.arg < 0)
-			goto einval_put;
 		ax25->t3 = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_IDLE:
-		if (ax25_ctl.arg < 0)
-			goto einval_put;
 		ax25->idle = ax25_ctl.arg * 60 * HZ;
 		break;
 
@@ -800,7 +799,8 @@
 	.obj_size = sizeof(struct sock),
 };
 
-static int ax25_create(struct net *net, struct socket *sock, int protocol)
+static int ax25_create(struct net *net, struct socket *sock, int protocol,
+		       int kern)
 {
 	struct sock *sk;
 	ax25_cb *ax25;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 399e59c..087cc51 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -126,7 +126,8 @@
 }
 EXPORT_SYMBOL(bt_sock_unregister);
 
-static int bt_sock_create(struct net *net, struct socket *sock, int proto)
+static int bt_sock_create(struct net *net, struct socket *sock, int proto,
+			  int kern)
 {
 	int err;
 
@@ -144,7 +145,7 @@
 	read_lock(&bt_proto_lock);
 
 	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
-		err = bt_proto[proto]->create(net, sock, proto);
+		err = bt_proto[proto]->create(net, sock, proto, kern);
 		bt_sock_reclassify_lock(sock, proto);
 		module_put(bt_proto[proto]->owner);
 	}
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 0a2c546..2ff6ac7 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -195,7 +195,8 @@
 	.obj_size	= sizeof(struct bt_sock)
 };
 
-static int bnep_sock_create(struct net *net, struct socket *sock, int protocol)
+static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
+			    int kern)
 {
 	struct sock *sk;
 
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index de7c804..978cc3a 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -190,7 +190,8 @@
 	.obj_size	= sizeof(struct bt_sock)
 };
 
-static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol)
+static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
+			    int kern)
 {
 	struct sock *sk;
 
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index e7395f2..1ca5c7c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -621,7 +621,8 @@
 	.obj_size	= sizeof(struct hci_pinfo)
 };
 
-static int hci_sock_create(struct net *net, struct socket *sock, int protocol)
+static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
+			   int kern)
 {
 	struct sock *sk;
 
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 7f939ce..2bc6f6a 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -92,6 +92,8 @@
 
 	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
+	dev_set_drvdata(&conn->dev, conn);
+
 	if (device_add(&conn->dev) < 0) {
 		BT_ERR("Failed to register connection device");
 		return;
@@ -144,8 +146,6 @@
 	conn->dev.class = bt_class;
 	conn->dev.parent = &hdev->dev;
 
-	dev_set_drvdata(&conn->dev, conn);
-
 	device_initialize(&conn->dev);
 
 	INIT_WORK(&conn->work_add, add_conn);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 4beb6a7..9cfef68 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -241,7 +241,8 @@
 	.obj_size	= sizeof(struct bt_sock)
 };
 
-static int hidp_sock_create(struct net *net, struct socket *sock, int protocol)
+static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
+			    int kern)
 {
 	struct sock *sk;
 
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 4b66bd5..ff0233d 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -555,12 +555,12 @@
 
 	conn->feat_mask = 0;
 
-	setup_timer(&conn->info_timer, l2cap_info_timeout,
-						(unsigned long) conn);
-
 	spin_lock_init(&conn->lock);
 	rwlock_init(&conn->chan_list.lock);
 
+	setup_timer(&conn->info_timer, l2cap_info_timeout,
+						(unsigned long) conn);
+
 	conn->disc_reason = 0x13;
 
 	return conn;
@@ -783,6 +783,9 @@
 	/* Default config options */
 	pi->conf_len = 0;
 	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+	skb_queue_head_init(TX_QUEUE(sk));
+	skb_queue_head_init(SREJ_QUEUE(sk));
+	INIT_LIST_HEAD(SREJ_LIST(sk));
 }
 
 static struct proto l2cap_proto = {
@@ -816,7 +819,8 @@
 	return sk;
 }
 
-static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
+static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
+			     int kern)
 {
 	struct sock *sk;
 
@@ -828,7 +832,7 @@
 			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
 
-	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
+	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
 		return -EPERM;
 
 	sock->ops = &l2cap_sock_ops;
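
The extra kern argument threaded through every ->create() handler in this
series lets in-kernel socket creation be told apart from user requests; as the
L2CAP hunk above shows, kernel callers may then skip user-oriented capability
checks. A hedged sketch of the kernel-side entry point, assuming the existing
sock_create_kern() helper (which reaches these handlers with kern == 1):

/* Sketch: an in-kernel raw L2CAP socket, created without CAP_NET_RAW. */
static int mydrv_make_l2cap_sock(struct socket **sock)
{
	/* ends up in l2cap_sock_create() above with kern == 1 */
	return sock_create_kern(PF_BLUETOOTH, SOCK_RAW, BTPROTO_L2CAP, sock);
}
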
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index d3bfc1b..4b5968d 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -323,7 +323,8 @@
 	return sk;
 }
 
-static int rfcomm_sock_create(struct net *net, struct socket *sock, int protocol)
+static int rfcomm_sock_create(struct net *net, struct socket *sock,
+			      int protocol, int kern)
 {
 	struct sock *sk;
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 694a655..dd8f6ec 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -430,7 +430,8 @@
 	return sk;
 }
 
-static int sco_sock_create(struct net *net, struct socket *sock, int protocol)
+static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
+			   int kern)
 {
 	struct sock *sk;
 
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index b1b3b0f..a6f74b2 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -154,7 +154,7 @@
 }
 
 /* called with RTNL */
-static void del_br(struct net_bridge *br)
+static void del_br(struct net_bridge *br, struct list_head *head)
 {
 	struct net_bridge_port *p, *n;
 
@@ -165,7 +165,7 @@
 	del_timer_sync(&br->gc_timer);
 
 	br_sysfs_delbr(br->dev);
-	unregister_netdevice(br->dev);
+	unregister_netdevice_queue(br->dev, head);
 }
 
 static struct net_device *new_bridge_dev(struct net *net, const char *name)
@@ -323,7 +323,7 @@
 	}
 
 	else
-		del_br(netdev_priv(dev));
+		del_br(netdev_priv(dev), NULL);
 
 	rtnl_unlock();
 	return ret;
@@ -377,12 +377,16 @@
 	struct net_bridge_port *p;
 	int err = 0;
 
-	if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER)
+	/* Don't allow bridging non-ethernet like devices */
+	if ((dev->flags & IFF_LOOPBACK) ||
+	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN)
 		return -EINVAL;
 
+	/* No bridging of bridges */
 	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
 		return -ELOOP;
 
+	/* Device is already being bridged */
 	if (dev->br_port != NULL)
 		return -EBUSY;
 
@@ -462,15 +466,14 @@
 void br_net_exit(struct net *net)
 {
 	struct net_device *dev;
+	LIST_HEAD(list);
 
 	rtnl_lock();
-restart:
-	for_each_netdev(net, dev) {
-		if (dev->priv_flags & IFF_EBRIDGE) {
-			del_br(netdev_priv(dev));
-			goto restart;
-		}
-	}
+	for_each_netdev(net, dev)
+		if (dev->priv_flags & IFF_EBRIDGE)
+			del_br(netdev_priv(dev), &list);
+
+	unregister_netdevice_many(&list);
 	rtnl_unlock();
 
 }
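
br_net_exit() above is an early user of the unregister_netdevice_queue() /
unregister_netdevice_many() pair introduced later in this diff: removals are
queued on a local list and torn down together, paying for one RCU grace period
instead of one per device. A rough sketch of the same pattern for an arbitrary
driver, where is_mydev() is a hypothetical predicate:

/* Sketch: batch several device removals under a single grace period. */
static void mydrv_exit_net(struct net *net)
{
	struct net_device *dev;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev(net, dev)
		if (is_mydev(dev))		/* hypothetical test */
			unregister_netdevice_queue(dev, &list);
	unregister_netdevice_many(&list);	/* one pass for all of them */
	rtnl_unlock();
}
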
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 6a6433d..2af6e4a 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -81,6 +81,7 @@
 	return num;
 }
 
+/* called with RTNL */
 static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
 {
 	struct net_device *dev;
@@ -89,7 +90,7 @@
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	dev = dev_get_by_index(dev_net(br->dev), ifindex);
+	dev = __dev_get_by_index(dev_net(br->dev), ifindex);
 	if (dev == NULL)
 		return -EINVAL;
 
@@ -98,7 +99,6 @@
 	else
 		ret = br_del_if(br, dev);
 
-	dev_put(dev);
 	return ret;
 }
 
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3f2eb27..833bd83 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -114,7 +114,8 @@
 	skb_queue_purge(&sk->sk_receive_queue);
 }
 
-static int can_create(struct net *net, struct socket *sock, int protocol)
+static int can_create(struct net *net, struct socket *sock, int protocol,
+		      int kern)
 {
 	struct sock *sk;
 	struct can_proto *cp;
@@ -160,11 +161,6 @@
 		goto errout;
 	}
 
-	if (cp->capability >= 0 && !capable(cp->capability)) {
-		err = -EPERM;
-		goto errout;
-	}
-
 	sock->ops = cp->ops;
 
 	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 2f47039..67b5433 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1576,7 +1576,6 @@
 static struct can_proto bcm_can_proto __read_mostly = {
 	.type       = SOCK_DGRAM,
 	.protocol   = CAN_BCM,
-	.capability = -1,
 	.ops        = &bcm_ops,
 	.prot       = &bcm_proto,
 };
diff --git a/net/can/raw.c b/net/can/raw.c
index 962fc9f..abca920 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -424,8 +424,6 @@
 
 	if (level != SOL_CAN_RAW)
 		return -EINVAL;
-	if (optlen < 0)
-		return -EINVAL;
 
 	switch (optname) {
 
@@ -744,7 +742,6 @@
 static struct can_proto raw_can_proto __read_mostly = {
 	.type       = SOCK_RAW,
 	.protocol   = CAN_RAW,
-	.capability = -1,
 	.ops        = &raw_ops,
 	.prot       = &raw_proto,
 };
diff --git a/net/compat.c b/net/compat.c
index e13f525..6a2f75f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -390,9 +390,6 @@
 	int err;
 	struct socket *sock;
 
-	if (optlen < 0)
-		return -EINVAL;
-
 	if ((sock = sockfd_lookup(fd, &err))!=NULL)
 	{
 		err = security_socket_setsockopt(sock,level,optname);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 4d57f5e..95c2e08 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -224,6 +224,15 @@
 	consume_skb(skb);
 	sk_mem_reclaim_partial(sk);
 }
+EXPORT_SYMBOL(skb_free_datagram);
+
+void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
+{
+	lock_sock(sk);
+	skb_free_datagram(sk, skb);
+	release_sock(sk);
+}
+EXPORT_SYMBOL(skb_free_datagram_locked);
 
 /**
  *	skb_kill_datagram - Free a datagram skbuff forcibly
@@ -753,5 +762,4 @@
 EXPORT_SYMBOL(datagram_poll);
 EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
 EXPORT_SYMBOL(skb_copy_datagram_iovec);
-EXPORT_SYMBOL(skb_free_datagram);
 EXPORT_SYMBOL(skb_recv_datagram);
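
The new skb_free_datagram_locked() helper above simply wraps skb_free_datagram()
in lock_sock()/release_sock(), so callers that must keep socket memory
accounting consistent (UDP's recvmsg() is the motivating user in this series)
can free the skb under the socket lock. Hedged sketch:

/* Sketch: tail of a hypothetical datagram recvmsg() implementation. */
static int mydgram_recvmsg_done(struct sock *sk, struct sk_buff *skb, int err)
{
	/* free under the socket lock so sk_mem_reclaim_partial() cannot race
	 * with other accounting updates on this socket */
	skb_free_datagram_locked(sk, skb);
	return err;
}
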
diff --git a/net/core/dev.c b/net/core/dev.c
index 28b0b9e..bf629ac 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -175,7 +175,7 @@
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
  *
- * Pure readers hold dev_base_lock for reading.
+ * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
  *
  * Writers must hold the rtnl semaphore while they loop through the
  * dev_base_head list, and hold dev_base_lock for writing when they do the
@@ -193,18 +193,15 @@
 DEFINE_RWLOCK(dev_base_lock);
 EXPORT_SYMBOL(dev_base_lock);
 
-#define NETDEV_HASHBITS	8
-#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
-
 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
 {
 	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
-	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
+	return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
 }
 
 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 {
-	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
+	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 }
 
 /* Device list insertion */
@@ -215,23 +212,26 @@
 	ASSERT_RTNL();
 
 	write_lock_bh(&dev_base_lock);
-	list_add_tail(&dev->dev_list, &net->dev_base_head);
-	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
-	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
+	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
+	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
+	hlist_add_head_rcu(&dev->index_hlist,
+			   dev_index_hash(net, dev->ifindex));
 	write_unlock_bh(&dev_base_lock);
 	return 0;
 }
 
-/* Device list removal */
+/* Device list removal
+ * caller must respect a RCU grace period before freeing/reusing dev
+ */
 static void unlist_netdevice(struct net_device *dev)
 {
 	ASSERT_RTNL();
 
 	/* Unlink dev from the device chain */
 	write_lock_bh(&dev_base_lock);
-	list_del(&dev->dev_list);
-	hlist_del(&dev->name_hlist);
-	hlist_del(&dev->index_hlist);
+	list_del_rcu(&dev->dev_list);
+	hlist_del_rcu(&dev->name_hlist);
+	hlist_del_rcu(&dev->index_hlist);
 	write_unlock_bh(&dev_base_lock);
 }
 
@@ -587,18 +587,44 @@
 struct net_device *__dev_get_by_name(struct net *net, const char *name)
 {
 	struct hlist_node *p;
+	struct net_device *dev;
+	struct hlist_head *head = dev_name_hash(net, name);
 
-	hlist_for_each(p, dev_name_hash(net, name)) {
-		struct net_device *dev
-			= hlist_entry(p, struct net_device, name_hlist);
+	hlist_for_each_entry(dev, p, head, name_hlist)
 		if (!strncmp(dev->name, name, IFNAMSIZ))
 			return dev;
-	}
+
 	return NULL;
 }
 EXPORT_SYMBOL(__dev_get_by_name);
 
 /**
+ *	dev_get_by_name_rcu	- find a device by its name
+ *	@net: the applicable net namespace
+ *	@name: name to find
+ *
+ *	Find an interface by name.
+ *	If the name is found a pointer to the device is returned.
+ * 	If the name is not found then %NULL is returned.
+ *	The reference counters are not incremented so the caller must be
+ *	careful with locks. The caller must hold RCU lock.
+ */
+
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
+{
+	struct hlist_node *p;
+	struct net_device *dev;
+	struct hlist_head *head = dev_name_hash(net, name);
+
+	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
+		if (!strncmp(dev->name, name, IFNAMSIZ))
+			return dev;
+
+	return NULL;
+}
+EXPORT_SYMBOL(dev_get_by_name_rcu);
+
+/**
  *	dev_get_by_name		- find a device by its name
  *	@net: the applicable net namespace
  *	@name: name to find
@@ -614,11 +640,11 @@
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_name(net, name);
+	rcu_read_lock();
+	dev = dev_get_by_name_rcu(net, name);
 	if (dev)
 		dev_hold(dev);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return dev;
 }
 EXPORT_SYMBOL(dev_get_by_name);
@@ -638,17 +664,42 @@
 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
 {
 	struct hlist_node *p;
+	struct net_device *dev;
+	struct hlist_head *head = dev_index_hash(net, ifindex);
 
-	hlist_for_each(p, dev_index_hash(net, ifindex)) {
-		struct net_device *dev
-			= hlist_entry(p, struct net_device, index_hlist);
+	hlist_for_each_entry(dev, p, head, index_hlist)
 		if (dev->ifindex == ifindex)
 			return dev;
-	}
+
 	return NULL;
 }
 EXPORT_SYMBOL(__dev_get_by_index);
 
+/**
+ *	dev_get_by_index_rcu - find a device by its ifindex
+ *	@net: the applicable net namespace
+ *	@ifindex: index of device
+ *
+ *	Search for an interface by index. Returns %NULL if the device
+ *	is not found or a pointer to the device. The device has not
+ *	had its reference counter increased so the caller must be careful
+ *	about locking. The caller must hold RCU lock.
+ */
+
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
+{
+	struct hlist_node *p;
+	struct net_device *dev;
+	struct hlist_head *head = dev_index_hash(net, ifindex);
+
+	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
+		if (dev->ifindex == ifindex)
+			return dev;
+
+	return NULL;
+}
+EXPORT_SYMBOL(dev_get_by_index_rcu);
+
 
 /**
  *	dev_get_by_index - find a device by its ifindex
@@ -665,11 +716,11 @@
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_index(net, ifindex);
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, ifindex);
 	if (dev)
 		dev_hold(dev);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return dev;
 }
 EXPORT_SYMBOL(dev_get_by_index);
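
The _rcu lookup variants above return a device pointer without bumping its
reference count; the pointer is only valid inside the rcu_read_lock() section,
so callers that merely copy a field out can skip the hold/put pair entirely.
Minimal sketch:

/* Sketch: resolve a name to an ifindex under the RCU read lock. */
static int mydrv_name_to_ifindex(struct net *net, const char *name)
{
	struct net_device *dev;
	int ifindex = -ENODEV;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		ifindex = dev->ifindex;	/* dev must not be used after unlock */
	rcu_read_unlock();

	return ifindex;
}
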
@@ -748,15 +799,15 @@
 	struct net_device *dev, *ret;
 
 	ret = NULL;
-	read_lock(&dev_base_lock);
-	for_each_netdev(net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
 		if (((dev->flags ^ if_flags) & mask) == 0) {
 			dev_hold(dev);
 			ret = dev;
 			break;
 		}
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL(dev_get_by_flags);
@@ -935,7 +986,12 @@
 
 	write_lock_bh(&dev_base_lock);
 	hlist_del(&dev->name_hlist);
-	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
+	write_unlock_bh(&dev_base_lock);
+
+	synchronize_rcu();
+
+	write_lock_bh(&dev_base_lock);
+	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
 	write_unlock_bh(&dev_base_lock);
 
 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
@@ -1037,9 +1093,9 @@
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_name(net, name);
-	read_unlock(&dev_base_lock);
+	rcu_read_lock();
+	dev = dev_get_by_name_rcu(net, name);
+	rcu_read_unlock();
 
 	if (!dev && capable(CAP_NET_ADMIN))
 		request_module("%s", name);
@@ -1791,13 +1847,25 @@
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
-	u16 queue_index = 0;
+	u16 queue_index;
+	struct sock *sk = skb->sk;
 
-	if (ops->ndo_select_queue)
-		queue_index = ops->ndo_select_queue(dev, skb);
-	else if (dev->real_num_tx_queues > 1)
-		queue_index = skb_tx_hash(dev, skb);
+	if (sk_tx_queue_recorded(sk)) {
+		queue_index = sk_tx_queue_get(sk);
+	} else {
+		const struct net_device_ops *ops = dev->netdev_ops;
+
+		if (ops->ndo_select_queue) {
+			queue_index = ops->ndo_select_queue(dev, skb);
+		} else {
+			queue_index = 0;
+			if (dev->real_num_tx_queues > 1)
+				queue_index = skb_tx_hash(dev, skb);
+
+			if (sk && sk->sk_dst_cache)
+				sk_tx_queue_set(sk, queue_index);
+		}
+	}
 
 	skb_set_queue_mapping(skb, queue_index);
 	return netdev_get_tx_queue(dev, queue_index);
@@ -2291,7 +2359,7 @@
 	if (!skb->tstamp.tv64)
 		net_timestamp(skb);
 
-	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
+	if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
 		return NET_RX_SUCCESS;
 
 	/* if we've gotten here through NAPI, check netpoll */
@@ -2439,7 +2507,7 @@
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
 	struct packet_type *ptype;
@@ -2447,7 +2515,7 @@
 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
 	int same_flow;
 	int mac_len;
-	int ret;
+	enum gro_result ret;
 
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
@@ -2531,7 +2599,8 @@
 }
 EXPORT_SYMBOL(dev_gro_receive);
 
-static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+static gro_result_t
+__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
 
@@ -2548,24 +2617,25 @@
 	return dev_gro_receive(napi, skb);
 }
 
-int napi_skb_finish(int ret, struct sk_buff *skb)
+gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
-	int err = NET_RX_SUCCESS;
-
 	switch (ret) {
 	case GRO_NORMAL:
-		return netif_receive_skb(skb);
+		if (netif_receive_skb(skb))
+			ret = GRO_DROP;
+		break;
 
 	case GRO_DROP:
-		err = NET_RX_DROP;
-		/* fall through */
-
 	case GRO_MERGED_FREE:
 		kfree_skb(skb);
 		break;
+
+	case GRO_HELD:
+	case GRO_MERGED:
+		break;
 	}
 
-	return err;
+	return ret;
 }
 EXPORT_SYMBOL(napi_skb_finish);
 
@@ -2585,7 +2655,7 @@
 }
 EXPORT_SYMBOL(skb_gro_reset_offset);
 
-int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	skb_gro_reset_offset(skb);
 
@@ -2615,31 +2685,30 @@
 }
 EXPORT_SYMBOL(napi_get_frags);
 
-int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
+gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
+			       gro_result_t ret)
 {
-	int err = NET_RX_SUCCESS;
-
 	switch (ret) {
 	case GRO_NORMAL:
 	case GRO_HELD:
 		skb->protocol = eth_type_trans(skb, napi->dev);
 
-		if (ret == GRO_NORMAL)
-			return netif_receive_skb(skb);
-
-		skb_gro_pull(skb, -ETH_HLEN);
+		if (ret == GRO_HELD)
+			skb_gro_pull(skb, -ETH_HLEN);
+		else if (netif_receive_skb(skb))
+			ret = GRO_DROP;
 		break;
 
 	case GRO_DROP:
-		err = NET_RX_DROP;
-		/* fall through */
-
 	case GRO_MERGED_FREE:
 		napi_reuse_skb(napi, skb);
 		break;
+
+	case GRO_MERGED:
+		break;
 	}
 
-	return err;
+	return ret;
 }
 EXPORT_SYMBOL(napi_frags_finish);
 
@@ -2680,12 +2749,12 @@
 }
 EXPORT_SYMBOL(napi_frags_skb);
 
-int napi_gro_frags(struct napi_struct *napi)
+gro_result_t napi_gro_frags(struct napi_struct *napi)
 {
 	struct sk_buff *skb = napi_frags_skb(napi);
 
 	if (!skb)
-		return NET_RX_DROP;
+		return GRO_DROP;
 
 	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
 }
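
With the conversion above, napi_gro_receive() and napi_gro_frags() report an
explicit gro_result_t rather than a NET_RX_* value, so a driver's poll loop can
keep its drop counter accurate. Hedged sketch; the myeth_* names are invented:

/* Sketch: feeding one received frame into GRO from a NAPI poll handler. */
struct myeth_priv {
	struct napi_struct	napi;
	struct net_device	*netdev;
};

static void myeth_rx_one(struct myeth_priv *priv, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, priv->netdev);

	if (napi_gro_receive(&priv->napi, skb) == GRO_DROP)
		priv->netdev->stats.rx_dropped++;
}
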
@@ -2930,15 +2999,15 @@
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_index(net, ifr.ifr_ifindex);
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
 	if (!dev) {
-		read_unlock(&dev_base_lock);
+		rcu_read_unlock();
 		return -ENODEV;
 	}
 
 	strcpy(ifr.ifr_name, dev->name);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 
 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 		return -EFAULT;
@@ -3008,18 +3077,18 @@
  *	in detail.
  */
 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(dev_base_lock)
+	__acquires(RCU)
 {
 	struct net *net = seq_file_net(seq);
 	loff_t off;
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
+	rcu_read_lock();
 	if (!*pos)
 		return SEQ_START_TOKEN;
 
 	off = 1;
-	for_each_netdev(net, dev)
+	for_each_netdev_rcu(net, dev)
 		if (off++ == *pos)
 			return dev;
 
@@ -3028,16 +3097,18 @@
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct net *net = seq_file_net(seq);
+	struct net_device *dev = (v == SEQ_START_TOKEN) ?
+				  first_net_device(seq_file_net(seq)) :
+				  next_net_device((struct net_device *)v);
+
 	++*pos;
-	return v == SEQ_START_TOKEN ?
-		first_net_device(net) : next_net_device((struct net_device *)v);
+	return rcu_dereference(dev);
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
-	__releases(dev_base_lock)
+	__releases(RCU)
 {
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
@@ -4246,12 +4317,12 @@
 EXPORT_SYMBOL(dev_set_mac_address);
 
 /*
- *	Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
+ *	Perform the SIOCxIFxxx calls, inside rcu_read_lock()
  */
 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
 {
 	int err;
-	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
+	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
 
 	if (!dev)
 		return -ENODEV;
@@ -4483,9 +4554,9 @@
 	case SIOCGIFINDEX:
 	case SIOCGIFTXQLEN:
 		dev_load(net, ifr.ifr_name);
-		read_lock(&dev_base_lock);
+		rcu_read_lock();
 		ret = dev_ifsioc_locked(net, &ifr, cmd);
-		read_unlock(&dev_base_lock);
+		rcu_read_unlock();
 		if (!ret) {
 			if (colon)
 				*colon = ':';
@@ -4628,59 +4699,76 @@
 	list_add_tail(&dev->todo_list, &net_todo_list);
 }
 
-static void rollback_registered(struct net_device *dev)
+static void rollback_registered_many(struct list_head *head)
 {
+	struct net_device *dev;
+
 	BUG_ON(dev_boot_phase);
 	ASSERT_RTNL();
 
-	/* Some devices call without registering for initialization unwind. */
-	if (dev->reg_state == NETREG_UNINITIALIZED) {
-		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
-				  "was registered\n", dev->name, dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		/* Some devices call without registering
+		 * for initialization unwind.
+		 */
+		if (dev->reg_state == NETREG_UNINITIALIZED) {
+			pr_debug("unregister_netdevice: device %s/%p never "
+				 "was registered\n", dev->name, dev);
 
-		WARN_ON(1);
-		return;
+			WARN_ON(1);
+			return;
+		}
+
+		BUG_ON(dev->reg_state != NETREG_REGISTERED);
+
+		/* If device is running, close it first. */
+		dev_close(dev);
+
+		/* And unlink it from device chain. */
+		unlist_netdevice(dev);
+
+		dev->reg_state = NETREG_UNREGISTERING;
 	}
 
-	BUG_ON(dev->reg_state != NETREG_REGISTERED);
+	synchronize_net();
 
-	/* If device is running, close it first. */
-	dev_close(dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		/* Shutdown queueing discipline. */
+		dev_shutdown(dev);
 
-	/* And unlink it from device chain. */
-	unlist_netdevice(dev);
 
-	dev->reg_state = NETREG_UNREGISTERING;
+		/* Notify protocols that we are about to destroy
+		 * this device. They should clean up all their state.
+		 */
+		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+		/*
+		 *	Flush the unicast and multicast chains
+		 */
+		dev_unicast_flush(dev);
+		dev_addr_discard(dev);
+
+		if (dev->netdev_ops->ndo_uninit)
+			dev->netdev_ops->ndo_uninit(dev);
+
+		/* Notifier chain MUST detach us from master device. */
+		WARN_ON(dev->master);
+
+		/* Remove entries from kobject tree */
+		netdev_unregister_kobject(dev);
+	}
 
 	synchronize_net();
 
-	/* Shutdown queueing discipline. */
-	dev_shutdown(dev);
+	list_for_each_entry(dev, head, unreg_list)
+		dev_put(dev);
+}
 
+static void rollback_registered(struct net_device *dev)
+{
+	LIST_HEAD(single);
 
-	/* Notify protocols, that we are about to destroy
-	   this device. They should clean all the things.
-	*/
-	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-
-	/*
-	 *	Flush the unicast and multicast chains
-	 */
-	dev_unicast_flush(dev);
-	dev_addr_discard(dev);
-
-	if (dev->netdev_ops->ndo_uninit)
-		dev->netdev_ops->ndo_uninit(dev);
-
-	/* Notifier chain MUST detach us from master device. */
-	WARN_ON(dev->master);
-
-	/* Remove entries from kobject tree */
-	netdev_unregister_kobject(dev);
-
-	synchronize_net();
-
-	dev_put(dev);
+	list_add(&dev->unreg_list, &single);
+	rollback_registered_many(&single);
 }
 
 static void __netdev_init_queue_locks_one(struct net_device *dev,
@@ -5172,6 +5260,7 @@
 	netdev_init_queues(dev);
 
 	INIT_LIST_HEAD(&dev->napi_list);
+	INIT_LIST_HEAD(&dev->unreg_list);
 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
 	setup(dev);
 	strcpy(dev->name, name);
@@ -5236,25 +5325,48 @@
 EXPORT_SYMBOL(synchronize_net);
 
 /**
- *	unregister_netdevice - remove device from the kernel
+ *	unregister_netdevice_queue - remove device from the kernel
  *	@dev: device
- *
+ *	@head: list to queue the device on, or NULL to unregister it now
+ *
  *	This function shuts down a device interface and removes it
  *	from the kernel tables.
+ *	If @head is not NULL, the device is queued to be unregistered later.
  *
  *	Callers must hold the rtnl semaphore.  You may want
  *	unregister_netdev() instead of this.
  */
 
-void unregister_netdevice(struct net_device *dev)
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
 {
 	ASSERT_RTNL();
 
-	rollback_registered(dev);
-	/* Finish processing unregister after unlock */
-	net_set_todo(dev);
+	if (head) {
+		list_move_tail(&dev->unreg_list, head);
+	} else {
+		rollback_registered(dev);
+		/* Finish processing unregister after unlock */
+		net_set_todo(dev);
+	}
 }
-EXPORT_SYMBOL(unregister_netdevice);
+EXPORT_SYMBOL(unregister_netdevice_queue);
+
+/**
+ *	unregister_netdevice_many - unregister many devices
+ *	@head: list of devices
+ *
+ *	Unregister and queue for deletion all the devices on list @head.
+ *	Callers must hold the rtnl semaphore.
+ */
+void unregister_netdevice_many(struct list_head *head)
+{
+	struct net_device *dev;
+
+	if (!list_empty(head)) {
+		rollback_registered_many(head);
+		list_for_each_entry(dev, head, unreg_list)
+			net_set_todo(dev);
+	}
+}
+EXPORT_SYMBOL(unregister_netdevice_many);
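
A minimal caller-side sketch of the new batching pair (hypothetical helper; the rtnl semaphore is taken as required by the functions above):

/* Tear down a group of devices while paying the synchronize_net() cost in
 * rollback_registered_many() once for the whole group rather than per device.
 */
static void example_destroy_group(struct net_device **devs, int count)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < count; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}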
 
 /**
  *	unregister_netdev - remove device from the kernel
@@ -5581,7 +5693,7 @@
 
 		/* Delete virtual devices */
 		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
-			dev->rtnl_link_ops->dellink(dev);
+			dev->rtnl_link_ops->dellink(dev, NULL);
 			goto restart;
 		}
 
diff --git a/net/core/filter.c b/net/core/filter.c
index d1d779c..08db7b9 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -303,6 +303,12 @@
 		case SKF_AD_IFINDEX:
 			A = skb->dev->ifindex;
 			continue;
+		case SKF_AD_MARK:
+			A = skb->mark;
+			continue;
+		case SKF_AD_QUEUE:
+			A = skb->queue_mapping;
+			continue;
 		case SKF_AD_NLATTR: {
 			struct nlattr *nla;
 
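
For reference, a userspace sketch of a classic socket filter using the new SKF_AD_MARK ancillary load (header names and the SKF_AD_MARK definition are assumed to come from the matching uapi side of this change; the mark value 1 is arbitrary):

#include <linux/filter.h>
#include <sys/socket.h>

/* Accept only packets whose skb->mark equals 1, drop everything else. */
static int attach_mark_filter(int fd)
{
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_MARK),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 1, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* match: keep packet */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* no match: drop */
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
}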
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 753c420..157645c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -139,7 +139,9 @@
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	if (netif_running(netdev) && netdev->ethtool_ops->get_settings) {
+	if (netif_running(netdev) &&
+	    netdev->ethtool_ops &&
+	    netdev->ethtool_ops->get_settings) {
 		struct ethtool_cmd cmd = { ETHTOOL_GSET };
 
 		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
@@ -158,7 +160,9 @@
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	if (netif_running(netdev) && netdev->ethtool_ops->get_settings) {
+	if (netif_running(netdev) &&
+	    netdev->ethtool_ops &&
+	    netdev->ethtool_ops->get_settings) {
 		struct ethtool_cmd cmd = { ETHTOOL_GSET };
 
 		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
@@ -540,8 +544,11 @@
 	dev_set_name(dev, "%s", net->name);
 
 #ifdef CONFIG_SYSFS
-	*groups++ = &netstat_group;
+	/* Allow for a device specific group */
+	if (*groups)
+		groups++;
 
+	*groups++ = &netstat_group;
 #ifdef CONFIG_WIRELESS_EXT_SYSFS
 	if (net->ieee80211_ptr)
 		*groups++ = &wireless_group;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1da0e03..d38470a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -335,10 +335,12 @@
 	__u32 cur_src_mac_offset;
 	__be32 cur_saddr;
 	__be32 cur_daddr;
+	__u16 ip_id;
 	__u16 cur_udp_dst;
 	__u16 cur_udp_src;
 	__u16 cur_queue_map;
 	__u32 cur_pkt_size;
+	__u32 last_pkt_size;
 
 	__u8 hh[14];
 	/* = {
@@ -2630,6 +2632,8 @@
 	iph->protocol = IPPROTO_UDP;	/* UDP */
 	iph->saddr = pkt_dev->cur_saddr;
 	iph->daddr = pkt_dev->cur_daddr;
+	iph->id = htons(pkt_dev->ip_id);
+	pkt_dev->ip_id++;
 	iph->frag_off = 0;
 	iplen = 20 + 8 + datalen;
 	iph->tot_len = htons(iplen);
@@ -2641,24 +2645,26 @@
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
 
-	if (pkt_dev->nfrags <= 0)
+	if (pkt_dev->nfrags <= 0) {
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
-	else {
+		memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
+	} else {
 		int frags = pkt_dev->nfrags;
-		int i;
+		int i, len;
 
 		pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
 
 		if (frags > MAX_SKB_FRAGS)
 			frags = MAX_SKB_FRAGS;
 		if (datalen > frags * PAGE_SIZE) {
-			skb_put(skb, datalen - frags * PAGE_SIZE);
+			len = datalen - frags * PAGE_SIZE;
+			memset(skb_put(skb, len), 0, len);
 			datalen = frags * PAGE_SIZE;
 		}
 
 		i = 0;
 		while (datalen > 0) {
-			struct page *page = alloc_pages(GFP_KERNEL, 0);
+			struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 			skb_shinfo(skb)->frags[i].page = page;
 			skb_shinfo(skb)->frags[i].page_offset = 0;
 			skb_shinfo(skb)->frags[i].size =
@@ -3429,7 +3435,7 @@
 			pkt_dev->clone_count--;	/* back out increment, OOM */
 			return;
 		}
-
+		pkt_dev->last_pkt_size = pkt_dev->skb->len;
 		pkt_dev->allocated_skbs++;
 		pkt_dev->clone_count = 0;	/* reset counter */
 	}
@@ -3456,7 +3462,7 @@
 		pkt_dev->last_ok = 1;
 		pkt_dev->sofar++;
 		pkt_dev->seq_num++;
-		pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
+		pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
 		break;
 	default: /* Drivers are not supposed to return other values! */
 		if (net_ratelimit())
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index eb42873..391a62c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -248,7 +248,7 @@
 int __rtnl_link_register(struct rtnl_link_ops *ops)
 {
 	if (!ops->dellink)
-		ops->dellink = unregister_netdevice;
+		ops->dellink = unregister_netdevice_queue;
 
 	list_add_tail(&ops->list, &link_ops);
 	return 0;
@@ -277,13 +277,13 @@
 static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
 {
 	struct net_device *dev;
-restart:
+	LIST_HEAD(list_kill);
+
 	for_each_netdev(net, dev) {
-		if (dev->rtnl_link_ops == ops) {
-			ops->dellink(dev);
-			goto restart;
-		}
+		if (dev->rtnl_link_ops == ops)
+			ops->dellink(dev, &list_kill);
 	}
+	unregister_netdevice_many(&list_kill);
 }
 
 void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
@@ -682,22 +682,33 @@
 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
-	int idx;
-	int s_idx = cb->args[0];
+	int h, s_h;
+	int idx = 0, s_idx;
 	struct net_device *dev;
+	struct hlist_head *head;
+	struct hlist_node *node;
 
-	idx = 0;
-	for_each_netdev(net, dev) {
-		if (idx < s_idx)
-			goto cont;
-		if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
-				     NETLINK_CB(cb->skb).pid,
-				     cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0)
-			break;
+	s_h = cb->args[0];
+	s_idx = cb->args[1];
+
+	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+		idx = 0;
+		head = &net->dev_index_head[h];
+		hlist_for_each_entry(dev, node, head, index_hlist) {
+			if (idx < s_idx)
+				goto cont;
+			if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+					     NETLINK_CB(cb->skb).pid,
+					     cb->nlh->nlmsg_seq, 0,
+					     NLM_F_MULTI) <= 0)
+				goto out;
 cont:
-		idx++;
+			idx++;
+		}
 	}
-	cb->args[0] = idx;
+out:
+	cb->args[1] = idx;
+	cb->args[0] = h;
 
 	return skb->len;
 }
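
The two callback slots form a resumable cursor; an illustrative trace of a dump that overflows one skb (bucket and index values are hypothetical):

/*
 * first call : s_h = 0, s_idx = 0 -> fills the skb while walking buckets
 *                                    0..k, returns with args[0] = k and
 *                                    args[1] = n (entries of bucket k sent)
 * second call: s_h = k, s_idx = n -> skips the first n entries of bucket k,
 *                                    then continues with bucket k+1 at idx 0
 */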
@@ -910,9 +921,9 @@
 	err = -EINVAL;
 	ifm = nlmsg_data(nlh);
 	if (ifm->ifi_index > 0)
-		dev = dev_get_by_index(net, ifm->ifi_index);
+		dev = __dev_get_by_index(net, ifm->ifi_index);
 	else if (tb[IFLA_IFNAME])
-		dev = dev_get_by_name(net, ifname);
+		dev = __dev_get_by_name(net, ifname);
 	else
 		goto errout;
 
@@ -922,11 +933,9 @@
 	}
 
 	if ((err = validate_linkmsg(dev, tb)) < 0)
-		goto errout_dev;
+		goto errout;
 
 	err = do_setlink(dev, ifm, tb, ifname, 0);
-errout_dev:
-	dev_put(dev);
 errout:
 	return err;
 }
@@ -963,7 +972,7 @@
 	if (!ops)
 		return -EOPNOTSUPP;
 
-	ops->dellink(dev);
+	ops->dellink(dev, NULL);
 	return 0;
 }
 
@@ -1154,6 +1163,7 @@
 {
 	struct net *net = sock_net(skb->sk);
 	struct ifinfomsg *ifm;
+	char ifname[IFNAMSIZ];
 	struct nlattr *tb[IFLA_MAX+1];
 	struct net_device *dev = NULL;
 	struct sk_buff *nskb;
@@ -1163,19 +1173,23 @@
 	if (err < 0)
 		return err;
 
+	if (tb[IFLA_IFNAME])
+		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+
 	ifm = nlmsg_data(nlh);
-	if (ifm->ifi_index > 0) {
-		dev = dev_get_by_index(net, ifm->ifi_index);
-		if (dev == NULL)
-			return -ENODEV;
-	} else
+	if (ifm->ifi_index > 0)
+		dev = __dev_get_by_index(net, ifm->ifi_index);
+	else if (tb[IFLA_IFNAME])
+		dev = __dev_get_by_name(net, ifname);
+	else
 		return -EINVAL;
 
+	if (dev == NULL)
+		return -ENODEV;
+
 	nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
-	if (nskb == NULL) {
-		err = -ENOBUFS;
-		goto errout;
-	}
+	if (nskb == NULL)
+		return -ENOBUFS;
 
 	err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
 			       nlh->nlmsg_seq, 0, 0);
@@ -1183,11 +1197,8 @@
 		/* -EMSGSIZE implies BUG in if_nlmsg_size */
 		WARN_ON(err == -EMSGSIZE);
 		kfree_skb(nskb);
-		goto errout;
-	}
-	err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid);
-errout:
-	dev_put(dev);
+	} else
+		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid);
 
 	return err;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 38713aa..76ff58d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -352,11 +352,18 @@
 }
 EXPORT_SYMBOL(sk_receive_skb);
 
+void sk_reset_txq(struct sock *sk)
+{
+	sk_tx_queue_clear(sk);
+}
+EXPORT_SYMBOL(sk_reset_txq);
+
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
 	struct dst_entry *dst = sk->sk_dst_cache;
 
 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+		sk_tx_queue_clear(sk);
 		sk->sk_dst_cache = NULL;
 		dst_release(dst);
 		return NULL;
@@ -410,17 +417,18 @@
 	if (copy_from_user(devname, optval, optlen))
 		goto out;
 
-	if (devname[0] == '\0') {
-		index = 0;
-	} else {
-		struct net_device *dev = dev_get_by_name(net, devname);
+	index = 0;
+	if (devname[0] != '\0') {
+		struct net_device *dev;
 
+		rcu_read_lock();
+		dev = dev_get_by_name_rcu(net, devname);
+		if (dev)
+			index = dev->ifindex;
+		rcu_read_unlock();
 		ret = -ENODEV;
 		if (!dev)
 			goto out;
-
-		index = dev->ifindex;
-		dev_put(dev);
 	}
 
 	lock_sock(sk);
@@ -953,7 +961,8 @@
 	void *sptr = nsk->sk_security;
 #endif
 	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
-		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
+		     sizeof(osk->sk_tx_queue_mapping));
 	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
 	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
@@ -997,6 +1006,7 @@
 
 		if (!try_module_get(prot->owner))
 			goto out_free_sec;
+		sk_tx_queue_clear(sk);
 	}
 
 	return sk;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 00028d4..2423a08 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -991,7 +991,6 @@
 	.protocol	= IPPROTO_DCCP,
 	.prot		= &dccp_v4_prot,
 	.ops		= &inet_dccp_ops,
-	.capability	= -1,
 	.no_check	= 0,
 	.flags		= INET_PROTOSW_ICSK,
 };
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6d89f9f..50ea91a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1185,7 +1185,6 @@
 	.protocol	= IPPROTO_DCCP,
 	.prot		= &dccp_v6_prot,
 	.ops		= &inet6_dccp_ops,
-	.capability	= -1,
 	.flags		= INET_PROTOSW_ICSK,
 };
 
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 162d1e6..bbfeb5e 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -38,7 +38,7 @@
 
 	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
 		if (icsk->icsk_retransmits != 0)
-			dst_negative_advice(&sk->sk_dst_cache);
+			dst_negative_advice(&sk->sk_dst_cache, sk);
 		retry_until = icsk->icsk_syn_retries ?
 			    : sysctl_dccp_request_retries;
 	} else {
@@ -63,7 +63,7 @@
 			   Golden words :-).
 		   */
 
-			dst_negative_advice(&sk->sk_dst_cache);
+			dst_negative_advice(&sk->sk_dst_cache, sk);
 		}
 
 		retry_until = sysctl_dccp_retries2;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 4d30606..9ade3a6 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -675,7 +675,8 @@
 
 
 
-static int dn_create(struct net *net, struct socket *sock, int protocol)
+static int dn_create(struct net *net, struct socket *sock, int protocol,
+		     int kern)
 {
 	struct sock *sk;
 
@@ -749,9 +750,9 @@
 
 	if (!(saddr->sdn_flags & SDF_WILD)) {
 		if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
-			read_lock(&dev_base_lock);
+			rcu_read_lock();
 			ldev = NULL;
-			for_each_netdev(&init_net, dev) {
+			for_each_netdev_rcu(&init_net, dev) {
 				if (!dev->dn_ptr)
 					continue;
 				if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
@@ -759,7 +760,7 @@
 					break;
 				}
 			}
-			read_unlock(&dev_base_lock);
+			rcu_read_unlock();
 			if (ldev == NULL)
 				return -EADDRNOTAVAIL;
 		}
@@ -1955,7 +1956,7 @@
 	}
 
 	if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
-		dst_negative_advice(&sk->sk_dst_cache);
+		dst_negative_advice(&sk->sk_dst_cache, sk);
 
 	mss = scp->segsize_rem;
 	fctype = scp->services_rem & NSP_FC_MASK;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 6e1f085..d82694d 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -600,15 +600,17 @@
 		dev_put(dev);
 }
 
+/*
+ * Called with RTNL
+ */
 static struct dn_dev *dn_dev_by_index(int ifindex)
 {
 	struct net_device *dev;
 	struct dn_dev *dn_dev = NULL;
-	dev = dev_get_by_index(&init_net, ifindex);
-	if (dev) {
+
+	dev = __dev_get_by_index(&init_net, ifindex);
+	if (dev)
 		dn_dev = dev->dn_ptr;
-		dev_put(dev);
-	}
 
 	return dn_dev;
 }
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 27ea2e9..fd641f6 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -607,8 +607,8 @@
 	ASSERT_RTNL();
 
 	/* Scan device list */
-	read_lock(&dev_base_lock);
-	for_each_netdev(&init_net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
 		dn_db = dev->dn_ptr;
 		if (dn_db == NULL)
 			continue;
@@ -619,7 +619,7 @@
 			}
 		}
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 
 	if (found_it == 0) {
 		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 57662ca..860286a 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -908,8 +908,8 @@
 			dev_put(dev_out);
 			goto out;
 		}
-		read_lock(&dev_base_lock);
-		for_each_netdev(&init_net, dev) {
+		rcu_read_lock();
+		for_each_netdev_rcu(&init_net, dev) {
 			if (!dev->dn_ptr)
 				continue;
 			if (!dn_dev_islocal(dev, oldflp->fld_src))
@@ -922,7 +922,7 @@
 			dev_out = dev;
 			break;
 		}
-		read_unlock(&dev_base_lock);
+		rcu_read_unlock();
 		if (dev_out == NULL)
 			goto out;
 		dev_hold(dev_out);
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 26b0ab1..2036568 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -263,11 +263,10 @@
 			return -ENODEV;
 
 		rv = -ENODEV;
-		if (dev->dn_ptr != NULL) {
+		if (dev->dn_ptr != NULL)
 			rv = dn_dev_set_default(dev, 1);
-			if (rv)
-				dev_put(dev);
-		}
+		if (rv)
+			dev_put(dev);
 	}
 
 	return rv;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 5e9426a1..5966798 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -605,7 +605,8 @@
  *	Create an Econet socket
  */
 
-static int econet_create(struct net *net, struct socket *sock, int protocol)
+static int econet_create(struct net *net, struct socket *sock, int protocol,
+			 int kern)
 {
 	struct sock *sk;
 	struct econet_sock *eo;
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 309348f..de6e34d 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -234,7 +234,7 @@
  * set the state.
  */
 static int ieee802154_create(struct net *net, struct socket *sock,
-		int protocol)
+			     int protocol, int kern)
 {
 	struct sock *sk;
 	int rc;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 04a14b1..7d12c6a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -262,7 +262,8 @@
  *	Create an inet socket.
  */
 
-static int inet_create(struct net *net, struct socket *sock, int protocol)
+static int inet_create(struct net *net, struct socket *sock, int protocol,
+		       int kern)
 {
 	struct sock *sk;
 	struct inet_protosw *answer;
@@ -325,7 +326,7 @@
 	}
 
 	err = -EPERM;
-	if (answer->capability > 0 && !capable(answer->capability))
+	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
 		goto out_rcu_unlock;
 
 	err = -EAFNOSUPPORT;
@@ -685,7 +686,7 @@
 {
 	struct sock *sk		= sock->sk;
 	struct inet_sock *inet	= inet_sk(sk);
-	struct sockaddr_in *sin	= (struct sockaddr_in *)uaddr;
+	DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
 
 	sin->sin_family = AF_INET;
 	if (peer) {
@@ -947,7 +948,6 @@
 		.protocol =   IPPROTO_TCP,
 		.prot =       &tcp_prot,
 		.ops =        &inet_stream_ops,
-		.capability = -1,
 		.no_check =   0,
 		.flags =      INET_PROTOSW_PERMANENT |
 			      INET_PROTOSW_ICSK,
@@ -958,7 +958,6 @@
 		.protocol =   IPPROTO_UDP,
 		.prot =       &udp_prot,
 		.ops =        &inet_dgram_ops,
-		.capability = -1,
 		.no_check =   UDP_CSUM_DEFAULT,
 		.flags =      INET_PROTOSW_PERMANENT,
        },
@@ -969,7 +968,6 @@
 	       .protocol =   IPPROTO_IP,	/* wild card */
 	       .prot =       &raw_prot,
 	       .ops =        &inet_sockraw_ops,
-	       .capability = CAP_NET_RAW,
 	       .no_check =   UDP_CSUM_DEFAULT,
 	       .flags =      INET_PROTOSW_REUSE,
        }
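
The observable effect for userspace, sketched (IPPROTO_ICMP chosen arbitrarily):

#include <errno.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* With the per-protosw capability field gone, the generic rule in
 * inet_create() is: SOCK_RAW from userspace needs CAP_NET_RAW, while
 * in-kernel socket creation (kern != 0) is exempt.
 */
static int try_raw_socket(void)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);

	if (fd < 0 && errno == EPERM)
		return 0;	/* expected for a task without CAP_NET_RAW */
	return fd;
}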
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5df2f6a..c2045f9 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -140,11 +140,11 @@
 #endif
 	dev_put(dev);
 	if (!idev->dead)
-		printk("Freeing alive in_device %p\n", idev);
-	else {
+		pr_err("Freeing alive in_device %p\n", idev);
+	else
 		kfree(idev);
-	}
 }
+EXPORT_SYMBOL(in_dev_finish_destroy);
 
 static struct in_device *inetdev_init(struct net_device *dev)
 {
@@ -159,7 +159,8 @@
 			sizeof(in_dev->cnf));
 	in_dev->cnf.sysctl = NULL;
 	in_dev->dev = dev;
-	if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL)
+	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
+	if (!in_dev->arp_parms)
 		goto out_kfree;
 	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
 		dev_disable_lro(dev);
@@ -405,13 +406,15 @@
 {
 	struct net_device *dev;
 	struct in_device *in_dev = NULL;
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_index(net, ifindex);
+
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, ifindex);
 	if (dev)
 		in_dev = in_dev_get(dev);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return in_dev;
 }
+EXPORT_SYMBOL(inetdev_by_index);
 
 /* Called only from RTNL semaphored context. No locks. */
 
@@ -557,7 +560,7 @@
  *	Determine a default network mask, based on the IP address.
  */
 
-static __inline__ int inet_abc_len(__be32 addr)
+static inline int inet_abc_len(__be32 addr)
 {
 	int rc = -1;	/* Something else, probably a multicast. */
 
@@ -646,13 +649,15 @@
 	rtnl_lock();
 
 	ret = -ENODEV;
-	if ((dev = __dev_get_by_name(net, ifr.ifr_name)) == NULL)
+	dev = __dev_get_by_name(net, ifr.ifr_name);
+	if (!dev)
 		goto done;
 
 	if (colon)
 		*colon = ':';
 
-	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
+	in_dev = __in_dev_get_rtnl(dev);
+	if (in_dev) {
 		if (tryaddrmatch) {
 			/* Matthias Andree */
 			/* compare label and address (4.4BSD style) */
@@ -720,7 +725,8 @@
 
 		if (!ifa) {
 			ret = -ENOBUFS;
-			if ((ifa = inet_alloc_ifa()) == NULL)
+			ifa = inet_alloc_ifa();
+			if (!ifa)
 				break;
 			if (colon)
 				memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
@@ -822,10 +828,10 @@
 	struct ifreq ifr;
 	int done = 0;
 
-	if (!in_dev || (ifa = in_dev->ifa_list) == NULL)
+	if (!in_dev)
 		goto out;
 
-	for (; ifa; ifa = ifa->ifa_next) {
+	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
 		if (!buf) {
 			done += sizeof(ifr);
 			continue;
@@ -875,36 +881,33 @@
 		if (!addr)
 			addr = ifa->ifa_local;
 	} endfor_ifa(in_dev);
-no_in_dev:
-	rcu_read_unlock();
 
 	if (addr)
-		goto out;
+		goto out_unlock;
+no_in_dev:
 
 	/* Non-loopback addresses on the loopback device should be preferred
 	   in this case. It is important that lo is the first interface
 	   in dev_base list.
 	 */
-	read_lock(&dev_base_lock);
-	rcu_read_lock();
-	for_each_netdev(net, dev) {
-		if ((in_dev = __in_dev_get_rcu(dev)) == NULL)
+	for_each_netdev_rcu(net, dev) {
+		in_dev = __in_dev_get_rcu(dev);
+		if (!in_dev)
 			continue;
 
 		for_primary_ifa(in_dev) {
 			if (ifa->ifa_scope != RT_SCOPE_LINK &&
 			    ifa->ifa_scope <= scope) {
 				addr = ifa->ifa_local;
-				goto out_unlock_both;
+				goto out_unlock;
 			}
 		} endfor_ifa(in_dev);
 	}
-out_unlock_both:
-	read_unlock(&dev_base_lock);
+out_unlock:
 	rcu_read_unlock();
-out:
 	return addr;
 }
+EXPORT_SYMBOL(inet_select_addr);
 
 static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
 			      __be32 local, int scope)
@@ -940,7 +943,7 @@
 		}
 	} endfor_ifa(in_dev);
 
-	return same? addr : 0;
+	return same ? addr : 0;
 }
 
 /*
@@ -961,17 +964,16 @@
 		return confirm_addr_indev(in_dev, dst, local, scope);
 
 	net = dev_net(in_dev->dev);
-	read_lock(&dev_base_lock);
 	rcu_read_lock();
-	for_each_netdev(net, dev) {
-		if ((in_dev = __in_dev_get_rcu(dev))) {
+	for_each_netdev_rcu(net, dev) {
+		in_dev = __in_dev_get_rcu(dev);
+		if (in_dev) {
 			addr = confirm_addr_indev(in_dev, dst, local, scope);
 			if (addr)
 				break;
 		}
 	}
 	rcu_read_unlock();
-	read_unlock(&dev_base_lock);
 
 	return addr;
 }
@@ -984,14 +986,16 @@
 {
 	return blocking_notifier_chain_register(&inetaddr_chain, nb);
 }
+EXPORT_SYMBOL(register_inetaddr_notifier);
 
 int unregister_inetaddr_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
 }
+EXPORT_SYMBOL(unregister_inetaddr_notifier);
 
-/* Rename ifa_labels for a device name change. Make some effort to preserve existing
- * alias numbering and to create unique labels if possible.
+/* Rename ifa_labels for a device name change. Make some effort to preserve
+ * existing alias numbering and to create unique labels if possible.
 */
 static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
 {
@@ -1010,11 +1014,10 @@
 			sprintf(old, ":%d", named);
 			dot = old;
 		}
-		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) {
+		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
 			strcat(ifa->ifa_label, dot);
-		} else {
+		else
 			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
-		}
 skip:
 		rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
 	}
@@ -1061,8 +1064,9 @@
 		if (!inetdev_valid_mtu(dev->mtu))
 			break;
 		if (dev->flags & IFF_LOOPBACK) {
-			struct in_ifaddr *ifa;
-			if ((ifa = inet_alloc_ifa()) != NULL) {
+			struct in_ifaddr *ifa = inet_alloc_ifa();
+
+			if (ifa) {
 				ifa->ifa_local =
 				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
 				ifa->ifa_prefixlen = 8;
@@ -1183,7 +1187,8 @@
 			goto cont;
 		if (idx > s_idx)
 			s_ip_idx = 0;
-		if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
+		in_dev = __in_dev_get_rtnl(dev);
+		if (!in_dev)
 			goto cont;
 
 		for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
@@ -1239,18 +1244,18 @@
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
 		struct in_device *in_dev;
-		rcu_read_lock();
+
 		in_dev = __in_dev_get_rcu(dev);
 		if (in_dev && !test_bit(i, in_dev->cnf.state))
 			in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
-		rcu_read_unlock();
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
+/* called with RTNL locked */
 static void inet_forward_change(struct net *net)
 {
 	struct net_device *dev;
@@ -1259,7 +1264,6 @@
 	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
 	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
 
-	read_lock(&dev_base_lock);
 	for_each_netdev(net, dev) {
 		struct in_device *in_dev;
 		if (on)
@@ -1270,7 +1274,6 @@
 			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
 		rcu_read_unlock();
 	}
-	read_unlock(&dev_base_lock);
 }
 
 static int devinet_conf_proc(ctl_table *ctl, int write,
@@ -1680,8 +1683,3 @@
 	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
 }
 
-EXPORT_SYMBOL(in_dev_finish_destroy);
-EXPORT_SYMBOL(inet_select_addr);
-EXPORT_SYMBOL(inetdev_by_index);
-EXPORT_SYMBOL(register_inetaddr_notifier);
-EXPORT_SYMBOL(unregister_inetaddr_notifier);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index f73dbed..816e218 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -229,14 +229,17 @@
  */
 
 int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
-			struct net_device *dev, __be32 *spec_dst, u32 *itag)
+			struct net_device *dev, __be32 *spec_dst,
+			u32 *itag, u32 mark)
 {
 	struct in_device *in_dev;
 	struct flowi fl = { .nl_u = { .ip4_u =
 				      { .daddr = src,
 					.saddr = dst,
 					.tos = tos } },
+			    .mark = mark,
 			    .iif = oif };
+
 	struct fib_result res;
 	int no_addr, rpf;
 	int ret;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 84adb57..fe11f60 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -501,15 +501,16 @@
 	if (!(rt->rt_flags & RTCF_LOCAL)) {
 		struct net_device *dev = NULL;
 
+		rcu_read_lock();
 		if (rt->fl.iif &&
 			net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
-			dev = dev_get_by_index(net, rt->fl.iif);
+			dev = dev_get_by_index_rcu(net, rt->fl.iif);
 
-		if (dev) {
+		if (dev)
 			saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
-			dev_put(dev);
-		} else
+		else
 			saddr = 0;
+		rcu_read_unlock();
 	}
 
 	tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f6a0af7..26fb50e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -447,6 +447,28 @@
 
 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
 
+/* Decide when to expire the request and when to resend SYN-ACK */
+static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
+				  const int max_retries,
+				  const u8 rskq_defer_accept,
+				  int *expire, int *resend)
+{
+	if (!rskq_defer_accept) {
+		*expire = req->retrans >= thresh;
+		*resend = 1;
+		return;
+	}
+	*expire = req->retrans >= thresh &&
+		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
+	/*
+	 * Do not resend while waiting for data after ACK;
+	 * start resending at the end of the deferring period to give
+	 * a last chance for data or an ACK to create an established socket.
+	 */
+	*resend = !inet_rsk(req)->acked ||
+		  req->retrans >= rskq_defer_accept - 1;
+}
+
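A small userspace mirror of the helper, useful for seeing how the two outputs evolve; thresh = 5, max_retries = 5, defer = 3 and acked = 1 are illustrative values, not kernel defaults:

#include <stdio.h>

static void recalc(int retrans, int thresh, int max_retries, int defer,
		   int acked, int *expire, int *resend)
{
	if (!defer) {
		*expire = retrans >= thresh;
		*resend = 1;
		return;
	}
	*expire = retrans >= thresh && (!acked || retrans >= max_retries);
	*resend = !acked || retrans >= defer - 1;
}

int main(void)
{
	int expire, resend, r;

	for (r = 0; r <= 5; r++) {
		recalc(r, 5, 5, 3, 1, &expire, &resend);
		printf("retrans=%d expire=%d resend=%d\n", r, expire, resend);
	}
	return 0;	/* resend turns on at retrans=2, expire only at retrans=5 */
}
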
 void inet_csk_reqsk_queue_prune(struct sock *parent,
 				const unsigned long interval,
 				const unsigned long timeout,
@@ -502,9 +524,15 @@
 		reqp=&lopt->syn_table[i];
 		while ((req = *reqp) != NULL) {
 			if (time_after_eq(now, req->expires)) {
-				if ((req->retrans < thresh ||
-				     (inet_rsk(req)->acked && req->retrans < max_retries))
-				    && !req->rsk_ops->rtx_syn_ack(parent, req)) {
+				int expire = 0, resend = 0;
+
+				syn_ack_recalc(req, thresh, max_retries,
+					       queue->rskq_defer_accept,
+					       &expire, &resend);
+				if (!expire &&
+				    (!resend ||
+				     !req->rsk_ops->rtx_syn_ack(parent, req) ||
+				     inet_rsk(req)->acked)) {
 					unsigned long timeo;
 
 					if (req->retrans++ == 0)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 575f9bd..b007f8a 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -206,10 +206,11 @@
 		struct sk_buff *head = qp->q.fragments;
 
 		/* Send an ICMP "Fragment Reassembly Timeout" message. */
-		if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) {
+		rcu_read_lock();
+		head->dev = dev_get_by_index_rcu(net, qp->iif);
+		if (head->dev)
 			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
-			dev_put(head->dev);
-		}
+		rcu_read_unlock();
 	}
 out:
 	spin_unlock(&qp->q.lock);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 89ff9d5b..71a3242 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -156,8 +156,13 @@
 #define tunnels_r	tunnels[2]
 #define tunnels_l	tunnels[1]
 #define tunnels_wc	tunnels[0]
+/*
+ * Locking: hash tables are protected by RCU and a spinlock
+ */
+static DEFINE_SPINLOCK(ipgre_lock);
 
-static DEFINE_RWLOCK(ipgre_lock);
+#define for_each_ip_tunnel_rcu(start) \
+	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
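
The macro expects a local `struct ip_tunnel *t' as its iteration cursor; a sketch of the reader/writer pairing the new scheme relies on (ign, h1 and ipgre_bucket() as used elsewhere in this file):

	struct ip_tunnel *t;
	struct ip_tunnel **tp;

	/* reader side: lockless traversal under RCU */
	rcu_read_lock();
	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
		if (t->dev->flags & IFF_UP)
			break;		/* t remains valid until rcu_read_unlock() */
	}
	rcu_read_unlock();

	/* writer side: serialized by ipgre_lock, published with rcu_assign_pointer() */
	tp = ipgre_bucket(ign, t);
	spin_lock_bh(&ipgre_lock);
	t->next = *tp;
	rcu_assign_pointer(*tp, t);
	spin_unlock_bh(&ipgre_lock);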
 
 /* Given src, dst and key, find appropriate for input tunnel. */
 
@@ -175,7 +180,7 @@
 		       ARPHRD_ETHER : ARPHRD_IPGRE;
 	int score, cand_score = 4;
 
-	for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) {
+	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
 		if (local != t->parms.iph.saddr ||
 		    remote != t->parms.iph.daddr ||
 		    key != t->parms.i_key ||
@@ -200,7 +205,7 @@
 		}
 	}
 
-	for (t = ign->tunnels_r[h0^h1]; t; t = t->next) {
+	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
 		if (remote != t->parms.iph.daddr ||
 		    key != t->parms.i_key ||
 		    !(t->dev->flags & IFF_UP))
@@ -224,7 +229,7 @@
 		}
 	}
 
-	for (t = ign->tunnels_l[h1]; t; t = t->next) {
+	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
 		if ((local != t->parms.iph.saddr &&
 		     (local != t->parms.iph.daddr ||
 		      !ipv4_is_multicast(local))) ||
@@ -250,7 +255,7 @@
 		}
 	}
 
-	for (t = ign->tunnels_wc[h1]; t; t = t->next) {
+	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
 		if (t->parms.i_key != key ||
 		    !(t->dev->flags & IFF_UP))
 			continue;
@@ -276,8 +281,9 @@
 	if (cand != NULL)
 		return cand;
 
-	if (ign->fb_tunnel_dev->flags & IFF_UP)
-		return netdev_priv(ign->fb_tunnel_dev);
+	dev = ign->fb_tunnel_dev;
+	if (dev->flags & IFF_UP)
+		return netdev_priv(dev);
 
 	return NULL;
 }
@@ -311,10 +317,10 @@
 {
 	struct ip_tunnel **tp = ipgre_bucket(ign, t);
 
+	spin_lock_bh(&ipgre_lock);
 	t->next = *tp;
-	write_lock_bh(&ipgre_lock);
-	*tp = t;
-	write_unlock_bh(&ipgre_lock);
+	rcu_assign_pointer(*tp, t);
+	spin_unlock_bh(&ipgre_lock);
 }
 
 static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
@@ -323,9 +329,9 @@
 
 	for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
 		if (t == *tp) {
-			write_lock_bh(&ipgre_lock);
+			spin_lock_bh(&ipgre_lock);
 			*tp = t->next;
-			write_unlock_bh(&ipgre_lock);
+			spin_unlock_bh(&ipgre_lock);
 			break;
 		}
 	}
@@ -476,7 +482,7 @@
 		break;
 	}
 
-	read_lock(&ipgre_lock);
+	rcu_read_lock();
 	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
 				flags & GRE_KEY ?
 				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
@@ -494,7 +500,7 @@
 		t->err_count = 1;
 	t->err_time = jiffies;
 out:
-	read_unlock(&ipgre_lock);
+	rcu_read_unlock();
 	return;
 }
 
@@ -573,7 +579,7 @@
 
 	gre_proto = *(__be16 *)(h + 2);
 
-	read_lock(&ipgre_lock);
+	rcu_read_lock();
 	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
 					  iph->saddr, iph->daddr, key,
 					  gre_proto))) {
@@ -647,13 +653,13 @@
 		ipgre_ecn_decapsulate(iph, skb);
 
 		netif_rx(skb);
-		read_unlock(&ipgre_lock);
+		rcu_read_unlock();
 		return(0);
 	}
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
 drop:
-	read_unlock(&ipgre_lock);
+	rcu_read_unlock();
 drop_nolock:
 	kfree_skb(skb);
 	return(0);
@@ -1284,16 +1290,19 @@
 	.netns_ok	=	1,
 };
 
-static void ipgre_destroy_tunnels(struct ipgre_net *ign)
+static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
 {
 	int prio;
 
 	for (prio = 0; prio < 4; prio++) {
 		int h;
 		for (h = 0; h < HASH_SIZE; h++) {
-			struct ip_tunnel *t;
-			while ((t = ign->tunnels[prio][h]) != NULL)
-				unregister_netdevice(t->dev);
+			struct ip_tunnel *t = ign->tunnels[prio][h];
+
+			while (t != NULL) {
+				unregister_netdevice_queue(t->dev, head);
+				t = t->next;
+			}
 		}
 	}
 }
@@ -1341,10 +1350,12 @@
 static void ipgre_exit_net(struct net *net)
 {
 	struct ipgre_net *ign;
+	LIST_HEAD(list);
 
 	ign = net_generic(net, ipgre_net_id);
 	rtnl_lock();
-	ipgre_destroy_tunnels(ign);
+	ipgre_destroy_tunnels(ign, &list);
+	unregister_netdevice_many(&list);
 	rtnl_unlock();
 	kfree(ign);
 }
@@ -1465,7 +1476,7 @@
 
 	ether_setup(dev);
 
-	dev->netdev_ops		= &ipgre_netdev_ops;
+	dev->netdev_ops		= &ipgre_tap_netdev_ops;
 	dev->destructor 	= free_netdev;
 
 	dev->iflink		= 0;
@@ -1526,25 +1537,29 @@
 		if (t->dev != dev)
 			return -EEXIST;
 	} else {
-		unsigned nflags = 0;
-
 		t = nt;
 
-		if (ipv4_is_multicast(p.iph.daddr))
-			nflags = IFF_BROADCAST;
-		else if (p.iph.daddr)
-			nflags = IFF_POINTOPOINT;
+		if (dev->type != ARPHRD_ETHER) {
+			unsigned nflags = 0;
 
-		if ((dev->flags ^ nflags) &
-		    (IFF_POINTOPOINT | IFF_BROADCAST))
-			return -EINVAL;
+			if (ipv4_is_multicast(p.iph.daddr))
+				nflags = IFF_BROADCAST;
+			else if (p.iph.daddr)
+				nflags = IFF_POINTOPOINT;
+
+			if ((dev->flags ^ nflags) &
+			    (IFF_POINTOPOINT | IFF_BROADCAST))
+				return -EINVAL;
+		}
 
 		ipgre_tunnel_unlink(ign, t);
 		t->parms.iph.saddr = p.iph.saddr;
 		t->parms.iph.daddr = p.iph.daddr;
 		t->parms.i_key = p.i_key;
-		memcpy(dev->dev_addr, &p.iph.saddr, 4);
-		memcpy(dev->broadcast, &p.iph.daddr, 4);
+		if (dev->type != ARPHRD_ETHER) {
+			memcpy(dev->dev_addr, &p.iph.saddr, 4);
+			memcpy(dev->broadcast, &p.iph.daddr, 4);
+		}
 		ipgre_tunnel_link(ign, t);
 		netdev_state_change(dev);
 	}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c602d98..cafad9b 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -480,7 +480,7 @@
 	case IP_OPTIONS:
 	{
 		struct ip_options *opt = NULL;
-		if (optlen > 40 || optlen < 0)
+		if (optlen > 40)
 			goto e_inval;
 		err = ip_options_get_from_user(sock_net(sk), &opt,
 					       optval, optlen);
@@ -575,7 +575,7 @@
 		inet->hdrincl = val ? 1 : 0;
 		break;
 	case IP_MTU_DISCOVER:
-		if (val < 0 || val > 3)
+		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
 			goto e_inval;
 		inet->pmtudisc = val;
 		break;
@@ -634,17 +634,16 @@
 				break;
 			}
 			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
-			if (dev) {
+			if (dev)
 				mreq.imr_ifindex = dev->ifindex;
-				dev_put(dev);
-			}
 		} else
-			dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
+			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
 
 
 		err = -EADDRNOTAVAIL;
 		if (!dev)
 			break;
+		dev_put(dev);
 
 		err = -EINVAL;
 		if (sk->sk_bound_dev_if &&
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 6a55392..a2ca53d 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -134,7 +134,13 @@
 static void ipip_tunnel_init(struct net_device *dev);
 static void ipip_tunnel_setup(struct net_device *dev);
 
-static DEFINE_RWLOCK(ipip_lock);
+/*
+ * Locking: hash tables are protected by RCU and a spinlock
+ */
+static DEFINE_SPINLOCK(ipip_lock);
+
+#define for_each_ip_tunnel_rcu(start) \
+	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
 static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
 		__be32 remote, __be32 local)
@@ -144,20 +150,21 @@
 	struct ip_tunnel *t;
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
-	for (t = ipn->tunnels_r_l[h0^h1]; t; t = t->next) {
+	for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1])
 		if (local == t->parms.iph.saddr &&
 		    remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
 			return t;
-	}
-	for (t = ipn->tunnels_r[h0]; t; t = t->next) {
+
+	for_each_ip_tunnel_rcu(ipn->tunnels_r[h0])
 		if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
 			return t;
-	}
-	for (t = ipn->tunnels_l[h1]; t; t = t->next) {
+
+	for_each_ip_tunnel_rcu(ipn->tunnels_l[h1])
 		if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
 			return t;
-	}
-	if ((t = ipn->tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP))
+
+	t = rcu_dereference(ipn->tunnels_wc[0]);
+	if (t && (t->dev->flags&IFF_UP))
 		return t;
 	return NULL;
 }
@@ -193,9 +200,9 @@
 
 	for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) {
 		if (t == *tp) {
-			write_lock_bh(&ipip_lock);
+			spin_lock_bh(&ipip_lock);
 			*tp = t->next;
-			write_unlock_bh(&ipip_lock);
+			spin_unlock_bh(&ipip_lock);
 			break;
 		}
 	}
@@ -205,10 +212,10 @@
 {
 	struct ip_tunnel **tp = ipip_bucket(ipn, t);
 
+	spin_lock_bh(&ipip_lock);
 	t->next = *tp;
-	write_lock_bh(&ipip_lock);
-	*tp = t;
-	write_unlock_bh(&ipip_lock);
+	rcu_assign_pointer(*tp, t);
+	spin_unlock_bh(&ipip_lock);
 }
 
 static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -267,9 +274,9 @@
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
 	if (dev == ipn->fb_tunnel_dev) {
-		write_lock_bh(&ipip_lock);
+		spin_lock_bh(&ipip_lock);
 		ipn->tunnels_wc[0] = NULL;
-		write_unlock_bh(&ipip_lock);
+		spin_unlock_bh(&ipip_lock);
 	} else
 		ipip_tunnel_unlink(ipn, netdev_priv(dev));
 	dev_put(dev);
@@ -318,7 +325,7 @@
 
 	err = -ENOENT;
 
-	read_lock(&ipip_lock);
+	rcu_read_lock();
 	t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
 	if (t == NULL || t->parms.iph.daddr == 0)
 		goto out;
@@ -333,7 +340,7 @@
 		t->err_count = 1;
 	t->err_time = jiffies;
 out:
-	read_unlock(&ipip_lock);
+	rcu_read_unlock();
 	return err;
 }
 
@@ -351,11 +358,11 @@
 	struct ip_tunnel *tunnel;
 	const struct iphdr *iph = ip_hdr(skb);
 
-	read_lock(&ipip_lock);
+	rcu_read_lock();
 	if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev),
 					iph->saddr, iph->daddr)) != NULL) {
 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-			read_unlock(&ipip_lock);
+			rcu_read_unlock();
 			kfree_skb(skb);
 			return 0;
 		}
@@ -374,10 +381,10 @@
 		nf_reset(skb);
 		ipip_ecn_decapsulate(iph, skb);
 		netif_rx(skb);
-		read_unlock(&ipip_lock);
+		rcu_read_unlock();
 		return 0;
 	}
-	read_unlock(&ipip_lock);
+	rcu_read_unlock();
 
 	return -1;
 }
@@ -747,16 +754,19 @@
 static const char banner[] __initconst =
 	KERN_INFO "IPv4 over IPv4 tunneling driver\n";
 
-static void ipip_destroy_tunnels(struct ipip_net *ipn)
+static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 {
 	int prio;
 
 	for (prio = 1; prio < 4; prio++) {
 		int h;
 		for (h = 0; h < HASH_SIZE; h++) {
-			struct ip_tunnel *t;
-			while ((t = ipn->tunnels[prio][h]) != NULL)
-				unregister_netdevice(t->dev);
+			struct ip_tunnel *t = ipn->tunnels[prio][h];
+
+			while (t != NULL) {
+				unregister_netdevice_queue(t->dev, head);
+				t = t->next;
+			}
 		}
 	}
 }
@@ -809,11 +819,13 @@
 static void ipip_exit_net(struct net *net)
 {
 	struct ipip_net *ipn;
+	LIST_HEAD(list);
 
 	ipn = net_generic(net, ipip_net_id);
 	rtnl_lock();
-	ipip_destroy_tunnels(ipn);
-	unregister_netdevice(ipn->fb_tunnel_dev);
+	ipip_destroy_tunnels(ipn, &list);
+	unregister_netdevice_queue(ipn->fb_tunnel_dev, &list);
+	unregister_netdevice_many(&list);
 	rtnl_unlock();
 	kfree(ipn);
 }
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 6949745..ef4ee45 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -275,7 +275,8 @@
  *	@notify: Set to 1, if the caller is a notifier_call
  */
 
-static int vif_delete(struct net *net, int vifi, int notify)
+static int vif_delete(struct net *net, int vifi, int notify,
+		      struct list_head *head)
 {
 	struct vif_device *v;
 	struct net_device *dev;
@@ -319,7 +320,7 @@
 	}
 
 	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
-		unregister_netdevice(dev);
+		unregister_netdevice_queue(dev, head);
 
 	dev_put(dev);
 	return 0;
@@ -870,14 +871,16 @@
 static void mroute_clean_tables(struct net *net)
 {
 	int i;
+	LIST_HEAD(list);
 
 	/*
 	 *	Shut down all active vif entries
 	 */
 	for (i = 0; i < net->ipv4.maxvif; i++) {
 		if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC))
-			vif_delete(net, i, 0);
+			vif_delete(net, i, 0, &list);
 	}
+	unregister_netdevice_many(&list);
 
 	/*
 	 *	Wipe the cache
@@ -993,7 +996,7 @@
 		if (optname == MRT_ADD_VIF) {
 			ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk);
 		} else {
-			ret = vif_delete(net, vif.vifc_vifi, 0);
+			ret = vif_delete(net, vif.vifc_vifi, 0, NULL);
 		}
 		rtnl_unlock();
 		return ret;
@@ -1156,6 +1159,7 @@
 	struct net *net = dev_net(dev);
 	struct vif_device *v;
 	int ct;
+	LIST_HEAD(list);
 
 	if (!net_eq(dev_net(dev), net))
 		return NOTIFY_DONE;
@@ -1165,8 +1169,9 @@
 	v = &net->ipv4.vif_table[0];
 	for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) {
 		if (v->dev == dev)
-			vif_delete(net, ct, 1);
+			vif_delete(net, ct, 1, &list);
 	}
+	unregister_netdevice_many(&list);
 	return NOTIFY_DONE;
 }
 
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 68afc6e..fe1a644 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -750,6 +750,8 @@
 	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
 	rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
 			   nfnetlink_parse_nat_setup);
+	BUG_ON(nf_ct_nat_offset != NULL);
+	rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
 	return 0;
 
  cleanup_extend:
@@ -764,6 +766,7 @@
 	nf_ct_extend_unregister(&nat_extend);
 	rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
 	rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
+	rcu_assign_pointer(nf_ct_nat_offset, NULL);
 	synchronize_net();
 }
 
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 09172a6..f9520fa 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -73,6 +73,28 @@
 	DUMP_OFFSET(this_way);
 }
 
+/* Get the offset value, for conntrack */
+s16 nf_nat_get_offset(const struct nf_conn *ct,
+		      enum ip_conntrack_dir dir,
+		      u32 seq)
+{
+	struct nf_conn_nat *nat = nfct_nat(ct);
+	struct nf_nat_seq *this_way;
+	s16 offset;
+
+	if (!nat)
+		return 0;
+
+	this_way = &nat->seq[dir];
+	spin_lock_bh(&nf_nat_seqofs_lock);
+	offset = after(seq, this_way->correction_pos)
+		 ? this_way->offset_after : this_way->offset_before;
+	spin_unlock_bh(&nf_nat_seqofs_lock);
+
+	return offset;
+}
+EXPORT_SYMBOL_GPL(nf_nat_get_offset);
+
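Consumer-side sketch in conntrack, assuming nf_ct_nat_offset is the function pointer published via rcu_assign_pointer() in the nf_nat init path above (the wrapper name is hypothetical):

static inline s16 example_nat_offset(const struct nf_conn *ct,
				     enum ip_conntrack_dir dir, u32 seq)
{
	s16 (*get_offset)(const struct nf_conn *, enum ip_conntrack_dir, u32);

	get_offset = rcu_dereference(nf_ct_nat_offset);
	return get_offset ? get_offset(ct, dir, seq) : 0;
}
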
 /* Frobs data inside this packet, which is linear. */
 static void mangle_contents(struct sk_buff *skb,
 			    unsigned int dataoff,
@@ -189,11 +211,6 @@
 		adjust_tcp_sequence(ntohl(tcph->seq),
 				    (int)rep_len - (int)match_len,
 				    ct, ctinfo);
-		/* Tell TCP window tracking about seq change */
-		nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
-					ct, CTINFO2DIR(ctinfo),
-					(int)rep_len - (int)match_len);
-
 		nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
 	}
 	return 1;
@@ -415,12 +432,7 @@
 	tcph->seq = newseq;
 	tcph->ack_seq = newack;
 
-	if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
-		return 0;
-
-	nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);
-
-	return 1;
+	return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
 }
 
 /* Setup NAT on this expected conntrack so it follows master. */
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 9ef8c08..ce154b4 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -351,13 +351,24 @@
 	skb->ip_summed = CHECKSUM_NONE;
 
 	skb->transport_header = skb->network_header;
-	err = memcpy_fromiovecend((void *)iph, from, 0, length);
-	if (err)
-		goto error_fault;
+	err = -EFAULT;
+	if (memcpy_fromiovecend((void *)iph, from, 0, length))
+		goto error_free;
 
-	/* We don't modify invalid header */
 	iphlen = iph->ihl * 4;
-	if (iphlen >= sizeof(*iph) && iphlen <= length) {
+
+	/*
+	 * We don't want to modify the ip header, but we do need to
+	 * be sure that it won't cause problems later along the network
+	 * stack.  Specifically, we want to make sure that iph->ihl is a
+	 * sane value.  If ihl points beyond the length of the buffer passed
+	 * in, reject the frame as invalid.
+	 */
+	err = -EINVAL;
+	if (iphlen > length)
+		goto error_free;
+
+	if (iphlen >= sizeof(*iph)) {
 		if (!iph->saddr)
 			iph->saddr = rt->rt_src;
 		iph->check   = 0;
@@ -380,8 +391,7 @@
 out:
 	return 0;
 
-error_fault:
-	err = -EFAULT;
+error_free:
 	kfree_skb(skb);
 error:
 	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bb41992..ff258b5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1628,9 +1628,6 @@
 	__be32  daddr = iph->daddr;
 	unsigned short est_mtu = 0;
 
-	if (ipv4_config.no_pmtu_disc)
-		return 0;
-
 	for (k = 0; k < 2; k++) {
 		for (i = 0; i < 2; i++) {
 			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
@@ -1854,7 +1851,7 @@
 			goto e_inval;
 		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
 	} else if (fib_validate_source(saddr, 0, tos, 0,
-					dev, &spec_dst, &itag) < 0)
+					dev, &spec_dst, &itag, 0) < 0)
 		goto e_inval;
 
 	rth = dst_alloc(&ipv4_dst_ops);
@@ -1967,7 +1964,7 @@
 
 
 	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
-				  in_dev->dev, &spec_dst, &itag);
+				  in_dev->dev, &spec_dst, &itag, skb->mark);
 	if (err < 0) {
 		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
 					 saddr);
@@ -2141,7 +2138,7 @@
 		int result;
 		result = fib_validate_source(saddr, daddr, tos,
 					     net->loopback_dev->ifindex,
-					     dev, &spec_dst, &itag);
+					     dev, &spec_dst, &itag, skb->mark);
 		if (result < 0)
 			goto martian_source;
 		if (result)
@@ -2170,7 +2167,7 @@
 		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
 	else {
 		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
-					  &itag);
+					  &itag, skb->mark);
 		if (err < 0)
 			goto martian_source;
 		if (err)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 5ec678a..3146cc4 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -276,13 +276,6 @@
 
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
-	/* check for timestamp cookie support */
-	memset(&tcp_opt, 0, sizeof(tcp_opt));
-	tcp_parse_options(skb, &tcp_opt, 0);
-
-	if (tcp_opt.saw_tstamp)
-		cookie_check_timestamp(&tcp_opt);
-
 	ret = NULL;
 	req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
 	if (!req)
@@ -298,12 +291,6 @@
 	ireq->loc_addr		= ip_hdr(skb)->daddr;
 	ireq->rmt_addr		= ip_hdr(skb)->saddr;
 	ireq->ecn_ok		= 0;
-	ireq->snd_wscale	= tcp_opt.snd_wscale;
-	ireq->rcv_wscale	= tcp_opt.rcv_wscale;
-	ireq->sack_ok		= tcp_opt.sack_ok;
-	ireq->wscale_ok		= tcp_opt.wscale_ok;
-	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
-	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
 
 	/* We threw the options of the initial SYN away, so we hope
 	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -352,6 +339,20 @@
 		}
 	}
 
+	/* check for timestamp cookie support */
+	memset(&tcp_opt, 0, sizeof(tcp_opt));
+	tcp_parse_options(skb, &tcp_opt, 0, &rt->u.dst);
+
+	if (tcp_opt.saw_tstamp)
+		cookie_check_timestamp(&tcp_opt);
+
+	ireq->snd_wscale        = tcp_opt.snd_wscale;
+	ireq->rcv_wscale        = tcp_opt.rcv_wscale;
+	ireq->sack_ok           = tcp_opt.sack_ok;
+	ireq->wscale_ok         = tcp_opt.wscale_ok;
+	ireq->tstamp_ok         = tcp_opt.saw_tstamp;
+	req->ts_recent          = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
+
 	/* Try to redo what tcp_v4_send_synack did. */
 	req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 206a291..e0cfa63 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -326,6 +326,43 @@
 
 EXPORT_SYMBOL(tcp_enter_memory_pressure);
 
+/* Convert seconds to retransmits based on initial and max timeout */
+static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
+{
+	u8 res = 0;
+
+	if (seconds > 0) {
+		int period = timeout;
+
+		res = 1;
+		while (seconds > period && res < 255) {
+			res++;
+			timeout <<= 1;
+			if (timeout > rto_max)
+				timeout = rto_max;
+			period += timeout;
+		}
+	}
+	return res;
+}
+
+/* Convert retransmits to seconds based on initial and max timeout */
+static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
+{
+	int period = 0;
+
+	if (retrans > 0) {
+		period = timeout;
+		while (--retrans) {
+			timeout <<= 1;
+			if (timeout > rto_max)
+				timeout = rto_max;
+			period += timeout;
+		}
+	}
+	return period;
+}
+
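Worked example, assuming TCP_TIMEOUT_INIT/HZ = 3 and TCP_RTO_MAX/HZ = 120 (illustrative values): setting TCP_DEFER_ACCEPT to 10 seconds yields 3 retransmits, since 3 + 6 + 12 = 21 is the first cumulative period covering 10 seconds, and reading the option back then reports 21 seconds. A small userspace mirror to check the round trip:

#include <stdio.h>

/* userspace mirrors of the two helpers above, for a quick sanity check */
static unsigned char secs_to_retrans(int seconds, int timeout, int rto_max)
{
	unsigned char res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

static int retrans_to_secs(unsigned char retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

int main(void)
{
	unsigned char r = secs_to_retrans(10, 3, 120);

	printf("10s -> %u retransmits -> %ds\n", r, retrans_to_secs(r, 3, 120));
	return 0;	/* prints: 10s -> 3 retransmits -> 21s */
}
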
 /*
  *	Wait for a TCP event.
  *
@@ -1405,7 +1442,9 @@
 				goto found_ok_skb;
 			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
-			WARN_ON(!(flags & MSG_PEEK));
+			WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
+					"copied %X seq %X\n", *seq,
+					TCP_SKB_CB(skb)->seq);
 		}
 
 		/* Well, if we have backlog, try to process it now yet. */
@@ -2163,16 +2202,10 @@
 		break;
 
 	case TCP_DEFER_ACCEPT:
-		icsk->icsk_accept_queue.rskq_defer_accept = 0;
-		if (val > 0) {
-			/* Translate value in seconds to number of
-			 * retransmits */
-			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
-			       val > ((TCP_TIMEOUT_INIT / HZ) <<
-				       icsk->icsk_accept_queue.rskq_defer_accept))
-				icsk->icsk_accept_queue.rskq_defer_accept++;
-			icsk->icsk_accept_queue.rskq_defer_accept++;
-		}
+		/* Translate value in seconds to number of retransmits */
+		icsk->icsk_accept_queue.rskq_defer_accept =
+			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+					TCP_RTO_MAX / HZ);
 		break;
 
 	case TCP_WINDOW_CLAMP:
@@ -2353,8 +2386,8 @@
 			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
 		break;
 	case TCP_DEFER_ACCEPT:
-		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
-			((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
+		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
 		break;
 	case TCP_WINDOW_CLAMP:
 		val = tp->window_clamp;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d86784b..be0c5bf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2300,7 +2300,7 @@
  * they differ. Since neither occurs due to loss, TCP should really
  * ignore them.
  */
-static inline int tcp_dupack_heurestics(struct tcp_sock *tp)
+static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
 {
 	return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
 }
@@ -2425,7 +2425,7 @@
 		return 1;
 
 	/* Not-A-Trick#2 : Classic rule... */
-	if (tcp_dupack_heurestics(tp) > tp->reordering)
+	if (tcp_dupack_heuristics(tp) > tp->reordering)
 		return 1;
 
 	/* Trick#3 : when we use RFC2988 timer restart, fast
@@ -3698,7 +3698,7 @@
  * the fast version below fails.
  */
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
-		       int estab)
+		       int estab,  struct dst_entry *dst)
 {
 	unsigned char *ptr;
 	struct tcphdr *th = tcp_hdr(skb);
@@ -3737,7 +3737,8 @@
 				break;
 			case TCPOPT_WINDOW:
 				if (opsize == TCPOLEN_WINDOW && th->syn &&
-				    !estab && sysctl_tcp_window_scaling) {
+				    !estab && sysctl_tcp_window_scaling &&
+				    !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) {
 					__u8 snd_wscale = *(__u8 *)ptr;
 					opt_rx->wscale_ok = 1;
 					if (snd_wscale > 14) {
@@ -3753,7 +3754,8 @@
 			case TCPOPT_TIMESTAMP:
 				if ((opsize == TCPOLEN_TIMESTAMP) &&
 				    ((estab && opt_rx->tstamp_ok) ||
-				     (!estab && sysctl_tcp_timestamps))) {
+				     (!estab && sysctl_tcp_timestamps &&
+				      !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) {
 					opt_rx->saw_tstamp = 1;
 					opt_rx->rcv_tsval = get_unaligned_be32(ptr);
 					opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3761,7 +3763,8 @@
 				break;
 			case TCPOPT_SACK_PERM:
 				if (opsize == TCPOLEN_SACK_PERM && th->syn &&
-				    !estab && sysctl_tcp_sack) {
+				    !estab && sysctl_tcp_sack &&
+				    !dst_feature(dst, RTAX_FEATURE_NO_SACK)) {
 					opt_rx->sack_ok = 1;
 					tcp_sack_reset(opt_rx);
 				}
@@ -3820,7 +3823,7 @@
 		if (tcp_parse_aligned_timestamp(tp, th))
 			return 1;
 	}
-	tcp_parse_options(skb, &tp->rx_opt, 1);
+	tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
 	return 1;
 }
 
@@ -4075,8 +4078,10 @@
 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct dst_entry *dst = __sk_dst_get(sk);
 
-	if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
+	if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
+	    !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
 		int mib_idx;
 
 		if (before(seq, tp->rcv_nxt))
@@ -4105,13 +4110,15 @@
 static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct dst_entry *dst = __sk_dst_get(sk);
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
-		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
+		if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
+		    !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
 			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -5364,8 +5371,9 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int saved_clamp = tp->rx_opt.mss_clamp;
+	struct dst_entry *dst = __sk_dst_get(sk);
 
-	tcp_parse_options(skb, &tp->rx_opt, 0);
+	tcp_parse_options(skb, &tp->rx_opt, 0, dst);
 
 	if (th->ack) {
 		/* rfc793:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a4a3390..657ae33 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1257,11 +1257,21 @@
 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
 #endif
 
+	ireq = inet_rsk(req);
+	ireq->loc_addr = daddr;
+	ireq->rmt_addr = saddr;
+	ireq->no_srccheck = inet_sk(sk)->transparent;
+	ireq->opt = tcp_v4_save_options(sk, skb);
+
+	dst = inet_csk_route_req(sk, req);
+	if(!dst)
+		goto drop_and_free;
+
 	tcp_clear_options(&tmp_opt);
 	tmp_opt.mss_clamp = 536;
 	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;
 
-	tcp_parse_options(skb, &tmp_opt, 0);
+	tcp_parse_options(skb, &tmp_opt, 0, dst);
 
 	if (want_cookie && !tmp_opt.saw_tstamp)
 		tcp_clear_options(&tmp_opt);
@@ -1270,14 +1280,8 @@
 
 	tcp_openreq_init(req, &tmp_opt, skb);
 
-	ireq = inet_rsk(req);
-	ireq->loc_addr = daddr;
-	ireq->rmt_addr = saddr;
-	ireq->no_srccheck = inet_sk(sk)->transparent;
-	ireq->opt = tcp_v4_save_options(sk, skb);
-
 	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_free;
+		goto drop_and_release;
 
 	if (!want_cookie)
 		TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1302,7 +1306,6 @@
 		 */
 		if (tmp_opt.saw_tstamp &&
 		    tcp_death_row.sysctl_tw_recycle &&
-		    (dst = inet_csk_route_req(sk, req)) != NULL &&
 		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
 		    peer->v4daddr == saddr) {
 			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e320afe..a9d34e2 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -100,9 +100,9 @@
 	struct tcp_options_received tmp_opt;
 	int paws_reject = 0;
 
-	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
-		tcp_parse_options(skb, &tmp_opt, 0);
+		tmp_opt.tstamp_ok = 1;
+		tcp_parse_options(skb, &tmp_opt, 1, NULL);
 
 		if (tmp_opt.saw_tstamp) {
 			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
@@ -501,9 +501,9 @@
 	struct tcp_options_received tmp_opt;
 	struct sock *child;
 
-	tmp_opt.saw_tstamp = 0;
-	if (th->doff > (sizeof(struct tcphdr)>>2)) {
-		tcp_parse_options(skb, &tmp_opt, 0);
+	if (th->doff > (sizeof(struct tcphdr) >> 2) && req->ts_recent) {
+		tmp_opt.tstamp_ok = 1;
+		tcp_parse_options(skb, &tmp_opt, 1, NULL);
 
 		if (tmp_opt.saw_tstamp) {
 			tmp_opt.ts_recent = req->ts_recent;
@@ -641,10 +641,9 @@
 	if (!(flg & TCP_FLAG_ACK))
 		return NULL;
 
-	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
+	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-		inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
 		inet_rsk(req)->acked = 1;
 		return NULL;
 	}
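The defer-accept hunk above stops decrementing a per-listener counter and instead compares the request's retransmit count against the configured rskq_defer_accept value; a bare ACK is swallowed only while that bound has not been reached. A small sketch of the test, using an illustrative helper name that is not in the tree:

static int defer_accept_drops_bare_ack(const struct sock *sk,
				       const struct request_sock *req,
				       const struct sk_buff *skb)
{
	return req->retrans <
		       inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	       TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1;
}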
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2e2eb74..616c686 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -464,6 +464,7 @@
 				struct tcp_md5sig_key **md5) {
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned size = 0;
+	struct dst_entry *dst = __sk_dst_get(sk);
 
 #ifdef CONFIG_TCP_MD5SIG
 	*md5 = tp->af_specific->md5_lookup(sk, sk);
@@ -487,18 +488,22 @@
 	opts->mss = tcp_advertise_mss(sk);
 	size += TCPOLEN_MSS_ALIGNED;
 
-	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
+	if (likely(sysctl_tcp_timestamps &&
+		   !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
+		   *md5 == NULL)) {
 		opts->options |= OPTION_TS;
 		opts->tsval = TCP_SKB_CB(skb)->when;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		size += TCPOLEN_TSTAMP_ALIGNED;
 	}
-	if (likely(sysctl_tcp_window_scaling)) {
+	if (likely(sysctl_tcp_window_scaling &&
+		   !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
 		opts->ws = tp->rx_opt.rcv_wscale;
 		opts->options |= OPTION_WSCALE;
 		size += TCPOLEN_WSCALE_ALIGNED;
 	}
-	if (likely(sysctl_tcp_sack)) {
+	if (likely(sysctl_tcp_sack &&
+		   !dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
 		opts->options |= OPTION_SACK_ADVERTISE;
 		if (unlikely(!(OPTION_TS & opts->options)))
 			size += TCPOLEN_SACKPERM_ALIGNED;
@@ -2315,7 +2320,9 @@
 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
 	 */
 	tp->tcp_header_len = sizeof(struct tcphdr) +
-		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
+		((sysctl_tcp_timestamps &&
+		  !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)) ?
+		 TCPOLEN_TSTAMP_ALIGNED : 0);
 
 #ifdef CONFIG_TCP_MD5SIG
 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
@@ -2341,7 +2348,8 @@
 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
 				  &tp->rcv_wnd,
 				  &tp->window_clamp,
-				  sysctl_tcp_window_scaling,
+				  (sysctl_tcp_window_scaling &&
+				   !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)),
 				  &rcv_wscale);
 
 	tp->rx_opt.rcv_wscale = rcv_wscale;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6e8996c..8353a53 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -141,14 +141,14 @@
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits)
-			dst_negative_advice(&sk->sk_dst_cache);
+			dst_negative_advice(&sk->sk_dst_cache, sk);
 		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
 	} else {
 		if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
 
-			dst_negative_advice(&sk->sk_dst_cache);
+			dst_negative_advice(&sk->sk_dst_cache, sk);
 		}
 
 		retry_until = sysctl_tcp_retries2;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4274c1c..d5e75e9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1005,9 +1005,7 @@
 		err = ulen;
 
 out_free:
-	lock_sock(sk);
-	skb_free_datagram(sk, skb);
-	release_sock(sk);
+	skb_free_datagram_locked(sk, skb);
 out:
 	return err;
 
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 470c504..66f7951 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -64,7 +64,6 @@
 	.protocol	=  IPPROTO_UDPLITE,
 	.prot		=  &udplite_prot,
 	.ops		=  &inet_dgram_ops,
-	.capability	= -1,
 	.no_check	=  0,		/* must checksum (RFC 3828) */
 	.flags		=  INET_PROTOSW_PERMANENT,
 };
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9186484..024bba3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -481,9 +481,8 @@
 	struct net_device *dev;
 	struct inet6_dev *idev;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(net, dev) {
-		rcu_read_lock();
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
 		idev = __in6_dev_get(dev);
 		if (idev) {
 			int changed = (!idev->cnf.forwarding) ^ (!newf);
@@ -491,9 +490,8 @@
 			if (changed)
 				dev_forward_change(idev);
 		}
-		rcu_read_unlock();
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
@@ -1137,10 +1135,9 @@
 	hiscore->rule = -1;
 	hiscore->ifa = NULL;
 
-	read_lock(&dev_base_lock);
 	rcu_read_lock();
 
-	for_each_netdev(net, dev) {
+	for_each_netdev_rcu(net, dev) {
 		struct inet6_dev *idev;
 
 		/* Candidate Source Address (section 4)
@@ -1235,7 +1232,6 @@
 		read_unlock_bh(&idev->lock);
 	}
 	rcu_read_unlock();
-	read_unlock(&dev_base_lock);
 
 	if (!hiscore->ifa)
 		return -EADDRNOTAVAIL;
@@ -4052,9 +4048,8 @@
 	struct net_device *dev;
 	struct inet6_dev *idev;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(net, dev) {
-		rcu_read_lock();
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
 		idev = __in6_dev_get(dev);
 		if (idev) {
 			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -4062,9 +4057,8 @@
 			if (changed)
 				dev_disable_change(idev);
 		}
-		rcu_read_unlock();
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
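The addrconf hunks above replace the dev_base_lock walks with the RCU variant: one rcu_read_lock() around the whole loop, no per-device reference counting. A minimal sketch of the idiom, assuming nothing inside the loop sleeps:

static void forwarding_change_all(struct net *net, int newf)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct inet6_dev *idev = __in6_dev_get(dev);

		if (idev && (!idev->cnf.forwarding) ^ (!newf)) {
			idev->cnf.forwarding = newf;
			dev_forward_change(idev);
		}
	}
	rcu_read_unlock();
}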
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b6d0588..12e69d3 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -95,7 +95,8 @@
 	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
 }
 
-static int inet6_create(struct net *net, struct socket *sock, int protocol)
+static int inet6_create(struct net *net, struct socket *sock, int protocol,
+			int kern)
 {
 	struct inet_sock *inet;
 	struct ipv6_pinfo *np;
@@ -158,7 +159,7 @@
 	}
 
 	err = -EPERM;
-	if (answer->capability > 0 && !capable(answer->capability))
+	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
 		goto out_rcu_unlock;
 
 	sock->ops = answer->ops;
@@ -314,6 +315,7 @@
 		if (addr_type != IPV6_ADDR_ANY) {
 			struct net_device *dev = NULL;
 
+			rcu_read_lock();
 			if (addr_type & IPV6_ADDR_LINKLOCAL) {
 				if (addr_len >= sizeof(struct sockaddr_in6) &&
 				    addr->sin6_scope_id) {
@@ -326,12 +328,12 @@
 				/* Binding to link-local address requires an interface */
 				if (!sk->sk_bound_dev_if) {
 					err = -EINVAL;
-					goto out;
+					goto out_unlock;
 				}
-				dev = dev_get_by_index(net, sk->sk_bound_dev_if);
+				dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
 				if (!dev) {
 					err = -ENODEV;
-					goto out;
+					goto out_unlock;
 				}
 			}
 
@@ -342,14 +344,11 @@
 			if (!(addr_type & IPV6_ADDR_MULTICAST))	{
 				if (!ipv6_chk_addr(net, &addr->sin6_addr,
 						   dev, 0)) {
-					if (dev)
-						dev_put(dev);
 					err = -EADDRNOTAVAIL;
-					goto out;
+					goto out_unlock;
 				}
 			}
-			if (dev)
-				dev_put(dev);
+			rcu_read_unlock();
 		}
 	}
 
@@ -381,6 +380,9 @@
 out:
 	release_sock(sk);
 	return err;
+out_unlock:
+	rcu_read_unlock();
+	goto out;
 }
 
 EXPORT_SYMBOL(inet6_bind);
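With the protosw .capability field gone, the create path decides permissions itself: only raw sockets need a capability, and kernel-originated sockets (kern != 0) are exempt. A condensed sketch of the check introduced above:

/* illustrative condensation of the inet6_create() permission test */
static int raw_socket_allowed(const struct socket *sock, int kern)
{
	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;
	return 0;
}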
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 1ae58be..2f00ca8 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -404,13 +404,13 @@
 
 	if (dev)
 		return ipv6_chk_acast_dev(dev, addr);
-	read_lock(&dev_base_lock);
-	for_each_netdev(net, dev)
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev)
 		if (ipv6_chk_acast_dev(dev, addr)) {
 			found = 1;
 			break;
 		}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return found;
 }
 
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9f70452..e6f9cdf 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -537,12 +537,17 @@
 
 			addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
 
+			rcu_read_lock();
 			if (fl->oif) {
-				dev = dev_get_by_index(net, fl->oif);
-				if (!dev)
+				dev = dev_get_by_index_rcu(net, fl->oif);
+				if (!dev) {
+					rcu_read_unlock();
 					return -ENODEV;
-			} else if (addr_type & IPV6_ADDR_LINKLOCAL)
+				}
+			} else if (addr_type & IPV6_ADDR_LINKLOCAL) {
+				rcu_read_unlock();
 				return -EINVAL;
+			}
 
 			if (addr_type != IPV6_ADDR_ANY) {
 				int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
@@ -553,8 +558,7 @@
 					ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr);
 			}
 
-			if (dev)
-				dev_put(dev);
+			rcu_read_unlock();
 
 			if (err)
 				goto exit_f;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 19dceef..3516e6f 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -168,8 +168,7 @@
 	if (dst) {
 		struct rt6_info *rt = (struct rt6_info *)dst;
 		if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
-			sk->sk_dst_cache = NULL;
-			dst_release(dst);
+			__sk_dst_reset(sk);
 			dst = NULL;
 		}
 	}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c595bbe..1d61411 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -88,8 +88,10 @@
 	struct ip6_tnl **tnls[2];
 };
 
-/* lock for the tunnel lists */
-static DEFINE_RWLOCK(ip6_tnl_lock);
+/*
+ * Locking: hash tables are protected by RCU and a spinlock
+ */
+static DEFINE_SPINLOCK(ip6_tnl_lock);
 
 static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 {
@@ -130,6 +132,9 @@
  *   else %NULL
  **/
 
+#define for_each_ip6_tunnel_rcu(start) \
+	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
 static struct ip6_tnl *
 ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
 {
@@ -138,13 +143,14 @@
 	struct ip6_tnl *t;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
-	for (t = ip6n->tnls_r_l[h0 ^ h1]; t; t = t->next) {
+	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
-	if ((t = ip6n->tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP))
+	t = rcu_dereference(ip6n->tnls_wc[0]);
+	if (t && (t->dev->flags & IFF_UP))
 		return t;
 
 	return NULL;
@@ -186,10 +192,10 @@
 {
 	struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms);
 
+	spin_lock_bh(&ip6_tnl_lock);
 	t->next = *tp;
-	write_lock_bh(&ip6_tnl_lock);
-	*tp = t;
-	write_unlock_bh(&ip6_tnl_lock);
+	rcu_assign_pointer(*tp, t);
+	spin_unlock_bh(&ip6_tnl_lock);
 }
 
 /**
@@ -204,9 +210,9 @@
 
 	for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) {
 		if (t == *tp) {
-			write_lock_bh(&ip6_tnl_lock);
+			spin_lock_bh(&ip6_tnl_lock);
 			*tp = t->next;
-			write_unlock_bh(&ip6_tnl_lock);
+			spin_unlock_bh(&ip6_tnl_lock);
 			break;
 		}
 	}
@@ -313,9 +319,9 @@
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
 	if (dev == ip6n->fb_tnl_dev) {
-		write_lock_bh(&ip6_tnl_lock);
+		spin_lock_bh(&ip6_tnl_lock);
 		ip6n->tnls_wc[0] = NULL;
-		write_unlock_bh(&ip6_tnl_lock);
+		spin_unlock_bh(&ip6_tnl_lock);
 	} else {
 		ip6_tnl_unlink(ip6n, t);
 	}
@@ -409,7 +415,7 @@
 	   in trouble since we might need the source address for further
 	   processing of the error. */
 
-	read_lock(&ip6_tnl_lock);
+	rcu_read_lock();
 	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
 					&ipv6h->saddr)) == NULL)
 		goto out;
@@ -482,7 +488,7 @@
 	*msg = rel_msg;
 
 out:
-	read_unlock(&ip6_tnl_lock);
+	rcu_read_unlock();
 	return err;
 }
 
@@ -652,6 +658,7 @@
 		IP6_ECN_set_ce(ipv6_hdr(skb));
 }
 
+/* called with rcu_read_lock() */
 static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
 {
 	struct ip6_tnl_parm *p = &t->parms;
@@ -662,15 +669,13 @@
 		struct net_device *ldev = NULL;
 
 		if (p->link)
-			ldev = dev_get_by_index(net, p->link);
+			ldev = dev_get_by_index_rcu(net, p->link);
 
 		if ((ipv6_addr_is_multicast(&p->laddr) ||
 		     likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) &&
 		    likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0)))
 			ret = 1;
 
-		if (ldev)
-			dev_put(ldev);
 	}
 	return ret;
 }
@@ -693,23 +698,23 @@
 	struct ip6_tnl *t;
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 
-	read_lock(&ip6_tnl_lock);
+	rcu_read_lock();
 
 	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
 					&ipv6h->daddr)) != NULL) {
 		if (t->parms.proto != ipproto && t->parms.proto != 0) {
-			read_unlock(&ip6_tnl_lock);
+			rcu_read_unlock();
 			goto discard;
 		}
 
 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-			read_unlock(&ip6_tnl_lock);
+			rcu_read_unlock();
 			goto discard;
 		}
 
 		if (!ip6_tnl_rcv_ctl(t)) {
 			t->dev->stats.rx_dropped++;
-			read_unlock(&ip6_tnl_lock);
+			rcu_read_unlock();
 			goto discard;
 		}
 		secpath_reset(skb);
@@ -727,10 +732,10 @@
 		t->dev->stats.rx_packets++;
 		t->dev->stats.rx_bytes += skb->len;
 		netif_rx(skb);
-		read_unlock(&ip6_tnl_lock);
+		rcu_read_unlock();
 		return 0;
 	}
-	read_unlock(&ip6_tnl_lock);
+	rcu_read_unlock();
 	return 1;
 
 discard:
@@ -798,8 +803,9 @@
 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
 		struct net_device *ldev = NULL;
 
+		rcu_read_lock();
 		if (p->link)
-			ldev = dev_get_by_index(net, p->link);
+			ldev = dev_get_by_index_rcu(net, p->link);
 
 		if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
 			printk(KERN_WARNING
@@ -813,8 +819,7 @@
 			       p->name);
 		else
 			ret = 1;
-		if (ldev)
-			dev_put(ldev);
+		rcu_read_unlock();
 	}
 	return ret;
 }
@@ -1387,14 +1392,19 @@
 {
 	int h;
 	struct ip6_tnl *t;
+	LIST_HEAD(list);
 
 	for (h = 0; h < HASH_SIZE; h++) {
-		while ((t = ip6n->tnls_r_l[h]) != NULL)
-			unregister_netdevice(t->dev);
+		t = ip6n->tnls_r_l[h];
+		while (t != NULL) {
+			unregister_netdevice_queue(t->dev, &list);
+			t = t->next;
+		}
 	}
 
 	t = ip6n->tnls_wc[0];
-	unregister_netdevice(t->dev);
+	unregister_netdevice_queue(t->dev, &list);
+	unregister_netdevice_many(&list);
 }
 
 static int ip6_tnl_init_net(struct net *net)
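The tunnel hash tables above now follow the classic RCU split: lookups run under rcu_read_lock() and walk the chain through rcu_dereference(), while insert/remove serialize on ip6_tnl_lock and publish with rcu_assign_pointer(). A sketch of the read side, assuming the caller already holds rcu_read_lock():

/* caller must hold rcu_read_lock(); illustrative, mirrors ip6_tnl_lookup() */
static struct ip6_tnl *bucket_lookup(struct ip6_tnl **bucket,
				     const struct in6_addr *raddr)
{
	struct ip6_tnl *t;

	for (t = rcu_dereference(*bucket); t; t = rcu_dereference(t->next))
		if (ipv6_addr_equal(raddr, &t->parms.raddr) &&
		    (t->dev->flags & IFF_UP))
			return t;
	return NULL;
}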
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 85849b4..52e0f74 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -477,7 +477,7 @@
  *	Delete a VIF entry
  */
 
-static int mif6_delete(struct net *net, int vifi)
+static int mif6_delete(struct net *net, int vifi, struct list_head *head)
 {
 	struct mif_device *v;
 	struct net_device *dev;
@@ -519,7 +519,7 @@
 		in6_dev->cnf.mc_forwarding--;
 
 	if (v->flags & MIFF_REGISTER)
-		unregister_netdevice(dev);
+		unregister_netdevice_queue(dev, head);
 
 	dev_put(dev);
 	return 0;
@@ -976,6 +976,7 @@
 	struct net *net = dev_net(dev);
 	struct mif_device *v;
 	int ct;
+	LIST_HEAD(list);
 
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
@@ -983,8 +984,10 @@
 	v = &net->ipv6.vif6_table[0];
 	for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) {
 		if (v->dev == dev)
-			mif6_delete(net, ct);
+			mif6_delete(net, ct, &list);
 	}
+	unregister_netdevice_many(&list);
+
 	return NOTIFY_DONE;
 }
 
@@ -1188,14 +1191,16 @@
 static void mroute_clean_tables(struct net *net)
 {
 	int i;
+	LIST_HEAD(list);
 
 	/*
 	 *	Shut down all active vif entries
 	 */
 	for (i = 0; i < net->ipv6.maxvif; i++) {
 		if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC))
-			mif6_delete(net, i);
+			mif6_delete(net, i, &list);
 	}
+	unregister_netdevice_many(&list);
 
 	/*
 	 *	Wipe the cache
@@ -1325,7 +1330,7 @@
 		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
 			return -EFAULT;
 		rtnl_lock();
-		ret = mif6_delete(net, mifi);
+		ret = mif6_delete(net, mifi, NULL);
 		rtnl_unlock();
 		return ret;
 
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 39e10ac..430454e 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -497,13 +497,17 @@
 			goto e_inval;
 
 		if (val) {
+			struct net_device *dev;
+
 			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
 				goto e_inval;
 
-			if (__dev_get_by_index(net, val) == NULL) {
+			dev = dev_get_by_index(net, val);
+			if (!dev) {
 				retv = -ENODEV;
 				break;
 			}
+			dev_put(dev);
 		}
 		np->mcast_oif = val;
 		retv = 0;
@@ -662,7 +666,7 @@
 	case IPV6_MTU_DISCOVER:
 		if (optlen < sizeof(int))
 			goto e_inval;
-		if (val<0 || val>3)
+		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
 			goto e_inval;
 		np->pmtudisc = val;
 		retv = 0;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index cb834ab..818ef21 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1336,7 +1336,6 @@
 	.protocol	= IPPROTO_IP,	/* wild card */
 	.prot		= &rawv6_prot,
 	.ops		= &inet6_sockraw_ops,
-	.capability	= CAP_NET_RAW,
 	.no_check	= UDP_CSUM_DEFAULT,
 	.flags		= INET_PROTOSW_REUSE,
 };
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index da5bd0e..dce699f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -208,18 +208,17 @@
 	fq_kill(fq);
 
 	net = container_of(fq->q.net, struct net, ipv6.frags);
-	dev = dev_get_by_index(net, fq->iif);
-	if (!dev)
-		goto out;
-
 	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, fq->iif);
+	if (!dev)
+		goto out_rcu_unlock;
+
 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
-	rcu_read_unlock();
 
 	/* Don't send error if the first segment did not arrive. */
 	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
-		goto out;
+		goto out_rcu_unlock;
 
 	/*
 	   But use as source device on which LAST ARRIVED
@@ -228,9 +227,9 @@
 	 */
 	fq->q.fragments->dev = dev;
 	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
+out_rcu_unlock:
+	rcu_read_unlock();
 out:
-	if (dev)
-		dev_put(dev);
 	spin_unlock(&fq->q.lock);
 	fq_put(fq);
 }
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 510d31f..2362a33 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -77,8 +77,17 @@
 	struct net_device *fb_tunnel_dev;
 };
 
-static DEFINE_RWLOCK(ipip6_lock);
+/*
+ * Locking: hash tables are protected by RCU and a spinlock
+ */
+static DEFINE_SPINLOCK(ipip6_lock);
 
+#define for_each_ip_tunnel_rcu(start) \
+	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/*
+ * Must be invoked with rcu_read_lock() held
+ */
 static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
 		struct net_device *dev, __be32 remote, __be32 local)
 {
@@ -87,26 +96,26 @@
 	struct ip_tunnel *t;
 	struct sit_net *sitn = net_generic(net, sit_net_id);
 
-	for (t = sitn->tunnels_r_l[h0^h1]; t; t = t->next) {
+	for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) {
 		if (local == t->parms.iph.saddr &&
 		    remote == t->parms.iph.daddr &&
 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
-	for (t = sitn->tunnels_r[h0]; t; t = t->next) {
+	for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) {
 		if (remote == t->parms.iph.daddr &&
 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
-	for (t = sitn->tunnels_l[h1]; t; t = t->next) {
+	for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) {
 		if (local == t->parms.iph.saddr &&
 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
-	t = sitn->tunnels_wc[0];
+	t = rcu_dereference(sitn->tunnels_wc[0]);
 	if ((t != NULL) && (t->dev->flags & IFF_UP))
 		return t;
 	return NULL;
@@ -143,9 +152,9 @@
 
 	for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) {
 		if (t == *tp) {
-			write_lock_bh(&ipip6_lock);
+			spin_lock_bh(&ipip6_lock);
 			*tp = t->next;
-			write_unlock_bh(&ipip6_lock);
+			spin_unlock_bh(&ipip6_lock);
 			break;
 		}
 	}
@@ -155,10 +164,10 @@
 {
 	struct ip_tunnel **tp = ipip6_bucket(sitn, t);
 
+	spin_lock_bh(&ipip6_lock);
 	t->next = *tp;
-	write_lock_bh(&ipip6_lock);
-	*tp = t;
-	write_unlock_bh(&ipip6_lock);
+	rcu_assign_pointer(*tp, t);
+	spin_unlock_bh(&ipip6_lock);
 }
 
 static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
@@ -240,15 +249,22 @@
 	return NULL;
 }
 
+static DEFINE_SPINLOCK(ipip6_prl_lock);
+
+#define for_each_prl_rcu(start)			\
+	for (prl = rcu_dereference(start);	\
+	     prl;				\
+	     prl = rcu_dereference(prl->next))
+
 static struct ip_tunnel_prl_entry *
 __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
 {
-	struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *)NULL;
+	struct ip_tunnel_prl_entry *prl;
 
-	for (p = t->prl; p; p = p->next)
-		if (p->addr == addr)
+	for_each_prl_rcu(t->prl)
+		if (prl->addr == addr)
 			break;
-	return p;
+	return prl;
 
 }
 
@@ -273,7 +289,7 @@
 		kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
 		NULL;
 
-	read_lock(&ipip6_lock);
+	rcu_read_lock();
 
 	ca = t->prl_count < cmax ? t->prl_count : cmax;
 
@@ -291,7 +307,7 @@
 	}
 
 	c = 0;
-	for (prl = t->prl; prl; prl = prl->next) {
+	for_each_prl_rcu(t->prl) {
 		if (c >= cmax)
 			break;
 		if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
@@ -303,7 +319,7 @@
 			break;
 	}
 out:
-	read_unlock(&ipip6_lock);
+	rcu_read_unlock();
 
 	len = sizeof(*kp) * c;
 	ret = 0;
@@ -324,12 +340,14 @@
 	if (a->addr == htonl(INADDR_ANY))
 		return -EINVAL;
 
-	write_lock(&ipip6_lock);
+	spin_lock(&ipip6_prl_lock);
 
 	for (p = t->prl; p; p = p->next) {
 		if (p->addr == a->addr) {
-			if (chg)
-				goto update;
+			if (chg) {
+				p->flags = a->flags;
+				goto out;
+			}
 			err = -EEXIST;
 			goto out;
 		}
@@ -346,46 +364,63 @@
 		goto out;
 	}
 
+	INIT_RCU_HEAD(&p->rcu_head);
 	p->next = t->prl;
-	t->prl = p;
-	t->prl_count++;
-update:
 	p->addr = a->addr;
 	p->flags = a->flags;
+	t->prl_count++;
+	rcu_assign_pointer(t->prl, p);
 out:
-	write_unlock(&ipip6_lock);
+	spin_unlock(&ipip6_prl_lock);
 	return err;
 }
 
+static void prl_entry_destroy_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct ip_tunnel_prl_entry, rcu_head));
+}
+
+static void prl_list_destroy_rcu(struct rcu_head *head)
+{
+	struct ip_tunnel_prl_entry *p, *n;
+
+	p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
+	do {
+		n = p->next;
+		kfree(p);
+		p = n;
+	} while (p);
+}
+
 static int
 ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
 {
 	struct ip_tunnel_prl_entry *x, **p;
 	int err = 0;
 
-	write_lock(&ipip6_lock);
+	spin_lock(&ipip6_prl_lock);
 
 	if (a && a->addr != htonl(INADDR_ANY)) {
 		for (p = &t->prl; *p; p = &(*p)->next) {
 			if ((*p)->addr == a->addr) {
 				x = *p;
 				*p = x->next;
-				kfree(x);
+				call_rcu(&x->rcu_head, prl_entry_destroy_rcu);
 				t->prl_count--;
 				goto out;
 			}
 		}
 		err = -ENXIO;
 	} else {
-		while (t->prl) {
+		if (t->prl) {
+			t->prl_count = 0;
 			x = t->prl;
-			t->prl = t->prl->next;
-			kfree(x);
-			t->prl_count--;
+			call_rcu(&x->rcu_head, prl_list_destroy_rcu);
+			t->prl = NULL;
 		}
 	}
 out:
-	write_unlock(&ipip6_lock);
+	spin_unlock(&ipip6_prl_lock);
 	return err;
 }
 
@@ -395,7 +430,7 @@
 	struct ip_tunnel_prl_entry *p;
 	int ok = 1;
 
-	read_lock(&ipip6_lock);
+	rcu_read_lock();
 	p = __ipip6_tunnel_locate_prl(t, iph->saddr);
 	if (p) {
 		if (p->flags & PRL_DEFAULT)
@@ -411,7 +446,7 @@
 		else
 			ok = 0;
 	}
-	read_unlock(&ipip6_lock);
+	rcu_read_unlock();
 	return ok;
 }
 
@@ -421,9 +456,9 @@
 	struct sit_net *sitn = net_generic(net, sit_net_id);
 
 	if (dev == sitn->fb_tunnel_dev) {
-		write_lock_bh(&ipip6_lock);
+		spin_lock_bh(&ipip6_lock);
 		sitn->tunnels_wc[0] = NULL;
-		write_unlock_bh(&ipip6_lock);
+		spin_unlock_bh(&ipip6_lock);
 		dev_put(dev);
 	} else {
 		ipip6_tunnel_unlink(sitn, netdev_priv(dev));
@@ -476,7 +511,7 @@
 
 	err = -ENOENT;
 
-	read_lock(&ipip6_lock);
+	rcu_read_lock();
 	t = ipip6_tunnel_lookup(dev_net(skb->dev),
 				skb->dev,
 				iph->daddr,
@@ -494,7 +529,7 @@
 		t->err_count = 1;
 	t->err_time = jiffies;
 out:
-	read_unlock(&ipip6_lock);
+	rcu_read_unlock();
 	return err;
 }
 
@@ -514,7 +549,7 @@
 
 	iph = ip_hdr(skb);
 
-	read_lock(&ipip6_lock);
+	rcu_read_lock();
 	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
 				     iph->saddr, iph->daddr);
 	if (tunnel != NULL) {
@@ -528,7 +563,7 @@
 		if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
 		    !isatap_chksrc(skb, iph, tunnel)) {
 			tunnel->dev->stats.rx_errors++;
-			read_unlock(&ipip6_lock);
+			rcu_read_unlock();
 			kfree_skb(skb);
 			return 0;
 		}
@@ -539,12 +574,12 @@
 		nf_reset(skb);
 		ipip6_ecn_decapsulate(iph, skb);
 		netif_rx(skb);
-		read_unlock(&ipip6_lock);
+		rcu_read_unlock();
 		return 0;
 	}
 
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
-	read_unlock(&ipip6_lock);
+	rcu_read_unlock();
 out:
 	kfree_skb(skb);
 	return 0;
@@ -1110,16 +1145,19 @@
 	.priority	=	1,
 };
 
-static void sit_destroy_tunnels(struct sit_net *sitn)
+static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
 {
 	int prio;
 
 	for (prio = 1; prio < 4; prio++) {
 		int h;
 		for (h = 0; h < HASH_SIZE; h++) {
-			struct ip_tunnel *t;
-			while ((t = sitn->tunnels[prio][h]) != NULL)
-				unregister_netdevice(t->dev);
+			struct ip_tunnel *t = sitn->tunnels[prio][h];
+
+			while (t != NULL) {
+				unregister_netdevice_queue(t->dev, head);
+				t = t->next;
+			}
 		}
 	}
 }
@@ -1173,11 +1211,13 @@
 static void sit_exit_net(struct net *net)
 {
 	struct sit_net *sitn;
+	LIST_HEAD(list);
 
 	sitn = net_generic(net, sit_net_id);
 	rtnl_lock();
-	sit_destroy_tunnels(sitn);
-	unregister_netdevice(sitn->fb_tunnel_dev);
+	sit_destroy_tunnels(sitn, &list);
+	unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
+	unregister_netdevice_many(&list);
 	rtnl_unlock();
 	kfree(sitn);
 }
@@ -1192,6 +1232,7 @@
 	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
 
 	unregister_pernet_gen_device(sit_net_id, &sit_net_ops);
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
 
 static int __init sit_init(void)
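PRL entries in sit.c are now freed through call_rcu(): the writer unlinks an entry under ipip6_prl_lock and defers the kfree() to prl_entry_destroy_rcu(), so readers traversing the list under rcu_read_lock() never see freed memory. A sketch of the unlink step, assuming the caller holds the spinlock:

/* caller holds ipip6_prl_lock; illustrative only */
static void prl_unlink_entry(struct ip_tunnel *t,
			     struct ip_tunnel_prl_entry **pp)
{
	struct ip_tunnel_prl_entry *x = *pp;

	*pp = x->next;					/* unlink */
	t->prl_count--;
	call_rcu(&x->rcu_head, prl_entry_destroy_rcu);	/* free after grace period */
}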
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index c46da53..612fc53 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -184,13 +184,6 @@
 
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
-	/* check for timestamp cookie support */
-	memset(&tcp_opt, 0, sizeof(tcp_opt));
-	tcp_parse_options(skb, &tcp_opt, 0);
-
-	if (tcp_opt.saw_tstamp)
-		cookie_check_timestamp(&tcp_opt);
-
 	ret = NULL;
 	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
 	if (!req)
@@ -224,12 +217,6 @@
 	req->expires = 0UL;
 	req->retrans = 0;
 	ireq->ecn_ok		= 0;
-	ireq->snd_wscale	= tcp_opt.snd_wscale;
-	ireq->rcv_wscale	= tcp_opt.rcv_wscale;
-	ireq->sack_ok		= tcp_opt.sack_ok;
-	ireq->wscale_ok		= tcp_opt.wscale_ok;
-	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
-	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 
@@ -265,6 +252,21 @@
 			goto out_free;
 	}
 
+	/* check for timestamp cookie support */
+	memset(&tcp_opt, 0, sizeof(tcp_opt));
+	tcp_parse_options(skb, &tcp_opt, 0, dst);
+
+	if (tcp_opt.saw_tstamp)
+		cookie_check_timestamp(&tcp_opt);
+
+	req->ts_recent          = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
+
+	ireq->snd_wscale        = tcp_opt.snd_wscale;
+	ireq->rcv_wscale        = tcp_opt.rcv_wscale;
+	ireq->sack_ok           = tcp_opt.sack_ok;
+	ireq->wscale_ok         = tcp_opt.wscale_ok;
+	ireq->tstamp_ok         = tcp_opt.saw_tstamp;
+
 	req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
 	tcp_select_initial_window(tcp_full_space(sk), req->mss,
 				  &req->rcv_wnd, &req->window_clamp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c54ec36..696a22f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1167,6 +1167,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct request_sock *req = NULL;
 	__u32 isn = TCP_SKB_CB(skb)->when;
+	struct dst_entry *dst = __sk_dst_get(sk);
 #ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
 #else
@@ -1205,7 +1206,7 @@
 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 	tmp_opt.user_mss = tp->rx_opt.user_mss;
 
-	tcp_parse_options(skb, &tmp_opt, 0);
+	tcp_parse_options(skb, &tmp_opt, 0, dst);
 
 	if (want_cookie && !tmp_opt.saw_tstamp)
 		tcp_clear_options(&tmp_opt);
@@ -2111,7 +2112,6 @@
 	.protocol	=	IPPROTO_TCP,
 	.prot		=	&tcpv6_prot,
 	.ops		=	&inet6_stream_ops,
-	.capability	=	-1,
 	.no_check	=	0,
 	.flags		=	INET_PROTOSW_PERMANENT |
 				INET_PROTOSW_ICSK,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d3b59d7..5bc7cdb 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -288,9 +288,7 @@
 		err = ulen;
 
 out_free:
-	lock_sock(sk);
-	skb_free_datagram(sk, skb);
-	release_sock(sk);
+	skb_free_datagram_locked(sk, skb);
 out:
 	return err;
 
@@ -1286,7 +1284,6 @@
 	.protocol =  IPPROTO_UDP,
 	.prot =      &udpv6_prot,
 	.ops =       &inet6_dgram_ops,
-	.capability =-1,
 	.no_check =  UDP_CSUM_DEFAULT,
 	.flags =     INET_PROTOSW_PERMANENT,
 };
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index d737a27..6ea6938 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -62,7 +62,6 @@
 	.protocol	= IPPROTO_UDPLITE,
 	.prot		= &udplitev6_prot,
 	.ops		= &inet6_dgram_ops,
-	.capability	= -1,
 	.no_check	= 0,
 	.flags		= INET_PROTOSW_PERMANENT,
 };
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 81a95c0..438831d 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -23,7 +23,7 @@
  */
 #include <linux/module.h>
 #include <linux/xfrm.h>
-#include <linux/list.h>
+#include <linux/rculist.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/ipv6.h>
@@ -36,14 +36,15 @@
  * per xfrm_address_t.
  */
 struct xfrm6_tunnel_spi {
-	struct hlist_node list_byaddr;
-	struct hlist_node list_byspi;
-	xfrm_address_t addr;
-	u32 spi;
-	atomic_t refcnt;
+	struct hlist_node	list_byaddr;
+	struct hlist_node	list_byspi;
+	xfrm_address_t		addr;
+	u32			spi;
+	atomic_t		refcnt;
+	struct rcu_head		rcu_head;
 };
 
-static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);
+static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
 
 static u32 xfrm6_tunnel_spi;
 
@@ -107,6 +108,7 @@
 		if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
 			return;
 	}
+	rcu_barrier();
 	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
 	xfrm6_tunnel_spi_kmem = NULL;
 }
@@ -116,7 +118,7 @@
 	struct xfrm6_tunnel_spi *x6spi;
 	struct hlist_node *pos;
 
-	hlist_for_each_entry(x6spi, pos,
+	hlist_for_each_entry_rcu(x6spi, pos,
 			     &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
 			     list_byaddr) {
 		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
@@ -131,10 +133,10 @@
 	struct xfrm6_tunnel_spi *x6spi;
 	u32 spi;
 
-	read_lock_bh(&xfrm6_tunnel_spi_lock);
+	rcu_read_lock_bh();
 	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
 	spi = x6spi ? x6spi->spi : 0;
-	read_unlock_bh(&xfrm6_tunnel_spi_lock);
+	rcu_read_unlock_bh();
 	return htonl(spi);
 }
 
@@ -185,14 +187,15 @@
 	if (!x6spi)
 		goto out;
 
+	INIT_RCU_HEAD(&x6spi->rcu_head);
 	memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
 	x6spi->spi = spi;
 	atomic_set(&x6spi->refcnt, 1);
 
-	hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);
+	hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);
 
 	index = xfrm6_tunnel_spi_hash_byaddr(saddr);
-	hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
+	hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
 out:
 	return spi;
 }
@@ -202,26 +205,32 @@
 	struct xfrm6_tunnel_spi *x6spi;
 	u32 spi;
 
-	write_lock_bh(&xfrm6_tunnel_spi_lock);
+	spin_lock_bh(&xfrm6_tunnel_spi_lock);
 	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
 	if (x6spi) {
 		atomic_inc(&x6spi->refcnt);
 		spi = x6spi->spi;
 	} else
 		spi = __xfrm6_tunnel_alloc_spi(saddr);
-	write_unlock_bh(&xfrm6_tunnel_spi_lock);
+	spin_unlock_bh(&xfrm6_tunnel_spi_lock);
 
 	return htonl(spi);
 }
 
 EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);
 
+static void x6spi_destroy_rcu(struct rcu_head *head)
+{
+	kmem_cache_free(xfrm6_tunnel_spi_kmem,
+			container_of(head, struct xfrm6_tunnel_spi, rcu_head));
+}
+
 void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
 {
 	struct xfrm6_tunnel_spi *x6spi;
 	struct hlist_node *pos, *n;
 
-	write_lock_bh(&xfrm6_tunnel_spi_lock);
+	spin_lock_bh(&xfrm6_tunnel_spi_lock);
 
 	hlist_for_each_entry_safe(x6spi, pos, n,
 				  &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
@@ -229,14 +238,14 @@
 	{
 		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
 			if (atomic_dec_and_test(&x6spi->refcnt)) {
-				hlist_del(&x6spi->list_byaddr);
-				hlist_del(&x6spi->list_byspi);
-				kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);
+				hlist_del_rcu(&x6spi->list_byaddr);
+				hlist_del_rcu(&x6spi->list_byspi);
+				call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu);
 				break;
 			}
 		}
 	}
-	write_unlock_bh(&xfrm6_tunnel_spi_lock);
+	spin_unlock_bh(&xfrm6_tunnel_spi_lock);
 }
 
 EXPORT_SYMBOL(xfrm6_tunnel_free_spi);
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 6481ee4..96d193a 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1352,7 +1352,8 @@
 	.obj_size = sizeof(struct ipx_sock),
 };
 
-static int ipx_create(struct net *net, struct socket *sock, int protocol)
+static int ipx_create(struct net *net, struct socket *sock, int protocol,
+		      int kern)
 {
 	int rc = -ESOCKTNOSUPPORT;
 	struct sock *sk;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 9429e40..e73a001 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -61,7 +61,7 @@
 
 #include <net/irda/af_irda.h>
 
-static int irda_create(struct net *net, struct socket *sock, int protocol);
+static int irda_create(struct net *net, struct socket *sock, int protocol, int kern);
 
 static const struct proto_ops irda_stream_ops;
 static const struct proto_ops irda_seqpacket_ops;
@@ -839,7 +839,7 @@
 
 	IRDA_DEBUG(2, "%s()\n", __func__);
 
-	err = irda_create(sock_net(sk), newsock, sk->sk_protocol);
+	err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
 	if (err)
 		return err;
 
@@ -1062,7 +1062,8 @@
  *    Create IrDA socket
  *
  */
-static int irda_create(struct net *net, struct socket *sock, int protocol)
+static int irda_create(struct net *net, struct socket *sock, int protocol,
+		       int kern)
 {
 	struct sock *sk;
 	struct irda_sock *self;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 3aebabb..1e42886 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -481,7 +481,8 @@
 }
 
 /* Create an IUCV socket */
-static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
+static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
+			    int kern)
 {
 	struct sock *sk;
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 472f659..86b2c22 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -177,7 +177,8 @@
 	.obj_size = sizeof(struct pfkey_sock),
 };
 
-static int pfkey_create(struct net *net, struct socket *sock, int protocol)
+static int pfkey_create(struct net *net, struct socket *sock, int protocol,
+			int kern)
 {
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 	struct sock *sk;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 4866b4f..5266c28 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -140,14 +140,17 @@
 
 /**
  *	llc_ui_create - alloc and init a new llc_ui socket
+ *	@net: network namespace (must be default network)
  *	@sock: Socket to initialize and attach allocated sk to.
  *	@protocol: Unused.
+ *	@kern: on behalf of kernel or userspace
  *
  *	Allocate and initialize a new llc_ui socket, validate the user wants a
  *	socket type we have available.
  *	Returns 0 upon success, negative upon failure.
  */
-static int llc_ui_create(struct net *net, struct socket *sock, int protocol)
+static int llc_ui_create(struct net *net, struct socket *sock, int protocol,
+			 int kern)
 {
 	struct sock *sk;
 	int rc = -ESOCKTNOSUPPORT;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index bd765f3..b09948c 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -666,26 +666,25 @@
 
 	state = &sta->ampdu_mlme.tid_state_tx[tid];
 
+	del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+
 	spin_lock_bh(&sta->lock);
 
-	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
-		spin_unlock_bh(&sta->lock);
-		return;
-	}
+	if (!(*state & HT_ADDBA_REQUESTED_MSK))
+		goto timer_still_needed;
 
 	if (mgmt->u.action.u.addba_resp.dialog_token !=
 		sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
-		spin_unlock_bh(&sta->lock);
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
-		return;
+		goto timer_still_needed;
 	}
 
-	del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
+
 	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
 			== WLAN_STATUS_SUCCESS) {
 		u8 curstate = *state;
@@ -699,5 +698,11 @@
 	} else {
 		___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
 	}
+
+	goto out;
+
+ timer_still_needed:
+	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+ out:
 	spin_unlock_bh(&sta->lock);
 }
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 5608f6c..7b5131b 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -72,6 +72,9 @@
 	struct ieee80211_sub_if_data *sdata;
 	int ret;
 
+	if (netif_running(dev))
+		return -EBUSY;
+
 	if (!nl80211_type_check(type))
 		return -EINVAL;
 
@@ -81,9 +84,6 @@
 	if (ret)
 		return ret;
 
-	if (netif_running(sdata->dev))
-		return -EBUSY;
-
 	if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len)
 		ieee80211_sdata_set_mesh_id(sdata,
 					    params->mesh_id_len,
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 0891bfb..48ef1a2 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -153,7 +153,7 @@
 	if (net_ratelimit())
 		printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n",
 			mgmt->sa, initiator ? "initiator" : "recipient", tid,
-			mgmt->u.action.u.delba.reason_code);
+			le16_to_cpu(mgmt->u.action.u.delba.reason_code));
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
 	if (initiator == WLAN_BACK_INITIATOR)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 6eaf698..f1362f3 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -73,6 +73,7 @@
 	struct ieee80211_mgmt *mgmt;
 	u8 *pos;
 	struct ieee80211_supported_band *sband;
+	struct cfg80211_bss *bss;
 	u32 bss_change;
 	u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
 
@@ -177,8 +178,9 @@
 	mod_timer(&ifibss->timer,
 		  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
-	cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
-				  mgmt, skb->len, 0, GFP_KERNEL);
+	bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
+					mgmt, skb->len, 0, GFP_KERNEL);
+	cfg80211_put_bss(bss);
 	cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
 }
 
@@ -538,13 +540,12 @@
 				       WLAN_CAPABILITY_PRIVACY,
 				       capability);
 
+	if (bss) {
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
-	if (bss)
 		printk(KERN_DEBUG "   sta_find_ibss: selected %pM current "
 		       "%pM\n", bss->cbss.bssid, ifibss->bssid);
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 
-	if (bss && !memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) {
 		printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
 		       " based on configured SSID\n",
 		       sdata->dev->name, bss->cbss.bssid);
@@ -552,8 +553,7 @@
 		ieee80211_sta_join_ibss(sdata, bss);
 		ieee80211_rx_bss_put(local, bss);
 		return;
-	} else if (bss)
-		ieee80211_rx_bss_put(local, bss);
+	}
 
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 	printk(KERN_DEBUG "   did not try to join ibss\n");
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 87aff1d..14f10eb 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -314,7 +314,7 @@
 	if (sdata->vif.type == NL80211_IFTYPE_STATION)
 		ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
 
-	netif_tx_start_all_queues(dev);
+	netif_start_queue(dev);
 
 	return 0;
  err_del_interface:
@@ -343,7 +343,7 @@
 	/*
 	 * Stop TX on this interface first.
 	 */
-	netif_tx_stop_all_queues(dev);
+	netif_stop_queue(dev);
 
 	/*
 	 * Now delete all active aggregation sessions.
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index f7364e5..9a73389 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -50,7 +50,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
-	ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING;
+	set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
 
 	if (local->quiescing) {
 		set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
@@ -480,7 +480,7 @@
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_local *local = sdata->local;
 
-	ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING;
+	set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
 	ieee80211_queue_work(&local->hw, &ifmsh->work);
 	sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index e12a786..29b82e9 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -259,7 +259,7 @@
  * @hwmp_ie: hwmp information element (PREP or PREQ)
  *
  * This function updates the path routing information to the originator and the
- * transmitter of a HWMP PREQ or PREP fram.
+ * transmitter of a HWMP PREQ or PREP frame.
  *
  * Returns: metric to frame originator or 0 if the frame should not be further
  * processed
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 8d26e9b..dcc14e9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -458,9 +458,15 @@
 	mgmt->u.deauth.reason_code = cpu_to_le16(reason);
 
 	if (stype == IEEE80211_STYPE_DEAUTH)
-		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, cookie);
+		if (cookie)
+			__cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+		else
+			cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
 	else
-		cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len, cookie);
+		if (cookie)
+			__cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
+		else
+			cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
 	ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED);
 }
 
@@ -923,7 +929,7 @@
 	ieee80211_recalc_ps(local, -1);
 	mutex_unlock(&local->iflist_mtx);
 
-	netif_tx_start_all_queues(sdata->dev);
+	netif_start_queue(sdata->dev);
 	netif_carrier_on(sdata->dev);
 }
 
@@ -1055,7 +1061,7 @@
 	 * time -- we don't want the scan code to enable queues.
 	 */
 
-	netif_tx_stop_all_queues(sdata->dev);
+	netif_stop_queue(sdata->dev);
 	netif_carrier_off(sdata->dev);
 
 	rcu_read_lock();
@@ -1457,8 +1463,7 @@
 	if (status_code != WLAN_STATUS_SUCCESS) {
 		printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
 		       sdata->dev->name, status_code);
-		list_del(&wk->list);
-		kfree(wk);
+		wk->state = IEEE80211_MGD_STATE_IDLE;
 		return RX_MGMT_CFG80211_ASSOC;
 	}
 
@@ -1959,12 +1964,10 @@
 			/* no action */
 			break;
 		case RX_MGMT_CFG80211_DEAUTH:
-			cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len,
-					     NULL);
+			cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
 			break;
 		case RX_MGMT_CFG80211_DISASSOC:
-			cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len,
-					       NULL);
+			cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
 			break;
 		default:
 			WARN(1, "unexpected: %d", rma);
@@ -2019,7 +2022,7 @@
 		cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len);
 		break;
 	case RX_MGMT_CFG80211_DEAUTH:
-		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, NULL);
+		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
 		break;
 	default:
 		WARN(1, "unexpected: %d", rma);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7170bf4..5c385e3 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1504,19 +1504,28 @@
 		/* illegal frame */
 		return RX_DROP_MONITOR;
 
-	if (!is_multicast_ether_addr(hdr->addr1) &&
-			(mesh_hdr->flags & MESH_FLAGS_AE_A5_A6)) {
+	if (mesh_hdr->flags & MESH_FLAGS_AE) {
 		struct mesh_path *mppath;
+		char *proxied_addr;
+		char *mpp_addr;
+
+		if (is_multicast_ether_addr(hdr->addr1)) {
+			mpp_addr = hdr->addr3;
+			proxied_addr = mesh_hdr->eaddr1;
+		} else {
+			mpp_addr = hdr->addr4;
+			proxied_addr = mesh_hdr->eaddr2;
+		}
 
 		rcu_read_lock();
-		mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata);
+		mppath = mpp_path_lookup(proxied_addr, sdata);
 		if (!mppath) {
-			mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
+			mpp_path_add(proxied_addr, mpp_addr, sdata);
 		} else {
 			spin_lock_bh(&mppath->state_lock);
 			mppath->exp_time = jiffies;
-			if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
-				memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
+			if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
+				memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
 			spin_unlock_bh(&mppath->state_lock);
 		}
 		rcu_read_unlock();
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 71e10ca..7a350d2 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -306,10 +306,10 @@
 		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
 			if (sdata->u.mgd.associated) {
 				ieee80211_scan_ps_disable(sdata);
-				netif_tx_wake_all_queues(sdata->dev);
+				netif_wake_queue(sdata->dev);
 			}
 		} else
-			netif_tx_wake_all_queues(sdata->dev);
+			netif_wake_queue(sdata->dev);
 
 		/* re-enable beaconing */
 		if (sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -364,7 +364,7 @@
 		 * are handled in the scan state machine
 		 */
 		if (sdata->vif.type != NL80211_IFTYPE_STATION)
-			netif_tx_stop_all_queues(sdata->dev);
+			netif_stop_queue(sdata->dev);
 	}
 	mutex_unlock(&local->iflist_mtx);
 
@@ -523,7 +523,7 @@
 			continue;
 
 		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-			netif_tx_stop_all_queues(sdata->dev);
+			netif_stop_queue(sdata->dev);
 			if (sdata->u.mgd.associated)
 				ieee80211_scan_ps_enable(sdata);
 		}
@@ -558,7 +558,7 @@
 		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
 			if (sdata->u.mgd.associated)
 				ieee80211_scan_ps_disable(sdata);
-			netif_tx_wake_all_queues(sdata->dev);
+			netif_wake_queue(sdata->dev);
 		}
 	}
 	mutex_unlock(&local->iflist_mtx);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index db4bda6..eaa4118 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1445,7 +1445,7 @@
 				if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
 					continue;
 				if (compare_ether_addr(tmp_sdata->dev->dev_addr,
-						       hdr->addr2)) {
+						       hdr->addr2) == 0) {
 					dev_hold(tmp_sdata->dev);
 					dev_put(sdata->dev);
 					sdata = tmp_sdata;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7c9ec3d..0cdfb38 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1350,6 +1350,11 @@
 	return ret;
 }
 
+s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+			enum ip_conntrack_dir dir,
+			u32 seq);
+EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
+
 int nf_conntrack_init(struct net *net)
 {
 	int ret;
@@ -1367,6 +1372,9 @@
 		/* For use by REJECT target */
 		rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
 		rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
+
+		/* How to get NAT offsets */
+		rcu_assign_pointer(nf_ct_nat_offset, NULL);
 	}
 	return 0;
 
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 97a82ba..ba2b769 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -492,6 +492,21 @@
 	}
 }
 
+#ifdef CONFIG_NF_NAT_NEEDED
+static inline s16 nat_offset(const struct nf_conn *ct,
+			     enum ip_conntrack_dir dir,
+			     u32 seq)
+{
+	typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset);
+
+	return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
+}
+#define NAT_OFFSET(pf, ct, dir, seq) \
+	(pf == NFPROTO_IPV4 ? nat_offset(ct, dir, seq) : 0)
+#else
+#define NAT_OFFSET(pf, ct, dir, seq)	0
+#endif
+
 static bool tcp_in_window(const struct nf_conn *ct,
 			  struct ip_ct_tcp *state,
 			  enum ip_conntrack_dir dir,
@@ -506,6 +521,7 @@
 	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
 	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
 	__u32 seq, ack, sack, end, win, swin;
+	s16 receiver_offset;
 	bool res;
 
 	/*
@@ -519,11 +535,16 @@
 	if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
 		tcp_sack(skb, dataoff, tcph, &sack);
 
+	/* Take into account NAT sequence number mangling */
+	receiver_offset = NAT_OFFSET(pf, ct, !dir, ack - 1);
+	ack -= receiver_offset;
+	sack -= receiver_offset;
+
 	pr_debug("tcp_in_window: START\n");
 	pr_debug("tcp_in_window: ");
 	nf_ct_dump_tuple(tuple);
-	pr_debug("seq=%u ack=%u sack=%u win=%u end=%u\n",
-		 seq, ack, sack, win, end);
+	pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
+		 seq, ack, receiver_offset, sack, receiver_offset, win, end);
 	pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
 		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
 		 sender->td_end, sender->td_maxend, sender->td_maxwin,
@@ -613,8 +634,8 @@
 
 	pr_debug("tcp_in_window: ");
 	nf_ct_dump_tuple(tuple);
-	pr_debug("seq=%u ack=%u sack =%u win=%u end=%u\n",
-		 seq, ack, sack, win, end);
+	pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
+		 seq, ack, receiver_offset, sack, receiver_offset, win, end);
 	pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
 		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
 		 sender->td_end, sender->td_maxend, sender->td_maxwin,
@@ -700,7 +721,7 @@
 			before(seq, sender->td_maxend + 1) ?
 			after(end, sender->td_end - receiver->td_maxwin - 1) ?
 			before(sack, receiver->td_end + 1) ?
-			after(ack, receiver->td_end - MAXACKWINDOW(sender)) ? "BUG"
+			after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
 			: "ACK is under the lower bound (possible overly delayed ACK)"
 			: "ACK is over the upper bound (ACKed data not seen yet)"
 			: "SEQ is under the lower bound (already ACKed data retransmitted)"
@@ -715,39 +736,6 @@
 	return res;
 }
 
-#ifdef CONFIG_NF_NAT_NEEDED
-/* Update sender->td_end after NAT successfully mangled the packet */
-/* Caller must linearize skb at tcp header. */
-void nf_conntrack_tcp_update(const struct sk_buff *skb,
-			     unsigned int dataoff,
-			     struct nf_conn *ct, int dir,
-			     s16 offset)
-{
-	const struct tcphdr *tcph = (const void *)skb->data + dataoff;
-	const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir];
-	const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[!dir];
-	__u32 end;
-
-	end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph);
-
-	spin_lock_bh(&ct->lock);
-	/*
-	 * We have to worry for the ack in the reply packet only...
-	 */
-	if (ct->proto.tcp.seen[dir].td_end + offset == end)
-		ct->proto.tcp.seen[dir].td_end = end;
-	ct->proto.tcp.last_end = end;
-	spin_unlock_bh(&ct->lock);
-	pr_debug("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
-		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
-		 sender->td_end, sender->td_maxend, sender->td_maxwin,
-		 sender->td_scale,
-		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
-		 receiver->td_scale);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update);
-#endif
-
 #define	TH_FIN	0x01
 #define	TH_SYN	0x02
 #define	TH_RST	0x04
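nf_conntrack_tcp_update() is replaced by an RCU-published function pointer: the NAT module installs nf_ct_nat_offset when loaded, and tcp_in_window() asks it for the sequence-number shift, falling back to zero when NAT is not present. Sketch of the call-through, matching the nat_offset() helper added above:

static s16 nat_seq_offset(const struct nf_conn *ct,
			  enum ip_conntrack_dir dir, u32 seq)
{
	typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset);

	return get_offset ? get_offset(ct, dir, seq) : 0;
}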
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index fb357f0..3dfe2ba 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -472,13 +472,12 @@
 
 	rcu_read_lock();
 	if (dev_name != NULL) {
-		dev = dev_get_by_name(net, dev_name);
+		dev = dev_get_by_name_rcu(net, dev_name);
 		if (dev == NULL) {
 			ret_val = -ENODEV;
 			goto unlhsh_add_return;
 		}
 		ifindex = dev->ifindex;
-		dev_put(dev);
 		iface = netlbl_unlhsh_search_iface(ifindex);
 	} else {
 		ifindex = 0;
@@ -737,13 +736,12 @@
 
 	rcu_read_lock();
 	if (dev_name != NULL) {
-		dev = dev_get_by_name(net, dev_name);
+		dev = dev_get_by_name_rcu(net, dev_name);
 		if (dev == NULL) {
 			ret_val = -ENODEV;
 			goto unlhsh_remove_return;
 		}
 		iface = netlbl_unlhsh_search_iface(dev->ifindex);
-		dev_put(dev);
 	} else
 		iface = rcu_dereference(netlbl_unlhsh_def);
 	if (iface == NULL) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0cd2d88..aea805c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -428,7 +428,8 @@
 	return 0;
 }
 
-static int netlink_create(struct net *net, struct socket *sock, int protocol)
+static int netlink_create(struct net *net, struct socket *sock, int protocol,
+			  int kern)
 {
 	struct module *module = NULL;
 	struct mutex *cb_mutex;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 281fa59..4bdd569 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -425,7 +425,8 @@
 	.obj_size = sizeof(struct nr_sock),
 };
 
-static int nr_create(struct net *net, struct socket *sock, int protocol)
+static int nr_create(struct net *net, struct socket *sock, int protocol,
+		     int kern)
 {
 	struct sock *sk;
 	struct nr_sock *nr;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 4eb1ac9..aacba76 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -597,15 +597,15 @@
 {
 	struct net_device *dev, *first = NULL;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(&init_net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
 		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
 			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
 				first = dev;
 	}
 	if (first)
 		dev_hold(first);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 
 	return first;
 }
@@ -617,16 +617,17 @@
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(&init_net, dev) {
-		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
+		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
+		    ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
 			dev_hold(dev);
 			goto out;
 		}
 	}
 	dev = NULL;
 out:
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return dev;
 }
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index bf3a295..3304caa 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -79,6 +79,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
+#include <linux/if_vlan.h>
 
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
@@ -188,7 +189,6 @@
 	struct packet_ring_buffer	tx_ring;
 	int			copy_thresh;
 #endif
-	struct packet_type	prot_hook;
 	spinlock_t		bind_lock;
 	struct mutex		pg_vec_lock;
 	unsigned int		running:1,	/* prot_hook is attached*/
@@ -204,6 +204,7 @@
 	unsigned int		tp_reserve;
 	unsigned int		tp_loss:1;
 #endif
+	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
 };
 
 struct packet_skb_cb {
@@ -436,7 +437,8 @@
 	 */
 
 	saddr->spkt_device[13] = 0;
-	dev = dev_get_by_name(sock_net(sk), saddr->spkt_device);
+	rcu_read_lock();
+	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
 	err = -ENODEV;
 	if (dev == NULL)
 		goto out_unlock;
@@ -499,14 +501,13 @@
 	 */
 
 	dev_queue_xmit(skb);
-	dev_put(dev);
+	rcu_read_unlock();
 	return len;
 
 out_free:
 	kfree_skb(skb);
 out_unlock:
-	if (dev)
-		dev_put(dev);
+	rcu_read_unlock();
 	return err;
 }
 
@@ -766,7 +767,7 @@
 			getnstimeofday(&ts);
 		h.h2->tp_sec = ts.tv_sec;
 		h.h2->tp_nsec = ts.tv_nsec;
-		h.h2->tp_vlan_tci = skb->vlan_tci;
+		h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
 		hdrlen = sizeof(*h.h2);
 		break;
 	default:
@@ -983,10 +984,7 @@
 		goto out_put;
 
 	size_max = po->tx_ring.frame_size
-		- sizeof(struct skb_shared_info)
-		- po->tp_hdrlen
-		- LL_ALLOCATED_SPACE(dev)
-		- sizeof(struct sockaddr_ll);
+		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
 
 	if (size_max > dev->mtu + reserve)
 		size_max = dev->mtu + reserve;
@@ -1346,7 +1344,8 @@
  *	Create a packet of type SOCK_PACKET.
  */
 
-static int packet_create(struct net *net, struct socket *sock, int protocol)
+static int packet_create(struct net *net, struct socket *sock, int protocol,
+			 int kern)
 {
 	struct sock *sk;
 	struct packet_sock *po;
@@ -1493,7 +1492,7 @@
 		aux.tp_snaplen = skb->len;
 		aux.tp_mac = 0;
 		aux.tp_net = skb_network_offset(skb);
-		aux.tp_vlan_tci = skb->vlan_tci;
+		aux.tp_vlan_tci = vlan_tx_tag_get(skb);
 
 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
 	}
@@ -1520,12 +1519,13 @@
 		return -EOPNOTSUPP;
 
 	uaddr->sa_family = AF_PACKET;
-	dev = dev_get_by_index(sock_net(sk), pkt_sk(sk)->ifindex);
-	if (dev) {
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
+	if (dev)
 		strlcpy(uaddr->sa_data, dev->name, 15);
-		dev_put(dev);
-	} else
+	else
 		memset(uaddr->sa_data, 0, 14);
+	rcu_read_unlock();
 	*uaddr_len = sizeof(*uaddr);
 
 	return 0;
@@ -1545,16 +1545,17 @@
 	sll->sll_family = AF_PACKET;
 	sll->sll_ifindex = po->ifindex;
 	sll->sll_protocol = po->num;
-	dev = dev_get_by_index(sock_net(sk), po->ifindex);
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
 	if (dev) {
 		sll->sll_hatype = dev->type;
 		sll->sll_halen = dev->addr_len;
 		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
-		dev_put(dev);
 	} else {
 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
 		sll->sll_halen = 0;
 	}
+	rcu_read_unlock();
 	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
 
 	return 0;
@@ -1664,11 +1665,9 @@
 			if (--ml->count == 0) {
 				struct net_device *dev;
 				*mlp = ml->next;
-				dev = dev_get_by_index(sock_net(sk), ml->ifindex);
-				if (dev) {
+				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
+				if (dev)
 					packet_dev_mc(dev, ml, -1);
-					dev_put(dev);
-				}
 				kfree(ml);
 			}
 			rtnl_unlock();
@@ -1692,11 +1691,9 @@
 		struct net_device *dev;
 
 		po->mclist = ml->next;
-		dev = dev_get_by_index(sock_net(sk), ml->ifindex);
-		if (dev != NULL) {
+		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
+		if (dev != NULL)
 			packet_dev_mc(dev, ml, -1);
-			dev_put(dev);
-		}
 		kfree(ml);
 	}
 	rtnl_unlock();
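
Besides the RCU-based lookups, the af_packet.c hunk moves prot_hook to the
end of struct packet_sock and tags it ____cacheline_aligned_in_smp, so the
fields touched on every received packet no longer share a cache line with
the bind/ring bookkeeping. A hedged sketch of the annotation on a made-up
structure (field names are illustrative):

/* Sketch only: not part of the patch. */
#include <linux/cache.h>
#include <linux/spinlock.h>

struct example_stats {
	spinlock_t	cfg_lock;	/* cold: configuration path only */
	unsigned int	flags;

	/* hot, written on every packet: start a new cache line on SMP */
	unsigned long	rx_packets ____cacheline_aligned_in_smp;
	unsigned long	rx_bytes;
};
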
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 66737aa..3bd1be6 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -60,7 +60,8 @@
 
 /* protocol family functions */
 
-static int pn_socket_create(struct net *net, struct socket *sock, int protocol)
+static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
+			    int kern)
 {
 	struct sock *sk;
 	struct pn_sock *pn;
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index d8f5d3f..609e509 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -53,8 +53,7 @@
 		    RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL);
 	return;
 errout:
-	if (err < 0)
-		rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
+	rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
 }
 
 static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = {
@@ -212,8 +211,7 @@
 			  RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL);
 	return;
 errout:
-	if (err < 0)
-		rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err);
+	rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err);
 }
 
 static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = {
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index a202e5b..e25d8d5 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -265,6 +265,9 @@
 	case RDS_GET_MR:
 		ret = rds_get_mr(rs, optval, optlen);
 		break;
+	case RDS_GET_MR_FOR_DEST:
+		ret = rds_get_mr_for_dest(rs, optval, optlen);
+		break;
 	case RDS_FREE_MR:
 		ret = rds_free_mr(rs, optval, optlen);
 		break;
@@ -407,7 +410,8 @@
 	return 0;
 }
 
-static int rds_create(struct net *net, struct socket *sock, int protocol)
+static int rds_create(struct net *net, struct socket *sock, int protocol,
+		      int kern)
 {
 	struct sock *sk;
 
diff --git a/net/rds/cong.c b/net/rds/cong.c
index dd2711d..6d06cac 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -218,6 +218,8 @@
 	spin_lock_irqsave(&rds_cong_lock, flags);
 
 	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
+		if (conn->c_loopback)
+			continue;
 		if (!test_and_set_bit(0, &conn->c_map_queued)) {
 			rds_stats_inc(s_cong_update_queued);
 			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 1378b85..64df4e7 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -98,6 +98,7 @@
 	struct rds_ib_send_work *i_sends;
 
 	/* rx */
+	struct tasklet_struct	i_recv_tasklet;
 	struct mutex		i_recv_mutex;
 	struct rds_ib_work_ring	i_recv_ring;
 	struct rds_ib_incoming	*i_ibinc;
@@ -303,6 +304,7 @@
 int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
 			     size_t size);
 void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
+void rds_ib_recv_tasklet_fn(unsigned long data);
 void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
 void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
 void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index c2d372f..9d320692 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -694,6 +694,8 @@
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&ic->ib_node);
+	tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
+		     (unsigned long) ic);
 	mutex_init(&ic->i_recv_mutex);
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&ic->i_ack_lock);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index ef3ab5b..c5e9165 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -187,11 +187,8 @@
 	INIT_LIST_HEAD(list);
 	spin_unlock_irq(list_lock);
 
-	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) {
-		if (ic->conn->c_passive)
-			rds_conn_destroy(ic->conn->c_passive);
+	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
 		rds_conn_destroy(ic->conn);
-	}
 }
 
 struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cd7a6cf..fe5ab8c 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -143,15 +143,16 @@
 	int ret = -ENOMEM;
 
 	if (recv->r_ibinc == NULL) {
-		if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) {
+		if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
 			rds_ib_stats_inc(s_ib_rx_alloc_limit);
 			goto out;
 		}
 		recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
 						 kptr_gfp);
-		if (recv->r_ibinc == NULL)
+		if (recv->r_ibinc == NULL) {
+			atomic_dec(&rds_ib_allocation);
 			goto out;
-		atomic_inc(&rds_ib_allocation);
+		}
 		INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
 		rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
 	}
@@ -824,17 +825,22 @@
 {
 	struct rds_connection *conn = context;
 	struct rds_ib_connection *ic = conn->c_transport_data;
-	struct ib_wc wc;
-	struct rds_ib_ack_state state = { 0, };
-	struct rds_ib_recv_work *recv;
 
 	rdsdebug("conn %p cq %p\n", conn, cq);
 
 	rds_ib_stats_inc(s_ib_rx_cq_call);
 
-	ib_req_notify_cq(cq, IB_CQ_SOLICITED);
+	tasklet_schedule(&ic->i_recv_tasklet);
+}
 
-	while (ib_poll_cq(cq, 1, &wc) > 0) {
+static inline void rds_poll_cq(struct rds_ib_connection *ic,
+			       struct rds_ib_ack_state *state)
+{
+	struct rds_connection *conn = ic->conn;
+	struct ib_wc wc;
+	struct rds_ib_recv_work *recv;
+
+	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
 		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
 			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
 			 be32_to_cpu(wc.ex.imm_data));
@@ -852,7 +858,7 @@
 		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
 			/* We expect errors as the qp is drained during shutdown */
 			if (wc.status == IB_WC_SUCCESS) {
-				rds_ib_process_recv(conn, recv, wc.byte_len, &state);
+				rds_ib_process_recv(conn, recv, wc.byte_len, state);
 			} else {
 				rds_ib_conn_error(conn, "recv completion on "
 				       "%pI4 had status %u, disconnecting and "
@@ -863,6 +869,17 @@
 
 		rds_ib_ring_free(&ic->i_recv_ring, 1);
 	}
+}
+
+void rds_ib_recv_tasklet_fn(unsigned long data)
+{
+	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
+	struct rds_connection *conn = ic->conn;
+	struct rds_ib_ack_state state = { 0, };
+
+	rds_poll_cq(ic, &state);
+	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
+	rds_poll_cq(ic, &state);
 
 	if (state.ack_next_valid)
 		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
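
The ib_recv.c rework above moves completion processing out of the hard-IRQ
completion handler into a tasklet and uses a poll / re-arm / poll-again
sequence so a completion that arrives between the last poll and
ib_req_notify_cq() cannot be lost. A stripped-down sketch of the same shape
with hypothetical names; the real code also accumulates ACK state across
both polls.

/* Sketch only: not part of the patch. */
#include <linux/interrupt.h>
#include <rdma/ib_verbs.h>

struct example_conn {
	struct ib_cq		*recv_cq;
	struct tasklet_struct	recv_tasklet;
};

static void example_drain_cq(struct example_conn *ec)
{
	struct ib_wc wc;

	while (ib_poll_cq(ec->recv_cq, 1, &wc) > 0) {
		/* handle one receive completion (elided) */
	}
}

/* hard-IRQ context: defer all real work */
static void example_cq_handler(struct ib_cq *cq, void *context)
{
	struct example_conn *ec = context;

	tasklet_schedule(&ec->recv_tasklet);
}

static void example_recv_tasklet_fn(unsigned long data)
{
	struct example_conn *ec = (struct example_conn *)data;

	example_drain_cq(ec);
	/* re-arm, then poll once more to catch completions that raced
	 * with the notify request */
	ib_req_notify_cq(ec->recv_cq, IB_CQ_SOLICITED);
	example_drain_cq(ec);
}

/* setup, e.g. at connection init:
 *	tasklet_init(&ec->recv_tasklet, example_recv_tasklet_fn,
 *		     (unsigned long)ec);
 */
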
diff --git a/net/rds/iw.h b/net/rds/iw.h
index dd72b62..eef2f0c 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -119,6 +119,7 @@
 	struct rds_iw_send_work *i_sends;
 
 	/* rx */
+	struct tasklet_struct	i_recv_tasklet;
 	struct mutex		i_recv_mutex;
 	struct rds_iw_work_ring	i_recv_ring;
 	struct rds_iw_incoming	*i_iwinc;
@@ -330,6 +331,7 @@
 int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
 			     size_t size);
 void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
+void rds_iw_recv_tasklet_fn(unsigned long data);
 void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
 void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
 void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a416b0d..394cf6b 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -696,6 +696,8 @@
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&ic->iw_node);
+	tasklet_init(&ic->i_recv_tasklet, rds_iw_recv_tasklet_fn,
+		     (unsigned long) ic);
 	mutex_init(&ic->i_recv_mutex);
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&ic->i_ack_lock);
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index de4a1b1..b25d785 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -245,11 +245,8 @@
 	INIT_LIST_HEAD(list);
 	spin_unlock_irq(list_lock);
 
-	list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node) {
-		if (ic->conn->c_passive)
-			rds_conn_destroy(ic->conn->c_passive);
+	list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
 		rds_conn_destroy(ic->conn);
-	}
 }
 
 static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 8683f5f..24fc53f 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -143,15 +143,16 @@
 	int ret = -ENOMEM;
 
 	if (recv->r_iwinc == NULL) {
-		if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) {
+		if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
 			rds_iw_stats_inc(s_iw_rx_alloc_limit);
 			goto out;
 		}
 		recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
 						 kptr_gfp);
-		if (recv->r_iwinc == NULL)
+		if (recv->r_iwinc == NULL) {
+			atomic_dec(&rds_iw_allocation);
 			goto out;
-		atomic_inc(&rds_iw_allocation);
+		}
 		INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
 		rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
 	}
@@ -783,17 +784,22 @@
 {
 	struct rds_connection *conn = context;
 	struct rds_iw_connection *ic = conn->c_transport_data;
-	struct ib_wc wc;
-	struct rds_iw_ack_state state = { 0, };
-	struct rds_iw_recv_work *recv;
 
 	rdsdebug("conn %p cq %p\n", conn, cq);
 
 	rds_iw_stats_inc(s_iw_rx_cq_call);
 
-	ib_req_notify_cq(cq, IB_CQ_SOLICITED);
+	tasklet_schedule(&ic->i_recv_tasklet);
+}
 
-	while (ib_poll_cq(cq, 1, &wc) > 0) {
+static inline void rds_poll_cq(struct rds_iw_connection *ic,
+			       struct rds_iw_ack_state *state)
+{
+	struct rds_connection *conn = ic->conn;
+	struct ib_wc wc;
+	struct rds_iw_recv_work *recv;
+
+	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
 		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
 			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
 			 be32_to_cpu(wc.ex.imm_data));
@@ -811,7 +817,7 @@
 		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
 			/* We expect errors as the qp is drained during shutdown */
 			if (wc.status == IB_WC_SUCCESS) {
-				rds_iw_process_recv(conn, recv, wc.byte_len, &state);
+				rds_iw_process_recv(conn, recv, wc.byte_len, state);
 			} else {
 				rds_iw_conn_error(conn, "recv completion on "
 				       "%pI4 had status %u, disconnecting and "
@@ -822,6 +828,17 @@
 
 		rds_iw_ring_free(&ic->i_recv_ring, 1);
 	}
+}
+
+void rds_iw_recv_tasklet_fn(unsigned long data)
+{
+	struct rds_iw_connection *ic = (struct rds_iw_connection *) data;
+	struct rds_connection *conn = ic->conn;
+	struct rds_iw_ack_state state = { 0, };
+
+	rds_poll_cq(ic, &state);
+	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
+	rds_poll_cq(ic, &state);
 
 	if (state.ack_next_valid)
 		rds_iw_set_ack(ic, state.ack_next, state.ack_required);
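
Both ib_recv.c and iw_recv.c also replace a racy read-then-increment of the
global allocation counter with atomic_add_unless(), which reserves a slot
and enforces the limit in one atomic step; if the allocation then fails, the
slot is returned with atomic_dec(). A minimal sketch of the idiom with
hypothetical names:

/* Sketch only: not part of the patch. */
#include <linux/atomic.h>
#include <linux/slab.h>

static atomic_t example_allocations = ATOMIC_INIT(0);
static int example_alloc_limit = 1024;		/* illustrative cap */

static void *example_capped_alloc(size_t size, gfp_t gfp)
{
	void *obj;

	/* take a slot only while we are still below the limit */
	if (!atomic_add_unless(&example_allocations, 1, example_alloc_limit))
		return NULL;

	obj = kmalloc(size, gfp);
	if (!obj)
		atomic_dec(&example_allocations);	/* give the slot back */
	return obj;
}
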
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 8dc83d2..971b5a6 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -317,6 +317,30 @@
 	return __rds_rdma_map(rs, &args, NULL, NULL);
 }
 
+int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
+{
+	struct rds_get_mr_for_dest_args args;
+	struct rds_get_mr_args new_args;
+
+	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
+		return -EINVAL;
+
+	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
+			   sizeof(struct rds_get_mr_for_dest_args)))
+		return -EFAULT;
+
+	/*
+	 * Initially, just behave like get_mr().
+	 * TODO: Implement get_mr as a wrapper around this
+	 *	 and deprecate it.
+	 */
+	new_args.vec = args.vec;
+	new_args.cookie_addr = args.cookie_addr;
+	new_args.flags = args.flags;
+
+	return __rds_rdma_map(rs, &new_args, NULL, NULL);
+}
+
 /*
  * Free the MR indicated by the given R_Key
  */
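
The rdma.c hunk adds an RDS_GET_MR_FOR_DEST socket option which, for now,
registers memory exactly like RDS_GET_MR. A hedged userspace sketch of how a
caller might use it: RDS_GET_MR_FOR_DEST and the vec/cookie_addr fields
appear in the patch, while SOL_RDS and the dest_addr field name are assumed
(from linux/rds.h and from the option's name, respectively); this patch does
not interpret the destination yet.

/* Userspace sketch only: not part of the patch. */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/rds.h>

static int example_get_mr_for_dest(int rds_fd, void *buf, size_t len,
				   const struct sockaddr_in *dest,
				   uint64_t *cookie)
{
	struct rds_get_mr_for_dest_args args;

	memset(&args, 0, sizeof(args));
	memcpy(&args.dest_addr, dest, sizeof(*dest));	/* assumed field */
	args.vec.addr = (uint64_t)(unsigned long)buf;
	args.vec.bytes = len;
	args.cookie_addr = (uint64_t)(unsigned long)cookie;

	return setsockopt(rds_fd, SOL_RDS, RDS_GET_MR_FOR_DEST,
			  &args, sizeof(args));
}
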
diff --git a/net/rds/rdma.h b/net/rds/rdma.h
index 4255120..909c398 100644
--- a/net/rds/rdma.h
+++ b/net/rds/rdma.h
@@ -61,6 +61,7 @@
 }
 
 int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
+int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
 int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
 void rds_rdma_drop_keys(struct rds_sock *rs);
 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index c17734c..4de4287 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -512,7 +512,8 @@
 	.obj_size = sizeof(struct rose_sock),
 };
 
-static int rose_create(struct net *net, struct socket *sock, int protocol)
+static int rose_create(struct net *net, struct socket *sock, int protocol,
+		       int kern)
 {
 	struct sock *sk;
 	struct rose_sock *rose;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 9478d9b..ea2e723 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -578,18 +578,18 @@
 
 /*
  *	Check that the device given is a valid AX.25 interface that is "up".
+ *	Called with RTNL.
  */
-static struct net_device *rose_ax25_dev_get(char *devname)
+static struct net_device *rose_ax25_dev_find(char *devname)
 {
 	struct net_device *dev;
 
-	if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
+	if ((dev = __dev_get_by_name(&init_net, devname)) == NULL)
 		return NULL;
 
 	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
 		return dev;
 
-	dev_put(dev);
 	return NULL;
 }
 
@@ -600,13 +600,13 @@
 {
 	struct net_device *dev, *first = NULL;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(&init_net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
 		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE)
 			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
 				first = dev;
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 
 	return first;
 }
@@ -618,8 +618,8 @@
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(&init_net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
 		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) {
 			dev_hold(dev);
 			goto out;
@@ -627,7 +627,7 @@
 	}
 	dev = NULL;
 out:
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return dev;
 }
 
@@ -635,14 +635,14 @@
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(&init_net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
 		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0)
 			goto out;
 	}
 	dev = NULL;
 out:
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return dev != NULL;
 }
 
@@ -720,27 +720,23 @@
 	case SIOCADDRT:
 		if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
 			return -EFAULT;
-		if ((dev = rose_ax25_dev_get(rose_route.device)) == NULL)
+		if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
 			return -EINVAL;
-		if (rose_dev_exists(&rose_route.address)) { /* Can't add routes to ourself */
-			dev_put(dev);
+		if (rose_dev_exists(&rose_route.address)) /* Can't add routes to ourself */
 			return -EINVAL;
-		}
 		if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
 			return -EINVAL;
 		if (rose_route.ndigis > AX25_MAX_DIGIS)
 			return -EINVAL;
 		err = rose_add_node(&rose_route, dev);
-		dev_put(dev);
 		return err;
 
 	case SIOCDELRT:
 		if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
 			return -EFAULT;
-		if ((dev = rose_ax25_dev_get(rose_route.device)) == NULL)
+		if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
 			return -EINVAL;
 		err = rose_del_node(&rose_route, dev);
-		dev_put(dev);
 		return err;
 
 	case SIOCRSCLRRT:
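
rose_ax25_dev_find() and the SIOCADDRT/SIOCDELRT paths above now use
__dev_get_by_name(), which returns the device without taking a reference;
that is only safe because the ioctl path holds the RTNL, which keeps the
device from being unregistered. A minimal sketch of the convention with an
illustrative caller:

/* Sketch only: not part of the patch. Caller must hold RTNL, so no
 * dev_hold()/dev_put() pairing is needed. */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static struct net_device *example_find_up_dev(const char *name)
{
	struct net_device *dev;

	ASSERT_RTNL();

	dev = __dev_get_by_name(&init_net, name);
	if (dev && (dev->flags & IFF_UP))
		return dev;

	return NULL;
}
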
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 6817c97..f978d02 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -608,7 +608,8 @@
 /*
  * create an RxRPC socket
  */
-static int rxrpc_create(struct net *net, struct socket *sock, int protocol)
+static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
+			int kern)
 {
 	struct rxrpc_sock *rx;
 	struct sock *sk;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 4ab916b..e9607fe 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -54,6 +54,8 @@
 	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
 	    skb->dev->real_num_tx_queues > d->queue_mapping)
 		skb_set_queue_mapping(skb, d->queue_mapping);
+	if (d->flags & SKBEDIT_F_MARK)
+		skb->mark = d->mark;
 
 	spin_unlock(&d->tcf_lock);
 	return d->tcf_action;
@@ -63,6 +65,7 @@
 	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
 	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
 	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
+	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
 };
 
 static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
@@ -72,7 +75,7 @@
 	struct tc_skbedit *parm;
 	struct tcf_skbedit *d;
 	struct tcf_common *pc;
-	u32 flags = 0, *priority = NULL;
+	u32 flags = 0, *priority = NULL, *mark = NULL;
 	u16 *queue_mapping = NULL;
 	int ret = 0, err;
 
@@ -95,6 +98,12 @@
 		flags |= SKBEDIT_F_QUEUE_MAPPING;
 		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
 	}
+
+	if (tb[TCA_SKBEDIT_MARK] != NULL) {
+		flags |= SKBEDIT_F_MARK;
+		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
+	}
+
 	if (!flags)
 		return -EINVAL;
 
@@ -124,6 +133,9 @@
 		d->priority = *priority;
 	if (flags & SKBEDIT_F_QUEUE_MAPPING)
 		d->queue_mapping = *queue_mapping;
+	if (flags & SKBEDIT_F_MARK)
+		d->mark = *mark;
+
 	d->tcf_action = parm->action;
 
 	spin_unlock_bh(&d->tcf_lock);
@@ -161,6 +173,9 @@
 	if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
 		NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
 			sizeof(d->queue_mapping), &d->queue_mapping);
+	if (d->flags & SKBEDIT_F_MARK)
+		NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
+			&d->mark);
 	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 7cf6c0f..c024da7 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -404,6 +404,7 @@
 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
 }
 
+/* called with RTNL */
 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
@@ -422,7 +423,7 @@
 
 	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
 		return skb->len;
-	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
 		return skb->len;
 
 	if (!tcm->tcm_parent)
@@ -484,7 +485,6 @@
 	if (cl)
 		cops->put(q, cl);
 out:
-	dev_put(dev);
 	return skb->len;
 }
 
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 18d85d2..8e8d836 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -303,17 +303,17 @@
 {
 	SKIP_NONLOCAL(skb);
 
-	 if (skb->sk->sk_bound_dev_if == 0) {
+	if (skb->sk->sk_bound_dev_if == 0) {
 		dst->value = (unsigned long) "any";
 		dst->len = 3;
-	 } else  {
+	} else {
 		struct net_device *dev;
 
-		dev = dev_get_by_index(&init_net, skb->sk->sk_bound_dev_if);
+		rcu_read_lock();
+		dev = dev_get_by_index_rcu(&init_net, skb->sk->sk_bound_dev_if);
 		*err = var_dev(dev, dst);
-		if (dev)
-			dev_put(dev);
-	 }
+		rcu_read_unlock();
+	}
 }
 
 META_COLLECTOR(int_sk_refcnt)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index bb280e6..cc50fbe 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -837,15 +837,16 @@
 		if (type & IPV6_ADDR_LINKLOCAL) {
 			if (!addr->v6.sin6_scope_id)
 				return 0;
-			dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id);
-			if (!dev)
-				return 0;
-			if (!ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
+			rcu_read_lock();
+			dev = dev_get_by_index_rcu(&init_net,
+						   addr->v6.sin6_scope_id);
+			if (!dev ||
+			    !ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
 					   dev, 0)) {
-				dev_put(dev);
+				rcu_read_unlock();
 				return 0;
 			}
-			dev_put(dev);
+			rcu_read_unlock();
 		} else if (type == IPV6_ADDR_MAPPED) {
 			if (!opt->v4mapped)
 				return 0;
@@ -873,10 +874,12 @@
 		if (type & IPV6_ADDR_LINKLOCAL) {
 			if (!addr->v6.sin6_scope_id)
 				return 0;
-			dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id);
+			rcu_read_lock();
+			dev = dev_get_by_index_rcu(&init_net,
+						   addr->v6.sin6_scope_id);
+			rcu_read_unlock();
 			if (!dev)
 				return 0;
-			dev_put(dev);
 		}
 		af = opt->pf->af;
 	}
@@ -930,7 +933,6 @@
 	.protocol      = IPPROTO_SCTP,
 	.prot 	       = &sctpv6_prot,
 	.ops           = &inet6_seqpacket_ops,
-	.capability    = -1,
 	.no_check      = 0,
 	.flags         = SCTP_PROTOSW_FLAG
 };
@@ -939,7 +941,6 @@
 	.protocol      = IPPROTO_SCTP,
 	.prot 	       = &sctpv6_prot,
 	.ops           = &inet6_seqpacket_ops,
-	.capability    = -1,
 	.no_check      = 0,
 	.flags         = SCTP_PROTOSW_FLAG,
 };
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d9f4cc2..08ef203 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -205,14 +205,14 @@
 	struct list_head *pos;
 	struct sctp_af *af;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(&init_net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
 		__list_for_each(pos, &sctp_address_families) {
 			af = list_entry(pos, struct sctp_af, list);
 			af->copy_addrlist(&sctp_local_addr_list, dev);
 		}
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
 /* Free the existing local addresses.  */
@@ -909,7 +909,6 @@
 	.protocol   = IPPROTO_SCTP,
 	.prot       = &sctp_prot,
 	.ops        = &inet_seqpacket_ops,
-	.capability = -1,
 	.no_check   = 0,
 	.flags      = SCTP_PROTOSW_FLAG
 };
@@ -918,7 +917,6 @@
 	.protocol   = IPPROTO_SCTP,
 	.prot       = &sctp_prot,
 	.ops        = &inet_seqpacket_ops,
-	.capability = -1,
 	.no_check   = 0,
 	.flags      = SCTP_PROTOSW_FLAG
 };
diff --git a/net/socket.c b/net/socket.c
index 9dff31c..4f3e0f0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1252,7 +1252,7 @@
 	/* Now protected by module ref count */
 	rcu_read_unlock();
 
-	err = pf->create(net, sock, protocol);
+	err = pf->create(net, sock, protocol, kern);
 	if (err < 0)
 		goto out_module_put;
 
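
The net/socket.c change is the hub of the ->create() signature update seen
throughout this patch: __sock_create() now forwards a kern flag into every
protocol family's create hook, so a family can tell sockets built by
sock_create_kern() apart from ones requested through sys_socket(). A
minimal, hypothetical family callback under the new signature:

/* Sketch only: not part of the patch. */
#include <linux/errno.h>
#include <linux/net.h>
#include <net/net_namespace.h>

static int example_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	if (protocol)
		return -EPROTONOSUPPORT;

	/* a family might, for example, restrict userspace callers to the
	 * initial namespace while still allowing in-kernel users */
	if (!kern && net != &init_net)
		return -EAFNOSUPPORT;

	/* ... allocate the sock and wire up sock->ops as usual ... */
	return 0;
}
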
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index c2a1787..870929e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -111,7 +111,7 @@
 		rqstp->rq_xprt_ctxt = NULL;
 
 		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
-		skb_free_datagram(svsk->sk_sk, skb);
+		skb_free_datagram_locked(svsk->sk_sk, skb);
 	}
 }
 
@@ -578,7 +578,7 @@
 				"svc: received unknown control message %d/%d; "
 				"dropping RPC reply datagram\n",
 					cmh->cmsg_level, cmh->cmsg_type);
-		skb_free_datagram(svsk->sk_sk, skb);
+		skb_free_datagram_locked(svsk->sk_sk, skb);
 		return 0;
 	}
 
@@ -588,18 +588,18 @@
 		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
 			local_bh_enable();
 			/* checksum error */
-			skb_free_datagram(svsk->sk_sk, skb);
+			skb_free_datagram_locked(svsk->sk_sk, skb);
 			return 0;
 		}
 		local_bh_enable();
-		skb_free_datagram(svsk->sk_sk, skb);
+		skb_free_datagram_locked(svsk->sk_sk, skb);
 	} else {
 		/* we can use it in-place */
 		rqstp->rq_arg.head[0].iov_base = skb->data +
 			sizeof(struct udphdr);
 		rqstp->rq_arg.head[0].iov_len = len;
 		if (skb_checksum_complete(skb)) {
-			skb_free_datagram(svsk->sk_sk, skb);
+			skb_free_datagram_locked(svsk->sk_sk, skb);
 			return 0;
 		}
 		rqstp->rq_xprt_ctxt = skb;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e6d9abf..d00c211 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -177,6 +177,7 @@
  * @net: network namespace (must be default network)
  * @sock: pre-allocated socket structure
  * @protocol: protocol indicator (must be 0)
+ * @kern: nonzero if the socket is being created by the kernel rather than
+ *        by userspace
  *
  * This routine creates additional data structures used by the TIPC socket,
  * initializes them, and links them together.
@@ -184,7 +185,8 @@
  * Returns 0 on success, errno otherwise
  */
 
-static int tipc_create(struct net *net, struct socket *sock, int protocol)
+static int tipc_create(struct net *net, struct socket *sock, int protocol,
+		       int kern)
 {
 	const struct proto_ops *ops;
 	socket_state state;
@@ -1528,7 +1530,7 @@
 
 	buf = skb_peek(&sk->sk_receive_queue);
 
-	res = tipc_create(sock_net(sock->sk), new_sock, 0);
+	res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
 	if (!res) {
 		struct sock *new_sk = new_sock->sk;
 		struct tipc_sock *new_tsock = tipc_sk(new_sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0f133c5..178d3af 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -621,7 +621,8 @@
 	return sk;
 }
 
-static int unix_create(struct net *net, struct socket *sock, int protocol)
+static int unix_create(struct net *net, struct socket *sock, int protocol,
+		       int kern)
 {
 	if (protocol && protocol != PF_UNIX)
 		return -EPROTONOSUPPORT;
@@ -1074,6 +1075,8 @@
 	err = -ECONNREFUSED;
 	if (other->sk_state != TCP_LISTEN)
 		goto out_unlock;
+	if (other->sk_shutdown & RCV_SHUTDOWN)
+		goto out_unlock;
 
 	if (unix_recvq_full(other)) {
 		err = -EAGAIN;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 2a33d8b..68b3219 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -358,6 +358,7 @@
 			      struct wireless_dev *wdev);
 
 void cfg80211_conn_work(struct work_struct *work);
+void cfg80211_sme_failed_assoc(struct wireless_dev *wdev);
 bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev);
 
 /* internal helpers */
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index ceb2c14..83c2a28 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -62,6 +62,7 @@
 	u8 *ie = mgmt->u.assoc_resp.variable;
 	int i, ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
 	struct cfg80211_internal_bss *bss = NULL;
+	bool need_connect_result = true;
 
 	wdev_lock(wdev);
 
@@ -94,6 +95,14 @@
 		}
 
 		WARN_ON(!bss);
+	} else if (wdev->conn) {
+		cfg80211_sme_failed_assoc(wdev);
+		need_connect_result = false;
+		/*
+		 * do not call connect_result() now because the
+		 * sme will schedule work that does it later.
+		 */
+		goto out;
 	}
 
 	if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) {
@@ -121,7 +130,7 @@
 }
 EXPORT_SYMBOL(cfg80211_send_rx_assoc);
 
-static void __cfg80211_send_deauth(struct net_device *dev,
+void __cfg80211_send_deauth(struct net_device *dev,
 				   const u8 *buf, size_t len)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -130,7 +139,6 @@
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
 	const u8 *bssid = mgmt->bssid;
 	int i;
-	bool done = false;
 
 	ASSERT_WDEV_LOCK(wdev);
 
@@ -138,7 +146,6 @@
 
 	if (wdev->current_bss &&
 	    memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
-		done = true;
 		cfg80211_unhold_bss(wdev->current_bss);
 		cfg80211_put_bss(&wdev->current_bss->pub);
 		wdev->current_bss = NULL;
@@ -148,7 +155,6 @@
 			cfg80211_unhold_bss(wdev->auth_bsses[i]);
 			cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
 			wdev->auth_bsses[i] = NULL;
-			done = true;
 			break;
 		}
 		if (wdev->authtry_bsses[i] &&
@@ -156,13 +162,10 @@
 			cfg80211_unhold_bss(wdev->authtry_bsses[i]);
 			cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
 			wdev->authtry_bsses[i] = NULL;
-			done = true;
 			break;
 		}
 	}
 
-	WARN_ON(!done);
-
 	if (wdev->sme_state == CFG80211_SME_CONNECTED) {
 		u16 reason_code;
 		bool from_ap;
@@ -177,27 +180,19 @@
 					  false, NULL);
 	}
 }
+EXPORT_SYMBOL(__cfg80211_send_deauth);
 
-
-void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len,
-			  void *cookie)
+void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 
-	BUG_ON(cookie && wdev != cookie);
-
-	if (cookie) {
-		/* called within callback */
-		__cfg80211_send_deauth(dev, buf, len);
-	} else {
-		wdev_lock(wdev);
-		__cfg80211_send_deauth(dev, buf, len);
-		wdev_unlock(wdev);
-	}
+	wdev_lock(wdev);
+	__cfg80211_send_deauth(dev, buf, len);
+	wdev_unlock(wdev);
 }
 EXPORT_SYMBOL(cfg80211_send_deauth);
 
-static void __cfg80211_send_disassoc(struct net_device *dev,
+void __cfg80211_send_disassoc(struct net_device *dev,
 				     const u8 *buf, size_t len)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -238,22 +233,15 @@
 	from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0;
 	__cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
 }
+EXPORT_SYMBOL(__cfg80211_send_disassoc);
 
-void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len,
-			    void *cookie)
+void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 
-	BUG_ON(cookie && wdev != cookie);
-
-	if (cookie) {
-		/* called within callback */
-		__cfg80211_send_disassoc(dev, buf, len);
-	} else {
-		wdev_lock(wdev);
-		__cfg80211_send_disassoc(dev, buf, len);
-		wdev_unlock(wdev);
-	}
+	wdev_lock(wdev);
+	__cfg80211_send_disassoc(dev, buf, len);
+	wdev_unlock(wdev);
 }
 EXPORT_SYMBOL(cfg80211_send_disassoc);
 
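
The mlme.c hunks export __cfg80211_send_deauth()/__cfg80211_send_disassoc()
and reduce the public entry points to thin wrappers that take wdev_lock(),
replacing the old cookie-based "already inside the callback" test. A generic
sketch of that locked-helper-plus-wrapper convention (names are illustrative,
not cfg80211 API):

/* Sketch only: not part of the patch. */
#include <linux/mutex.h>

struct example_obj {
	struct mutex	lock;
	int		state;
};

/* __ variant: caller already holds obj->lock */
static void __example_update(struct example_obj *obj, int new_state)
{
	lockdep_assert_held(&obj->lock);
	obj->state = new_state;
}

/* public entry point: takes the lock, then calls the __ helper */
static void example_update(struct example_obj *obj, int new_state)
{
	mutex_lock(&obj->lock);
	__example_update(obj, new_state);
	mutex_unlock(&obj->lock);
}
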
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d362415..0115d07 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -26,6 +26,7 @@
 		CFG80211_CONN_AUTHENTICATING,
 		CFG80211_CONN_ASSOCIATE_NEXT,
 		CFG80211_CONN_ASSOCIATING,
+		CFG80211_CONN_DEAUTH_ASSOC_FAIL,
 	} state;
 	u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
 	u8 *ie;
@@ -148,6 +149,12 @@
 					       NULL, 0,
 					       WLAN_REASON_DEAUTH_LEAVING);
 		return err;
+	case CFG80211_CONN_DEAUTH_ASSOC_FAIL:
+		__cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
+				       NULL, 0,
+				       WLAN_REASON_DEAUTH_LEAVING);
+		/* return an error so that we call __cfg80211_connect_result() */
+		return -EINVAL;
 	default:
 		return 0;
 	}
@@ -158,6 +165,7 @@
 	struct cfg80211_registered_device *rdev =
 		container_of(work, struct cfg80211_registered_device, conn_work);
 	struct wireless_dev *wdev;
+	u8 bssid_buf[ETH_ALEN], *bssid = NULL;
 
 	rtnl_lock();
 	cfg80211_lock_rdev(rdev);
@@ -173,10 +181,13 @@
 			wdev_unlock(wdev);
 			continue;
 		}
+		if (wdev->conn->params.bssid) {
+			memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
+			bssid = bssid_buf;
+		}
 		if (cfg80211_conn_do_work(wdev))
 			__cfg80211_connect_result(
-					wdev->netdev,
-					wdev->conn->params.bssid,
+					wdev->netdev, bssid,
 					NULL, 0, NULL, 0,
 					WLAN_STATUS_UNSPECIFIED_FAILURE,
 					false, NULL);
@@ -337,6 +348,15 @@
 	return true;
 }
 
+void cfg80211_sme_failed_assoc(struct wireless_dev *wdev)
+{
+	struct wiphy *wiphy = wdev->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+	wdev->conn->state = CFG80211_CONN_DEAUTH_ASSOC_FAIL;
+	schedule_work(&rdev->conn_work);
+}
+
 void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 			       const u8 *req_ie, size_t req_ie_len,
 			       const u8 *resp_ie, size_t resp_ie_len,
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e19d811..38e235f 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -501,7 +501,8 @@
 	return sk;
 }
 
-static int x25_create(struct net *net, struct socket *sock, int protocol)
+static int x25_create(struct net *net, struct socket *sock, int protocol,
+		      int kern)
 {
 	struct sock *sk;
 	struct x25_sock *x25;
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 7d7c3ab..96d9227 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -114,7 +114,7 @@
 			/*
 			 *	Copy any Call User Data.
 			 */
-			if (skb->len >= 0) {
+			if (skb->len > 0) {
 				skb_copy_from_linear_data(skb,
 					      x25->calluserdata.cuddata,
 					      skb->len);
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 2c999cc..66961ea 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -190,7 +190,7 @@
 		goto out;
 
 	rc = -EINVAL;
-	if (rt.sigdigits < 0 || rt.sigdigits > 15)
+	if (rt.sigdigits > 15)
 		goto out;
 
 	dev = x25_dev_get(rt.device);