Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for net-next; they are:

1) Add the reject expression for the nf_tables bridge family. This
   allows us to send an explicit reject (TCP RST / ICMP destination
   unreachable) in response to packets matching a rule.

2) Simplify and consolidate the nf_tables set dumping logic. This uses
   the netlink control->data area to filter the dump output depending
   on the request.

3) Perform garbage collection in xt_hashlimit using a workqueue instead
   of a timer, which is problematic when the tables contain many entries,
   from Eric Dumazet.

4) Remove leftover code from the removed ulog target support, from
   Paul Bolle.

5) Dump unmodified flags in the netfilter packet accounting when resetting
   counters, so userspace knows that a counter was in an over-quota
   situation, from Alexey Perevalov.

6) Fix incorrect usage of the bitwise functions in nfnetlink_acct, also
   from Alexey.

7) Fix a crash when adding a new set element with an empty
   NFTA_SET_ELEM_LIST attribute.

This patchset also includes a couple of cleanups for xt_LED from
Duan Jiong and for nf_conntrack_ipv4 (using coccinelle) from
Himangi Saraogi.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
index d01ed63..42409bf 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
@@ -8,10 +8,16 @@
    - SerDes integration registers (1/2)
    - SerDes integration registers (2/2)
 
+Optional properties:
+- amd,speed-set: Speed capabilities of the device
+    0 - 1GbE and 10GbE (default)
+    1 - 2.5GbE and 10GbE
+
 Example:
 	xgbe_phy@e1240800 {
 		compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
 		reg = <0 0xe1240800 0 0x00400>,
 		      <0 0xe1250000 0 0x00060>,
 		      <0 0xe1250080 0 0x00004>;
+		amd,speed-set = <0>;
 	};
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index ea0c790..41354f7 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -8,16 +8,21 @@
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
 - interrupts: Should contain the amd-xgbe interrupt
-- clocks: Should be the DMA clock for the amd-xgbe device (used for
-  calculating the correct Rx interrupt watchdog timer value on a DMA
-  channel for coalescing)
-- clock-names: Should be the name of the DMA clock, "dma_clk"
+- clocks:
+   - DMA clock for the amd-xgbe device (used for calculating the
+     correct Rx interrupt watchdog timer value on a DMA channel
+     for coalescing)
+   - PTP clock for the amd-xgbe device
+- clock-names: Should be the names of the clocks
+   - "dma_clk" for the DMA clock
+   - "ptp_clk" for the PTP clock
 - phy-handle: See ethernet.txt file in the same directory
 - phy-mode: See ethernet.txt file in the same directory
 
 Optional properties:
 - mac-address: mac address to be assigned to the device. Can be overridden
   by UEFI.
+- dma-coherent: Present if dma operations are coherent
 
 Example:
 	xgbe@e0700000 {
@@ -26,8 +31,8 @@
 		      <0 0xe0780000 0 0x80000>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 325 4>;
-		clocks = <&xgbe_clk>;
-		clock-names = "dma_clk";
+		clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
+		clock-names = "dma_clk", "ptp_clk";
 		phy-handle = <&phy>;
 		phy-mode = "xgmii";
 		mac-address = [ 02 a1 a2 a3 a4 a5 ];
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index f1ea2c6..c587a96 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -281,6 +281,19 @@
 If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
 device.
 
+INPUT_PROP_TOPBUTTONPAD:
+-----------------------
+Some laptops, most notably the Lenovo *40 series provide a trackstick
+device but do not have physical buttons associated with the trackstick
+device. Instead, the top area of the touchpad is marked to show
+visual/haptic areas for left, middle, right buttons intended to be used
+with the trackstick.
+
+If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
+accordingly. This property does not affect kernel behavior.
+The kernel does not provide button emulation for such devices but treats
+them as any other INPUT_PROP_BUTTONPAD device.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f35bfe4..29a9351 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -101,19 +101,17 @@
 	Maximum memory used to reassemble IP fragments. When
 	ipfrag_high_thresh bytes of memory is allocated for this purpose,
 	the fragment handler will toss packets until ipfrag_low_thresh
-	is reached.
+	is reached. This also serves as a maximum limit to namespaces
+	different from the initial one.
 
 ipfrag_low_thresh - INTEGER
-	See ipfrag_high_thresh
+	Maximum memory used to reassemble IP fragments before the kernel
+	begins to remove incomplete fragment queues to free up resources.
+	The kernel still accepts new fragments for defragmentation.
 
 ipfrag_time - INTEGER
 	Time in seconds to keep an IP fragment in memory.
 
-ipfrag_secret_interval - INTEGER
-	Regeneration interval (in seconds) of the hash secret (or lifetime
-	for the hash secret) for IP fragments.
-	Default: 600
-
 ipfrag_max_dist - INTEGER
 	ipfrag_max_dist is a non-negative integer value which defines the
 	maximum "disorder" which is allowed among fragments which share a
@@ -1162,11 +1160,6 @@
 ip6frag_time - INTEGER
 	Time in seconds to keep an IPv6 fragment in memory.
 
-ip6frag_secret_interval - INTEGER
-	Regeneration interval (in seconds) of the hash secret (or lifetime
-	for the hash secret) for IPv6 fragments.
-	Default: 600
-
 conf/default/*:
 	Change the interface-specific default settings.
 
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 38112d5..a6d7cb9 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -1008,14 +1008,9 @@
 of hardware timestamps with SIOCSHWTSTAMP (see related information from
 Documentation/networking/timestamping.txt).
 
-PACKET_TIMESTAMP accepts the same integer bit field as
-SO_TIMESTAMPING.  However, only the SOF_TIMESTAMPING_SYS_HARDWARE
-and SOF_TIMESTAMPING_RAW_HARDWARE values are recognized by
-PACKET_TIMESTAMP.  SOF_TIMESTAMPING_SYS_HARDWARE takes precedence over
-SOF_TIMESTAMPING_RAW_HARDWARE if both bits are set.
+PACKET_TIMESTAMP accepts the same integer bit field as SO_TIMESTAMPING:
 
-    int req = 0;
-    req |= SOF_TIMESTAMPING_SYS_HARDWARE;
+    int req = SOF_TIMESTAMPING_RAW_HARDWARE;
     setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, (void *) &req, sizeof(req))
 
 For the mmap(2)ed ring buffers, such timestamps are stored in the
@@ -1023,14 +1018,13 @@
 what kind of timestamp has been reported, the tp_status field is binary |'ed
 with the following possible bits ...
 
-    TP_STATUS_TS_SYS_HARDWARE
     TP_STATUS_TS_RAW_HARDWARE
     TP_STATUS_TS_SOFTWARE
 
 ... that are equivalent to its SOF_TIMESTAMPING_* counterparts. For the
-RX_RING, if none of those 3 are set (i.e. PACKET_TIMESTAMP is not set),
-then this means that a software fallback was invoked *within* PF_PACKET's
-processing code (less precise).
+RX_RING, if neither is set (i.e. PACKET_TIMESTAMP is not set), then a
+software fallback was invoked *within* PF_PACKET's processing code (less
+precise).
 
 Getting timestamps for the TX_RING works as follows: i) fill the ring frames,
 ii) call sendto() e.g. in blocking mode, iii) wait for status of relevant
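
A minimal user-space sketch of the RX_RING timestamp check described in the
packet_mmap.txt hunk above; this is illustrative only and not part of the
patch. It assumes a TPACKET_V2 ring has already been set up elsewhere and
that 'hdr' points at a frame currently owned by user space; the helper name
is made up for the example:

    #include <stdio.h>
    #include <linux/if_packet.h>

    /* Report which timestamp source filled in tp_sec/tp_nsec. */
    static void report_rx_timestamp(struct tpacket2_hdr *hdr)
    {
            if (hdr->tp_status & TP_STATUS_TS_RAW_HARDWARE)
                    printf("HW raw %u.%09u\n", hdr->tp_sec, hdr->tp_nsec);
            else if (hdr->tp_status & TP_STATUS_TS_SOFTWARE)
                    printf("SW %u.%09u\n", hdr->tp_sec, hdr->tp_nsec);
            else
                    /* Neither bit set: software fallback taken inside
                     * PF_PACKET's own processing code (less precise). */
                    printf("fallback %u.%09u\n", hdr->tp_sec, hdr->tp_nsec);
    }
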
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 3544c984..e839e7e 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -272,6 +272,8 @@
    txtsamp: Requests a transmit timestamp at the PHY level for a 'skb'
    set_wol: Enable Wake-on-LAN at the PHY level
    get_wol: Get the Wake-on-LAN status at the PHY level
+   read_mmd_indirect: Read PHY MMD indirect register
+   write_mmd_indirect: Write PHY MMD indirect register
 
  Of these, only config_aneg and read_status are required to be
  assigned by the driver code.  The rest are optional.  Also, it is
@@ -284,7 +286,21 @@
 
  Feel free to look at the Marvell, Cicada, and Davicom drivers in
  drivers/net/phy/ for examples (the lxt and qsemi drivers have
- not been tested as of this writing)
+ not been tested as of this writing).
+
+ The PHY's MMD register accesses are handled by the PAL framework
+ by default, but can be overridden by a specific PHY driver if
+ required. This could be the case if a PHY was released for
+ manufacturing before the MMD PHY register definitions were
+ standardized by the IEEE. Most modern PHYs will be able to use
+ the generic PAL framework for accessing the PHY's MMD registers.
+ An example of such usage is for Energy Efficient Ethernet support,
+ implemented in the PAL. This support uses the PAL to access MMD
+ registers for EEE query and configuration if the PHY supports
+ the IEEE standard access mechanisms, or can use the PHY's specific
+ access interfaces if overridden by the specific PHY driver. See
+ the Micrel driver in drivers/net/phy/ for an example of how this
+ can be implemented.
 
 Board Fixups
 
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 8b4ad80..897f942 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -88,15 +88,8 @@
 SOF_TIMESTAMPING_RAW_HARDWARE is set. No assumptions about its
 relation to system time should be made.
 
-hwtimetrans is the hardware time stamp transformed so that it
-corresponds as good as possible to system time. This correlation is
-not perfect; as a consequence, sorting packets received via different
-NICs by their hwtimetrans may differ from the order in which they were
-received. hwtimetrans may be non-monotonic even for the same NIC.
-Filled in if SOF_TIMESTAMPING_SYS_HARDWARE is set. Requires support
-by the network device and will be empty without that support. This
-field is DEPRECATED. Only one driver computes this value. New device
-drivers must leave this zero. Instead, they can expose the hardware
+hwtimetrans is always zero. This field is deprecated. It used to hold
+hw timestamps converted to system time. Instead, expose the hardware
 clock device on the NIC directly as a HW PTP clock source, to allow
 time conversion in userspace and optionally synchronize system time
 with a userspace PTP stack such as linuxptp. For the PTP clock API,
@@ -191,7 +184,6 @@
 	 * since arbitrary point in time
 	 */
 	ktime_t	hwtstamp;
-	ktime_t	syststamp; /* hwtstamp transformed to system time base */
 };
 
 Time stamps for outgoing packets are to be generated as follows:
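
To make the three-slot control-message layout above concrete, here is a
minimal sketch of a receive path that reads slot 0 (software) and slot 2
(raw hardware) and skips the deprecated slot 1. It is not part of the patch;
the helper name and the surrounding recvmsg() setup are assumed, and the
SO_TIMESTAMPING fallback define mirrors the in-tree timestamping.c example:

    #include <stdio.h>
    #include <time.h>
    #include <sys/socket.h>

    #ifndef SO_TIMESTAMPING
    # define SO_TIMESTAMPING 37
    #endif

    /* Walk the control messages returned by recvmsg() and print the
     * software and raw hardware timestamps.  Slot 1 (the deprecated
     * "HW transformed" value) is always zero and is skipped. */
    static void print_timestamps(struct msghdr *msg)
    {
            struct cmsghdr *cmsg;

            for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                    struct timespec *ts;

                    if (cmsg->cmsg_level != SOL_SOCKET ||
                        cmsg->cmsg_type != SO_TIMESTAMPING)
                            continue;

                    ts = (struct timespec *)CMSG_DATA(cmsg);
                    printf("SW %ld.%09ld, HW raw %ld.%09ld\n",
                           (long)ts[0].tv_sec, (long)ts[0].tv_nsec,
                           (long)ts[2].tv_sec, (long)ts[2].tv_nsec);
            }
    }
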
diff --git a/Documentation/networking/timestamping/timestamping.c b/Documentation/networking/timestamping/timestamping.c
index 8ba82bf..5cdfd74 100644
--- a/Documentation/networking/timestamping/timestamping.c
+++ b/Documentation/networking/timestamping/timestamping.c
@@ -76,7 +76,6 @@
 	       "  SOF_TIMESTAMPING_RX_HARDWARE - hardware time stamping of incoming packets\n"
 	       "  SOF_TIMESTAMPING_RX_SOFTWARE - software fallback for incoming packets\n"
 	       "  SOF_TIMESTAMPING_SOFTWARE - request reporting of software time stamps\n"
-	       "  SOF_TIMESTAMPING_SYS_HARDWARE - request reporting of transformed HW time stamps\n"
 	       "  SOF_TIMESTAMPING_RAW_HARDWARE - request reporting of raw HW time stamps\n"
 	       "  SIOCGSTAMP - check last socket time stamp\n"
 	       "  SIOCGSTAMPNS - more accurate socket time stamp\n");
@@ -202,9 +201,7 @@
 				       (long)stamp->tv_sec,
 				       (long)stamp->tv_nsec);
 				stamp++;
-				printf("HW transformed %ld.%09ld ",
-				       (long)stamp->tv_sec,
-				       (long)stamp->tv_nsec);
+				/* skip deprecated HW transformed */
 				stamp++;
 				printf("HW raw %ld.%09ld",
 				       (long)stamp->tv_sec,
@@ -361,8 +358,6 @@
 			so_timestamping_flags |= SOF_TIMESTAMPING_RX_SOFTWARE;
 		else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_SOFTWARE"))
 			so_timestamping_flags |= SOF_TIMESTAMPING_SOFTWARE;
-		else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_SYS_HARDWARE"))
-			so_timestamping_flags |= SOF_TIMESTAMPING_SYS_HARDWARE;
 		else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RAW_HARDWARE"))
 			so_timestamping_flags |= SOF_TIMESTAMPING_RAW_HARDWARE;
 		else
diff --git a/MAINTAINERS b/MAINTAINERS
index 78215a5..c77a0ef 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -148,6 +148,13 @@
 S:	Maintained
 F:	drivers/scsi/53c700*
 
+6LOWPAN GENERIC (BTLE/IEEE 802.15.4)
+M:	Alexander Aring <alex.aring@gmail.com>
+L:	linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-bluetooth@vger.kernel.org
+S:	Maintained
+F:	net/6lowpan/
+
 6PACK NETWORK DRIVER FOR AX.25
 M:	Andreas Koensgen <ajk@comnets.uni-bremen.de>
 L:	linux-hams@vger.kernel.org
@@ -1912,6 +1919,13 @@
 F:	drivers/net/bonding/
 F:	include/uapi/linux/if_bonding.h
 
+BPF (Safe dynamic programs and tools)
+M:	Alexei Starovoitov <ast@kernel.org>
+L:	netdev@vger.kernel.org
+L:	linux-kernel@vger.kernel.org
+S:	Supported
+F:	kernel/bpf/
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:	Gary Zambrano <zambrano@broadcom.com>
 L:	netdev@vger.kernel.org
@@ -6947,6 +6961,12 @@
 S:	Maintained
 F:	drivers/pinctrl/pinctrl-at91.c
 
+PIN CONTROLLER - RENESAS
+M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:	linux-sh@vger.kernel.org
+S:	Maintained
+F:	drivers/pinctrl/sh-pfc/
+
 PIN CONTROLLER - SAMSUNG
 M:	Tomasz Figa <t.figa@samsung.com>
 M:	Thomas Abraham <thomas.abraham@linaro.org>
diff --git a/Makefile b/Makefile
index 6b27741..f6a7794 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
@@ -688,6 +688,8 @@
 endif
 endif
 
+KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
+
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS	+= -g
 KBUILD_AFLAGS	+= -Wa,-gdwarf-2
diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
index ab1116d..83a5b86 100644
--- a/arch/arm/boot/dts/hi3620.dtsi
+++ b/arch/arm/boot/dts/hi3620.dtsi
@@ -73,7 +73,7 @@
 
 		L2: l2-cache {
 			compatible = "arm,pl310-cache";
-			reg = <0xfc10000 0x100000>;
+			reg = <0x100000 0x100000>;
 			interrupts = <0 15 4>;
 			cache-unified;
 			cache-level = <2>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 1fe45d1..b15f1a7 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -353,7 +353,7 @@
 	};
 
 	twl_power: power {
-		compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
+		compatible = "ti,twl4030-power-n900";
 		ti,use_poweroff;
 	};
 };
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 8d7ffae..79f68ac 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -540,9 +540,9 @@
 			#clock-cells = <0>;
 			clock-output-names = "sd1";
 		};
-		sd2_clk: sd3_clk@e615007c {
+		sd2_clk: sd3_clk@e615026c {
 			compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
-			reg = <0 0xe615007c 0 4>;
+			reg = <0 0xe615026c 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
 			clock-output-names = "sd2";
diff --git a/arch/arm/boot/dts/sama5d3_gmac.dtsi b/arch/arm/boot/dts/sama5d3_gmac.dtsi
index a6cb050..de5ed59 100644
--- a/arch/arm/boot/dts/sama5d3_gmac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_gmac.dtsi
@@ -74,7 +74,7 @@
 			};
 
 			macb0: ethernet@f0028000 {
-				compatible = "cdns,pc302-gem", "cdns,gem";
+				compatible = "atmel,sama5d3-gem";
 				reg = <0xf0028000 0x100>;
 				interrupts = <34 IRQ_TYPE_LEVEL_HIGH 3>;
 				pinctrl-names = "default";
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 4522366..15468fb 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -137,7 +137,7 @@
 				dst += AES_BLOCK_SIZE;
 			} while (--blocks);
 		}
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -158,7 +158,7 @@
 		bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->dec, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	while (walk.nbytes) {
 		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@
 			dst += AES_BLOCK_SIZE;
 			src += AES_BLOCK_SIZE;
 		} while (--blocks);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -268,7 +268,7 @@
 		bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->enc, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -292,7 +292,7 @@
 		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->dec, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 060a75e..0406cb3 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -50,6 +50,7 @@
 	struct smp_operations	*smp;		/* SMP operations	*/
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **);
+	void			(*dt_fixup)(void);
 	void			(*init_meminfo)(void);
 	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index e94a157..11c54de 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -212,7 +212,7 @@
 	mdesc_best = &__mach_desc_GENERIC_DT;
 #endif
 
-	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys)))
+	if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
 		return NULL;
 
 	mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@@ -237,6 +237,12 @@
 		dump_machine_table(); /* does not return */
 	}
 
+	/* We really don't want to do this, but sometimes firmware provides buggy data */
+	if (mdesc->dt_fixup)
+		mdesc->dt_fixup();
+
+	early_init_dt_scan_nodes();
+
 	/* Change machine number to match the mdesc we're using */
 	__machine_arch_type = mdesc->nr;
 
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 46d893f..66c9b96 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -335,6 +335,15 @@
 #endif
 }
 
+static void __init exynos_dt_fixup(void)
+{
+	/*
+	 * Some versions of uboot pass garbage entries in the memory node,
+	 * use the old CONFIG_ARM_NR_BANKS
+	 */
+	of_fdt_limit_memory(8);
+}
+
 DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
 	/* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
 	/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
@@ -348,4 +357,5 @@
 	.dt_compat	= exynos_dt_compat,
 	.restart	= exynos_restart,
 	.reserve	= exynos_reserve,
+	.dt_fixup	= exynos_dt_fixup,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 17cd393..93914d2 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -50,6 +50,16 @@
 		 soc_is_omap54xx() || soc_is_dra7xx())
 		return 1;
 
+	if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
+		 ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
+		if (cpu_is_omap24xx())
+			return 0;
+		else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
+			return 0;
+		else
+			return 1;
+	}
+
 	/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
 	 * which require H/W based ECC error detection */
 	if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@@ -57,14 +67,6 @@
 		 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
 		return 0;
 
-	/*
-	 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
-	 * and AM33xx derivates. Other chips may be added if confirmed to work.
-	 */
-	if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
-	    (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
-		return 0;
-
 	/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
 	if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
 		return 1;
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 539e810..a0fe747 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -168,6 +168,10 @@
 		smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
 		break;
 
+	case L310_POWER_CTRL:
+		pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
+		return;
+
 	default:
 		WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
 		return;
diff --git a/arch/arm/xen/grant-table.c b/arch/arm/xen/grant-table.c
index 859a9bb..91cf08b 100644
--- a/arch/arm/xen/grant-table.c
+++ b/arch/arm/xen/grant-table.c
@@ -51,3 +51,8 @@
 {
 	return -ENOSYS;
 }
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+	return 0;
+}
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 60f2f4c..79cd911 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -106,7 +106,7 @@
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -128,7 +128,7 @@
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -151,7 +151,7 @@
 		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -174,7 +174,7 @@
 		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -243,7 +243,7 @@
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_enc, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
@@ -267,7 +267,7 @@
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_dec, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f43db8a..e90c542 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -60,6 +60,17 @@
 early_param("initrd", early_initrd);
 #endif
 
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
+
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	struct memblock_region *reg;
@@ -70,9 +81,7 @@
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-		unsigned long max_dma_phys =
-			(unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
-		max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+		max_dma = PFN_DOWN(max_zone_dma_phys());
 		zone_size[ZONE_DMA] = max_dma - min;
 	}
 	zone_size[ZONE_NORMAL] = max - max_dma;
@@ -146,7 +155,7 @@
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
+		dma_phys_limit = max_zone_dma_phys();
 	dma_contiguous_reserve(dma_phys_limit);
 
 	memblock_allow_resize();
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index a7e9bfd..fcec5ce 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -102,7 +102,7 @@
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN_V3=y
+CONFIG_SPI_ADI_V3=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index ba35864..c9eec84 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@
 
 	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
 #else
-	.init.data : AT(__data_lma + __data_len)
+	.init.data : AT(__data_lma + __data_len + 32)
 	{
 		__sinitdata = .;
 		INIT_DATA
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 63b0e4f..0ccf0cf 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -20,6 +20,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index c65c6db..1e7290e 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index af58454..c7495dc 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index a021122..6b988ad 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 90138e6..1fe7ff2 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -2118,7 +2118,7 @@
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1",  "pinctrl-adi2.0", NULL, "can1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0",  "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@
 	PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x",  "pinctrl-adi2.0", NULL, "atapi_alter"),
 #endif
 	PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0",  "pinctrl-adi2.0", NULL, "nfc0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", NULL, "keys_4x4"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+	PIN_MAP_MUX_GROUP("bf54x-keys", "4bit",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+	PIN_MAP_MUX_GROUP("bf54x-keys", "8bit",  "pinctrl-adi2.0", "keys_8x8grp", "keys"),
 };
 
 static int __init ezkit_init(void)
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 430b16d..6ab9515 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -44,6 +44,7 @@
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/jiffies.h>
 #include <linux/i2c-pca-platform.h>
 #include <linux/delay.h>
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 9f777df..e862f78 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -18,6 +18,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 88dee43..2de71e8 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -14,6 +14,7 @@
 #include <linux/spi/spi.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/delay.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 1ba4600..e2c0b02 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -698,8 +698,6 @@
 {
 #define CONFIG_SMC_GCTL_VAL     0x00000010
 
-	if (!devm_pinctrl_get_select_default(&pdev->dev))
-		return -EBUSY;
 	bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
 	bfin_write32(SMC_B0CTL, 0x01002011);
 	bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@
 
 void bf609_nor_flash_exit(struct platform_device *pdev)
 {
-	devm_pinctrl_put(pdev->dev.pins->p);
 	bfin_write32(SMC_GCTL, 0);
 }
 
@@ -2058,15 +2055,14 @@
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0",  "pinctrl-adi2.0", NULL, "smc0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", NULL, "ppi2_16b"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_8b"),
-#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#else
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
-#endif
+	PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP("bfin_display.0", "8bit",  "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP("bfin_display.0", "16bit",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit",  "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+	PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+	PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1",  "pinctrl-adi2.0", NULL, "sport1"),
diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h
index 3ca0fb9..a1efd93 100644
--- a/arch/blackfin/mach-bf609/include/mach/pm.h
+++ b/arch/blackfin/mach-bf609/include/mach/pm.h
@@ -10,6 +10,7 @@
 #define __MACH_BF609_PM_H__
 
 #include <linux/suspend.h>
+#include <linux/platform_device.h>
 
 extern int bfin609_pm_enter(suspend_state_t state);
 extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@
 void bfin_sec_raise_irq(unsigned int sid);
 void coreb_enable(void);
 
-int bf609_nor_flash_init(void);
-void bf609_nor_flash_exit(void);
+int bf609_nor_flash_init(struct platform_device *pdev);
+void bf609_nor_flash_exit(struct platform_device *pdev);
 #endif
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
index 0cdd695..b1bfcf4 100644
--- a/arch/blackfin/mach-bf609/pm.c
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -291,13 +291,13 @@
 #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
 static int smc_pm_syscore_suspend(void)
 {
-	bf609_nor_flash_exit();
+	bf609_nor_flash_exit(NULL);
 	return 0;
 }
 
 static void smc_pm_syscore_resume(void)
 {
-	bf609_nor_flash_init();
+	bf609_nor_flash_init(NULL);
 }
 
 static struct syscore_ops smc_pm_syscore_ops = {
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 867b7ce..1f94784 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1208,8 +1208,6 @@
 
 	bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
 
-	bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
-
 	/* Enable interrupts IVG7-15 */
 	bfin_irq_flags |= IMASK_IVG15 |
 	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index a2fa2971..f5645d6 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -69,8 +69,6 @@
 #define SA_NOMASK	SA_NODEFER
 #define SA_ONESHOT	SA_RESETHAND
 
-#define SA_RESTORER	0x04000000 /* obsolete -- ignored */
-
 #define MINSIGSTKSZ	2048
 #define SIGSTKSZ	8192
 
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ae085ad..0bef864 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -728,7 +728,6 @@
 #endif
 
 	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
-	memset(empty_zero_page, 0, PAGE_SIZE);
 }
 
 static void __init gateway_init(void)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index bc23477..0fdd7ee 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -447,6 +447,7 @@
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
 	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 807014d..c2b4dcf 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -22,6 +22,7 @@
  */
 #include <asm/pgtable-ppc64.h>
 #include <asm/bug.h>
+#include <asm/processor.h>
 
 /*
  * Segment table
@@ -496,7 +497,7 @@
  */
 struct subpage_prot_table {
 	unsigned long maxaddr;	/* only addresses < this are protected */
-	unsigned int **protptrs[2];
+	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
 	unsigned int *low_prot[4];
 };
 
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 965291b..0c15764 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -527,6 +527,26 @@
 		.machine_check_early	= __machine_check_early_realmode_p8,
 		.platform		= "power8",
 	},
+	{	/* Power8 DD1: Does not support doorbell IPIs */
+		.pvr_mask		= 0xffffff00,
+		.pvr_value		= 0x004d0100,
+		.cpu_name		= "POWER8 (raw)",
+		.cpu_features		= CPU_FTRS_POWER8_DD1,
+		.cpu_user_features	= COMMON_USER_POWER8,
+		.cpu_user_features2	= COMMON_USER2_POWER8,
+		.mmu_features		= MMU_FTRS_POWER8,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power8",
+		.oprofile_type		= PPC_OPROFILE_INVALID,
+		.cpu_setup		= __setup_cpu_power8,
+		.cpu_restore		= __restore_cpu_power8,
+		.flush_tlb		= __flush_tlb_power8,
+		.machine_check_early	= __machine_check_early_realmode_p8,
+		.platform		= "power8",
+	},
 	{	/* Power8 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x004d0000,
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 658e89d..db2b482 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -611,17 +611,19 @@
 	for (f = flist; f; f = next) {
 		/* Translate data addrs to absolute */
 		for (i = 0; i < f->num_blocks; i++) {
-			f->blocks[i].data = (char *)__pa(f->blocks[i].data);
+			f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
 			image_size += f->blocks[i].length;
+			f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
 		}
 		next = f->next;
 		/* Don't translate NULL pointer for last entry */
 		if (f->next)
-			f->next = (struct flash_block_list *)__pa(f->next);
+			f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
 		else
 			f->next = NULL;
 		/* make num_blocks into the version/length field */
 		f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
+		f->num_blocks = cpu_to_be64(f->num_blocks);
 	}
 
 	printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 0738f96..43435c6 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -77,7 +77,7 @@
 	stb	r4,0(r6)
 	blr
 
-_GLOBAL(memmove)
+_GLOBAL_TOC(memmove)
 	cmplw	0,r3,r4
 	bgt	backwards_memcpy
 	b	memcpy
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 412dd46..5c09f36 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1198,7 +1198,7 @@
 			sh = regs->gpr[rb] & 0x3f;
 			ival = (signed int) regs->gpr[rd];
 			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
-			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
+			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
 				regs->xer |= XER_CA;
 			else
 				regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@
 			sh = rb;
 			ival = (signed int) regs->gpr[rd];
 			regs->gpr[ra] = ival >> sh;
-			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
 				regs->xer |= XER_CA;
 			else
 				regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@
 
 #ifdef __powerpc64__
 		case 27:	/* sld */
-			sh = regs->gpr[rd] & 0x7f;
+			sh = regs->gpr[rb] & 0x7f;
 			if (sh < 64)
 				regs->gpr[ra] = regs->gpr[rd] << sh;
 			else
@@ -1235,7 +1235,7 @@
 			sh = regs->gpr[rb] & 0x7f;
 			ival = (signed long int) regs->gpr[rd];
 			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
-			if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
+			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
 				regs->xer |= XER_CA;
 			else
 				regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@
 			sh = rb | ((instr & 2) << 4);
 			ival = (signed long int) regs->gpr[rd];
 			regs->gpr[ra] = ival >> sh;
-			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
 				regs->xer |= XER_CA;
 			else
 				regs->xer &= ~XER_CA;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 6b0641c..fe52db2 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1307,6 +1307,9 @@
  out_enable:
 	pmao_restore_workaround(ebb);
 
+	if (ppmu->flags & PPMU_ARCH_207S)
+		mtspr(SPRN_MMCR2, 0);
+
 	mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
 
 	mb();
@@ -1315,9 +1318,6 @@
 
 	write_mmcr0(cpuhw, mmcr0);
 
-	if (ppmu->flags & PPMU_ARCH_207S)
-		mtspr(SPRN_MMCR2, 0);
-
 	/*
 	 * Enable instruction sampling if necessary
 	 */
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 10268c4..0ad533b 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -249,7 +249,7 @@
 
 	rc = opal_get_elog_size(&id, &size, &type);
 	if (rc != OPAL_SUCCESS) {
-		pr_err("ELOG: Opal log read failed\n");
+		pr_err("ELOG: OPAL log info read failed\n");
 		return;
 	}
 
@@ -257,7 +257,7 @@
 	log_id = be64_to_cpu(id);
 	elog_type = be64_to_cpu(type);
 
-	BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
+	WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
 
 	if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
 		elog_size  =  OPAL_MAX_ERRLOG_SIZE;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 022b38e6..2d0b4d6 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -86,6 +86,7 @@
 	}
 
 	of_node_set_flag(dn, OF_DYNAMIC);
+	of_node_init(dn);
 
 	return dn;
 }
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 0435bb6..1c0a60d 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -69,6 +69,7 @@
 
 	np->properties = proplist;
 	of_node_set_flag(np, OF_DYNAMIC);
+	of_node_init(np);
 
 	np->parent = derive_parent(path);
 	if (IS_ERR(np->parent)) {
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index d4d16e4..bf5b3f5 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -32,7 +32,8 @@
 
 cflags-$(CONFIG_CPU_SH2)		:= $(call cc-option,-m2,)
 cflags-$(CONFIG_CPU_SH2A)		+= $(call cc-option,-m2a,) \
-					   $(call cc-option,-m2a-nofpu,)
+					   $(call cc-option,-m2a-nofpu,) \
+					   $(call cc-option,-m4-nofpu,)
 cflags-$(CONFIG_CPU_SH3)		:= $(call cc-option,-m3,)
 cflags-$(CONFIG_CPU_SH4)		:= $(call cc-option,-m4,) \
 	$(call cc-option,-mno-implicit-fp,-m4-nofpu)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a800290..f9e4fdd 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -370,6 +370,17 @@
 	 */
 	detect_extended_topology(c);
 
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
+#endif
+	}
+
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@
 		set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-		detect_ht(c);
-#endif
-	}
-
 	/* Work around errata */
 	srat_detect_node(c);
 
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a952e9c..9c8f739 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -730,6 +730,18 @@
 #endif
 	}
 
+#ifdef CONFIG_X86_HT
+	/*
+	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+	 * turns means that the only possibility is SMT (as indicated in
+	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+	 * c->phys_proc_id.
+	 */
+	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
 	return l2;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bb92f38..9a79c8d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2451,6 +2451,12 @@
 	for_each_online_cpu(i) {
 		err = mce_device_create(i);
 		if (err) {
+			/*
+			 * Register notifier anyway (and do not unreg it) so
+			 * that we don't leave undeleted timers, see notifier
+			 * callback above.
+			 */
+			__register_hotcpu_notifier(&mce_cpu_notifier);
 			cpu_notifier_register_done();
 			goto err_device_create;
 		}
@@ -2471,10 +2477,6 @@
 err_register:
 	unregister_syscore_ops(&mce_syscore_ops);
 
-	cpu_notifier_register_begin();
-	__unregister_hotcpu_notifier(&mce_cpu_notifier);
-	cpu_notifier_register_done();
-
 err_device_create:
 	/*
 	 * We didn't keep track of which devices were created above, but
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bdfbff..2879ecd 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -118,6 +118,9 @@
 			continue;
 		if (event->attr.config1 & ~er->valid_mask)
 			return -EINVAL;
+		/* Check if the extra msrs can be safely accessed*/
+		if (!er->extra_msr_access)
+			return -ENXIO;
 
 		reg->idx = er->idx;
 		reg->config = event->attr.config1;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3b2f9bd..8ade931 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -295,14 +295,16 @@
 	u64			config_mask;
 	u64			valid_mask;
 	int			idx;  /* per_xxx->regs[] reg index */
+	bool			extra_msr_access;
 };
 
 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
-	.event = (e),		\
-	.msr = (ms),		\
-	.config_mask = (m),	\
-	.valid_mask = (vm),	\
-	.idx = EXTRA_REG_##i,	\
+	.event = (e),			\
+	.msr = (ms),			\
+	.config_mask = (m),		\
+	.valid_mask = (vm),		\
+	.idx = EXTRA_REG_##i,		\
+	.extra_msr_access = true,	\
 	}
 
 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 07846d7..2502d0d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2182,6 +2182,41 @@
 	}
 }
 
+/*
+ * Under certain circumstances, access certain MSR may cause #GP.
+ * The function tests if the input MSR can be safely accessed.
+ */
+static bool check_msr(unsigned long msr, u64 mask)
+{
+	u64 val_old, val_new, val_tmp;
+
+	/*
+	 * Read the current value, change it and read it back to see if it
+	 * matches, this is needed to detect certain hardware emulators
+	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
+	 */
+	if (rdmsrl_safe(msr, &val_old))
+		return false;
+
+	/*
+	 * Only change the bits which can be updated by wrmsrl.
+	 */
+	val_tmp = val_old ^ mask;
+	if (wrmsrl_safe(msr, val_tmp) ||
+	    rdmsrl_safe(msr, &val_new))
+		return false;
+
+	if (val_new != val_tmp)
+		return false;
+
+	/* Here it's sure that the MSR can be safely accessed.
+	 * Restore the old value and return.
+	 */
+	wrmsrl(msr, val_old);
+
+	return true;
+}
+
 static __init void intel_sandybridge_quirk(void)
 {
 	x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2271,7 +2306,8 @@
 	union cpuid10_ebx ebx;
 	struct event_constraint *c;
 	unsigned int unused;
-	int version;
+	struct extra_reg *er;
+	int version, i;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
 		switch (boot_cpu_data.x86) {
@@ -2474,6 +2510,9 @@
 	case 62: /* IvyBridge EP */
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
+		/* dTLB-load-misses on IVB is different than SNB */
+		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
+
 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
 		       sizeof(hw_cache_extra_regs));
 
@@ -2574,6 +2613,34 @@
 		}
 	}
 
+	/*
+	 * Access LBR MSR may cause #GP under certain circumstances.
+	 * E.g. KVM doesn't support LBR MSR
+	 * Check all LBT MSR here.
+	 * Disable LBR access if any LBR MSRs can not be accessed.
+	 */
+	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+		x86_pmu.lbr_nr = 0;
+	for (i = 0; i < x86_pmu.lbr_nr; i++) {
+		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
+			x86_pmu.lbr_nr = 0;
+	}
+
+	/*
+	 * Access extra MSR may cause #GP under certain circumstances.
+	 * E.g. KVM doesn't support offcore event
+	 * Check all extra_regs here.
+	 */
+	if (x86_pmu.extra_regs) {
+		for (er = x86_pmu.extra_regs; er->msr; er++) {
+			er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+			/* Disable LBR select mapping */
+			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+				x86_pmu.lbr_sel_map = NULL;
+		}
+	}
+
 	/* Support full width counters using alternative MSR range */
 	if (x86_pmu.intel_cap.full_width_write) {
 		x86_pmu.max_period = x86_pmu.cntval_mask;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 980970c..696ade3 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -311,9 +311,11 @@
 	if (!x86_pmu.bts)
 		return 0;
 
-	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
-	if (unlikely(!buffer))
+	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+	if (unlikely(!buffer)) {
+		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
 		return -ENOMEM;
+	}
 
 	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
 	thresh = max / 16;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 65bbbea..ae6552a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -550,16 +550,16 @@
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@
 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+
 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@
 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
-	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dbaa23e..0d0c9d4 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -425,8 +425,8 @@
 	cmpl $(NR_syscalls), %eax
 	jae sysenter_badsys
 	call *sys_call_table(,%eax,4)
-	movl %eax,PT_EAX(%esp)
 sysenter_after_call:
+	movl %eax,PT_EAX(%esp)
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
@@ -502,6 +502,7 @@
 	jae syscall_badsys
 syscall_call:
 	call *sys_call_table(,%eax,4)
+syscall_after_call:
 	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
 	LOCKDEP_SYS_EXIT
@@ -675,12 +676,12 @@
 END(syscall_fault)
 
 syscall_badsys:
-	movl $-ENOSYS,PT_EAX(%esp)
-	jmp syscall_exit
+	movl $-ENOSYS,%eax
+	jmp syscall_after_call
 END(syscall_badsys)
 
 sysenter_badsys:
-	movl $-ENOSYS,PT_EAX(%esp)
+	movl $-ENOSYS,%eax
 	jmp sysenter_after_call
 END(syscall_badsys)
 	CFI_ENDPROC
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7596df6..67e6d19e 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -574,6 +574,9 @@
 	struct kprobe *p;
 	struct kprobe_ctlblk *kcb;
 
+	if (user_mode_vm(regs))
+		return 0;
+
 	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
 	/*
 	 * We don't want to be preempted for the entire
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 99bef86..71737a8 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -214,7 +214,7 @@
 static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 		  int oldproglen, struct jit_context *ctx)
 {
-	struct sock_filter_int *insn = bpf_prog->insnsi;
+	struct bpf_insn *insn = bpf_prog->insnsi;
 	int insn_cnt = bpf_prog->len;
 	u8 temp[64];
 	int i;
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index c985835..ebfa9b2 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -36,99 +36,133 @@
 
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #include <xen/interface/xen.h>
 #include <xen/page.h>
 #include <xen/grant_table.h>
+#include <xen/xen.h>
 
 #include <asm/pgtable.h>
 
-static int map_pte_fn(pte_t *pte, struct page *pmd_page,
-		      unsigned long addr, void *data)
-{
-	unsigned long **frames = (unsigned long **)data;
-
-	set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-	(*frames)++;
-	return 0;
-}
-
-/*
- * This function is used to map shared frames to store grant status. It is
- * different from map_pte_fn above, the frames type here is uint64_t.
- */
-static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
-			     unsigned long addr, void *data)
-{
-	uint64_t **frames = (uint64_t **)data;
-
-	set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-	(*frames)++;
-	return 0;
-}
-
-static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
-			unsigned long addr, void *data)
-{
-
-	set_pte_at(&init_mm, addr, pte, __pte(0));
-	return 0;
-}
+static struct gnttab_vm_area {
+	struct vm_struct *area;
+	pte_t **ptes;
+} gnttab_shared_vm_area, gnttab_status_vm_area;
 
 int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
 			   unsigned long max_nr_gframes,
 			   void **__shared)
 {
-	int rc;
 	void *shared = *__shared;
+	unsigned long addr;
+	unsigned long i;
 
-	if (shared == NULL) {
-		struct vm_struct *area =
-			alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-		BUG_ON(area == NULL);
-		shared = area->addr;
-		*__shared = shared;
+	if (shared == NULL)
+		*__shared = shared = gnttab_shared_vm_area.area->addr;
+
+	addr = (unsigned long)shared;
+
+	for (i = 0; i < nr_gframes; i++) {
+		set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
+			   mfn_pte(frames[i], PAGE_KERNEL));
+		addr += PAGE_SIZE;
 	}
 
-	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-				 PAGE_SIZE * nr_gframes,
-				 map_pte_fn, &frames);
-	return rc;
+	return 0;
 }
 
 int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
 			   unsigned long max_nr_gframes,
 			   grant_status_t **__shared)
 {
-	int rc;
 	grant_status_t *shared = *__shared;
+	unsigned long addr;
+	unsigned long i;
 
-	if (shared == NULL) {
-		/* No need to pass in PTE as we are going to do it
-		 * in apply_to_page_range anyhow. */
-		struct vm_struct *area =
-			alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-		BUG_ON(area == NULL);
-		shared = area->addr;
-		*__shared = shared;
+	if (shared == NULL)
+		*__shared = shared = gnttab_status_vm_area.area->addr;
+
+	addr = (unsigned long)shared;
+
+	for (i = 0; i < nr_gframes; i++) {
+		set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
+			   mfn_pte(frames[i], PAGE_KERNEL));
+		addr += PAGE_SIZE;
 	}
 
-	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-				 PAGE_SIZE * nr_gframes,
-				 map_pte_fn_status, &frames);
-	return rc;
+	return 0;
 }
 
 void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
 {
-	apply_to_page_range(&init_mm, (unsigned long)shared,
-			    PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
+	pte_t **ptes;
+	unsigned long addr;
+	unsigned long i;
+
+	if (shared == gnttab_status_vm_area.area->addr)
+		ptes = gnttab_status_vm_area.ptes;
+	else
+		ptes = gnttab_shared_vm_area.ptes;
+
+	addr = (unsigned long)shared;
+
+	for (i = 0; i < nr_gframes; i++) {
+		set_pte_at(&init_mm, addr, ptes[i], __pte(0));
+		addr += PAGE_SIZE;
+	}
 }
+
+static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
+{
+	area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
+	if (area->ptes == NULL)
+		return -ENOMEM;
+
+	area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
+	if (area->area == NULL) {
+		kfree(area->ptes);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void arch_gnttab_vfree(struct gnttab_vm_area *area)
+{
+	free_vm_area(area->area);
+	kfree(area->ptes);
+}
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+	int ret;
+
+	if (!xen_pv_domain())
+		return 0;
+
+	ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Always allocate the space for the status frames in case
+	 * we're migrated to a host with V2 support.
+	 */
+	ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+  err:
+	arch_gnttab_vfree(&gnttab_shared_vm_area);
+	return -ENOMEM;
+}
+
 #ifdef CONFIG_XEN_PVH
 #include <xen/balloon.h>
 #include <xen/events.h>
-#include <xen/xen.h>
 #include <linux/slab.h>
 static int __init xlated_setup_gnttab_pages(void)
 {
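The grant-table rewrite above hinges on the second argument of alloc_vm_area(): when a pte_t ** array is supplied, the allocator pre-populates it with a pointer to each PTE backing the area, so mapping and unmapping later is a plain loop of set_pte_at() on the saved pointers instead of an apply_to_page_range() walk. A minimal sketch of the allocation step (kmalloc_array() used here for brevity; the patch open-codes the size multiplication):

	pte_t **ptes;
	struct vm_struct *area;

	ptes = kmalloc_array(nr_frames, sizeof(*ptes), GFP_KERNEL);
	if (!ptes)
		return -ENOMEM;

	/* fills ptes[0..nr_frames-1] with the PTE locations for the area */
	area = alloc_vm_area(PAGE_SIZE * nr_frames, ptes);
	if (!area) {
		kfree(ptes);
		return -ENOMEM;
	}
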
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index f9e1ec3..8453e6e 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -376,38 +376,42 @@
 	beqz	a2, 1f		# if at start of vector, don't restore
 
 	addi	a0, a0, -128
-	bbsi	a0, 8, 1f	# don't restore except for overflow 8 and 12
-	bbsi	a0, 7, 2f
+	bbsi.l	a0, 8, 1f	# don't restore except for overflow 8 and 12
+
+	/*
+	 * This fixup handler is for the extremely unlikely case where the
+	 * overflow handler's reference thru a0 gets a hardware TLB refill
+	 * that bumps out the (distinct, aliasing) TLB entry that mapped its
+	 * prior references thru a9/a13, and where our reference now thru
+	 * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+	 */
+	movi	a2, window_overflow_restore_a0_fixup
+	s32i	a2, a3, EXC_TABLE_FIXUP
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+
+	bbsi.l	a0, 7, 2f
 
 	/*
 	 * Restore a0 as saved by _WindowOverflow8().
-	 *
-	 * FIXME:  we really need a fixup handler for this L32E,
-	 * for the extremely unlikely case where the overflow handler's
-	 * reference thru a0 gets a hardware TLB refill that bumps out
-	 * the (distinct, aliasing) TLB entry that mapped its prior
-	 * references thru a9, and where our reference now thru a9
-	 * gets a 2nd-level miss exception (not hardware TLB refill).
 	 */
 
-	l32e	a2, a9, -16
-	wsr	a2, depc	# replace the saved a0
-	j	1f
+	l32e	a0, a9, -16
+	wsr	a0, depc	# replace the saved a0
+	j	3f
 
 2:
 	/*
 	 * Restore a0 as saved by _WindowOverflow12().
-	 *
-	 * FIXME:  we really need a fixup handler for this L32E,
-	 * for the extremely unlikely case where the overflow handler's
-	 * reference thru a0 gets a hardware TLB refill that bumps out
-	 * the (distinct, aliasing) TLB entry that mapped its prior
-	 * references thru a13, and where our reference now thru a13
-	 * gets a 2nd-level miss exception (not hardware TLB refill).
 	 */
 
-	l32e	a2, a13, -16
-	wsr	a2, depc	# replace the saved a0
+	l32e	a0, a13, -16
+	wsr	a0, depc	# replace the saved a0
+3:
+	xsr	a3, excsave1
+	movi	a0, 0
+	s32i	a0, a3, EXC_TABLE_FIXUP
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
 1:
 	/*
 	 * Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@
 
 	s32i	a0, a2, PT_DEPC
 
+_DoubleExceptionVector_handle_exception:
 	addx4	a0, a0, a3
 	l32i	a0, a0, EXC_TABLE_FAST_USER
 	xsr	a3, excsave1
@@ -464,11 +469,120 @@
 	rotw	-3
 	j	1b
 
-	.end literal_prefix
 
 ENDPROC(_DoubleExceptionVector)
 
 /*
+ * Fixup handler for TLB miss in double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up exception stack to return back to appropriate a0 restore code
+ *   (we'll need to rotate window back and there's no place to save this
+ *    information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ *   about the conditions of the original double exception that happened in
+ *   the window overflow handler is lost, so we just return to userspace to
+ *   retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+	rsr	a0, ps
+	extui	a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+	rsr	a2, windowbase
+	sub	a0, a2, a0
+	extui	a0, a0, 0, 3
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+
+	_beqi	a0, 1, .Lhandle_1
+	_beqi	a0, 3, .Lhandle_3
+
+	.macro	overflow_fixup_handle_exception_pane n
+
+	rsr	a0, depc
+	rotw	-\n
+
+	xsr	a3, excsave1
+	wsr	a2, depc
+	l32i	a2, a3, EXC_TABLE_KSTK
+	s32i	a0, a2, PT_AREG0
+
+	movi	a0, .Lrestore_\n
+	s32i	a0, a2, PT_DEPC
+	rsr	a0, exccause
+	j	_DoubleExceptionVector_handle_exception
+
+	.endm
+
+	overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+	overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+	overflow_fixup_handle_exception_pane 3
+
+	.macro	overflow_fixup_restore_a0_pane n
+
+	rotw	\n
+	/* Need to preserve a0 value here to be able to handle exception
+	 * that may occur on a0 reload from stack. It may occur because
+	 * TLB miss handler may not be atomic and pointer to page table
+	 * may be lost before we get here. There are no free registers,
+	 * so we need to use EXC_TABLE_DOUBLE_SAVE area.
+	 */
+	xsr	a3, excsave1
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	movi	a2, window_overflow_restore_a0_fixup
+	s32i	a2, a3, EXC_TABLE_FIXUP
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+	bbsi.l	a0, 7, 1f
+	l32e	a0, a9, -16
+	j	2f
+1:
+	l32e	a0, a13, -16
+2:
+	rotw	-\n
+
+	.endm
+
+.Lrestore_2:
+	overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+	xsr	a3, excsave1
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	movi	a2, 0
+	s32i	a2, a3, EXC_TABLE_FIXUP
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+	rfe
+
+.Lrestore_1:
+	overflow_fixup_restore_a0_pane 1
+	j	.Lset_default_fixup
+.Lrestore_3:
+	overflow_fixup_restore_a0_pane 3
+	j	.Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+	.end literal_prefix
+/*
  * Debug interrupt vector
  *
  * There is not much space here, so simply jump to another handler.
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index ee32c00..d16db6d 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -269,13 +269,13 @@
 		  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
 		  .DoubleExceptionVector.literal,
-		  DOUBLEEXC_VECTOR_VADDR - 16,
+		  DOUBLEEXC_VECTOR_VADDR - 40,
 		  SIZEOF(.UserExceptionVector.text),
 		  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
 		  .DoubleExceptionVector.text,
 		  DOUBLEEXC_VECTOR_VADDR,
-		  32,
+		  40,
 		  .DoubleExceptionVector.literal)
 
   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 4224256..77ed202 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -191,7 +191,7 @@
 		return -EINVAL;
 	}
 
-	if (it && start - it->start < bank_sz) {
+	if (it && start - it->start <= bank_sz) {
 		if (start == it->start) {
 			if (end - it->start < bank_sz) {
 				it->start = end;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d19c37a7..677c0c1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4798,9 +4798,8 @@
 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 {
 	struct ata_queued_cmd *qc = NULL;
-	unsigned int i, tag, max_queue;
-
-	max_queue = ap->scsi_host->can_queue;
+	unsigned int max_queue = ap->host->n_tags;
+	unsigned int i, tag;
 
 	/* no command while frozen */
 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
@@ -6094,6 +6093,7 @@
 {
 	spin_lock_init(&host->lock);
 	mutex_init(&host->eh_mutex);
+	host->n_tags = ATA_MAX_QUEUE - 1;
 	host->dev = dev;
 	host->ops = ops;
 }
@@ -6175,15 +6175,7 @@
 {
 	int i, rc;
 
-	/*
-	 * The max queue supported by hardware must not be greater than
-	 * ATA_MAX_QUEUE.
-	 */
-	if (sht->can_queue > ATA_MAX_QUEUE) {
-		dev_err(host->dev, "BUG: the hardware max queue is too large\n");
-		WARN_ON(1);
-		return -EINVAL;
-	}
+	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
 
 	/* host must have been started */
 	if (!(host->flags & ATA_HOST_STARTED)) {
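Instead of refusing hosts whose can_queue exceeds ATA_MAX_QUEUE, libata now clamps the value into the range its tag allocator supports. A small illustration of the clamp() semantics, assuming ATA_MAX_QUEUE is 32 (its usual value) purely for the worked numbers:

	host->n_tags = clamp(sht->can_queue, 1, 32 - 1);
	/*  can_queue =  0  ->  1   (always at least one tag)
	 *  can_queue = 31  -> 31   (unchanged)
	 *  can_queue = 64  -> 31   (never indexes past the qcmd array) */
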
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 5081a8c..bb694e2 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -603,6 +603,7 @@
 		tmp = BCMA_CC_PMU_CTL_PLL_UPD | BCMA_CC_PMU_CTL_NOILPONW;
 		break;
 
+	case BCMA_CHIP_ID_BCM43217:
 	case BCMA_CHIP_ID_BCM43227:
 	case BCMA_CHIP_ID_BCM43228:
 	case BCMA_CHIP_ID_BCM43428:
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index e3333053..3cf725a 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -279,6 +279,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
 	{ 0, },
 };
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 72bf454..97bb38e 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -201,6 +201,23 @@
 		SPEX(_field[7], _offset + 14, _mask, _shift);	\
 	} while (0)
 
+static s8 sprom_extract_antgain(const u16 *in, u16 offset, u16 mask, u16 shift)
+{
+	u16 v;
+	u8 gain;
+
+	v = in[SPOFF(offset)];
+	gain = (v & mask) >> shift;
+	if (gain == 0xFF) {
+		gain = 8; /* If unset use 2dBm */
+	} else {
+		/* Q5.2 Fractional part is stored in 0xC0 */
+		gain = ((gain & 0xC0) >> 6) | ((gain & 0x3F) << 2);
+	}
+
+	return (s8)gain;
+}
+
 static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
 {
 	u16 v, o;
@@ -381,14 +398,22 @@
 	SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, ~0, 0);
 
 	/* Extract the antenna gain values. */
-	SPEX(antenna_gain.a0, SSB_SPROM8_AGAIN01,
-	     SSB_SPROM8_AGAIN0, SSB_SPROM8_AGAIN0_SHIFT);
-	SPEX(antenna_gain.a1, SSB_SPROM8_AGAIN01,
-	     SSB_SPROM8_AGAIN1, SSB_SPROM8_AGAIN1_SHIFT);
-	SPEX(antenna_gain.a2, SSB_SPROM8_AGAIN23,
-	     SSB_SPROM8_AGAIN2, SSB_SPROM8_AGAIN2_SHIFT);
-	SPEX(antenna_gain.a3, SSB_SPROM8_AGAIN23,
-	     SSB_SPROM8_AGAIN3, SSB_SPROM8_AGAIN3_SHIFT);
+	bus->sprom.antenna_gain.a0 = sprom_extract_antgain(sprom,
+							   SSB_SPROM8_AGAIN01,
+							   SSB_SPROM8_AGAIN0,
+							   SSB_SPROM8_AGAIN0_SHIFT);
+	bus->sprom.antenna_gain.a1 = sprom_extract_antgain(sprom,
+							   SSB_SPROM8_AGAIN01,
+							   SSB_SPROM8_AGAIN1,
+							   SSB_SPROM8_AGAIN1_SHIFT);
+	bus->sprom.antenna_gain.a2 = sprom_extract_antgain(sprom,
+							   SSB_SPROM8_AGAIN23,
+							   SSB_SPROM8_AGAIN2,
+							   SSB_SPROM8_AGAIN2_SHIFT);
+	bus->sprom.antenna_gain.a3 = sprom_extract_antgain(sprom,
+							   SSB_SPROM8_AGAIN23,
+							   SSB_SPROM8_AGAIN3,
+							   SSB_SPROM8_AGAIN3_SHIFT);
 
 	SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON,
 	     SSB_SPROM8_LEDDC_ON_SHIFT);
@@ -509,6 +534,7 @@
 		/* for these chips OTP is always available */
 		present = true;
 		break;
+	case BCMA_CHIP_ID_BCM43217:
 	case BCMA_CHIP_ID_BCM43227:
 	case BCMA_CHIP_ID_BCM43228:
 	case BCMA_CHIP_ID_BCM43428:
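sprom_extract_antgain() above decodes the raw antenna-gain byte into quarter-dBm units: the low six bits are the integer dBm value, the top two bits the Q5.2 fraction, and 0xFF means the field was never programmed. A worked example of the conversion:

	/* raw = 0x42 = 0b0100_0010
	 *   fraction = (0x42 & 0xC0) >> 6 = 1        (quarter-dBm steps)
	 *   integer  = (0x42 & 0x3F) << 2 = 8        (2 dBm in quarter-dBm)
	 *   result   = 8 | 1 = 9  ->  2.25 dBm
	 * raw = 0xFF (unprogrammed) -> forced to 8 ->  2.00 dBm default */
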
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 089e72c..36e54be 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,11 +622,18 @@
 	memset(&zram->stats, 0, sizeof(zram->stats));
 
 	zram->disksize = 0;
-	if (reset_capacity) {
+	if (reset_capacity)
 		set_capacity(zram->disk, 0);
-		revalidate_disk(zram->disk);
-	}
+
 	up_write(&zram->init_lock);
+
+	/*
+	 * Revalidate disk out of the init_lock to avoid lockdep splat.
+	 * It's okay because disk's capacity is protected by init_lock
+	 * so that revalidate_disk always sees up-to-date capacity.
+	 */
+	if (reset_capacity)
+		revalidate_disk(zram->disk);
 }
 
 static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@
 	zram->comp = comp;
 	zram->disksize = disksize;
 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-	revalidate_disk(zram->disk);
 	up_write(&zram->init_lock);
+
+	/*
+	 * Revalidate disk out of the init_lock to avoid lockdep splat.
+	 * It's okay because disk's capacity is protected by init_lock
+	 * so that revalidate_disk always sees up-to-date capacity.
+	 */
+	revalidate_disk(zram->disk);
+
 	return len;
 
 out_destroy_comp:
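Both zram hunks apply the same locking rule: the capacity is changed while init_lock is held, but revalidate_disk() is only called after up_write(), because it takes block-layer locks of its own and the nesting trips lockdep. The pattern in isolation (nr_sectors stands in for the computed size):

	down_write(&zram->init_lock);
	set_capacity(zram->disk, nr_sectors);	/* state guarded by init_lock */
	up_write(&zram->init_lock);

	/* safe here: the capacity is already published and no zram lock is
	 * held while revalidate_disk() grabs bdev/queue locks */
	revalidate_disk(zram->disk);
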
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 230c552..a0d7355 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -27,6 +27,7 @@
 #include <linux/device.h>
 #include <linux/firmware.h>
 #include <linux/usb.h>
+#include <asm/unaligned.h>
 #include <net/bluetooth/bluetooth.h>
 
 #define VERSION "1.0"
@@ -50,12 +51,12 @@
 #define ATH3K_NAME_LEN				0xFF
 
 struct ath3k_version {
-	unsigned int	rom_version;
-	unsigned int	build_version;
-	unsigned int	ram_version;
-	unsigned char	ref_clock;
-	unsigned char	reserved[0x07];
-};
+	__le32	rom_version;
+	__le32	build_version;
+	__le32	ram_version;
+	__u8	ref_clock;
+	__u8	reserved[7];
+} __packed;
 
 static const struct usb_device_id ath3k_table[] = {
 	/* Atheros AR3011 */
@@ -349,7 +350,8 @@
 	unsigned char fw_state;
 	char filename[ATH3K_NAME_LEN] = {0};
 	const struct firmware *firmware;
-	struct ath3k_version fw_version, pt_version;
+	struct ath3k_version fw_version;
+	__u32 pt_rom_version, pt_build_version;
 	int ret;
 
 	ret = ath3k_get_state(udev, &fw_state);
@@ -370,7 +372,7 @@
 	}
 
 	snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
-		le32_to_cpu(fw_version.rom_version));
+		 le32_to_cpu(fw_version.rom_version));
 
 	ret = request_firmware(&firmware, filename, &udev->dev);
 	if (ret < 0) {
@@ -378,12 +380,13 @@
 		return ret;
 	}
 
-	pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8);
-	pt_version.build_version = *(int *)
-		(firmware->data + firmware->size - 4);
+	pt_rom_version = get_unaligned_le32(firmware->data +
+					    firmware->size - 8);
+	pt_build_version = get_unaligned_le32(firmware->data +
+					      firmware->size - 4);
 
-	if ((pt_version.rom_version != fw_version.rom_version) ||
-		(pt_version.build_version <= fw_version.build_version)) {
+	if (pt_rom_version != le32_to_cpu(fw_version.rom_version) ||
+	    pt_build_version <= le32_to_cpu(fw_version.build_version)) {
 		BT_ERR("Patch file version did not match with firmware");
 		release_firmware(firmware);
 		return -EINVAL;
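The ath3k change above treats the version trailer at the end of the firmware image as what it is: unaligned little-endian data, rather than casting the bytes through an int pointer. A minimal sketch of reading such a trailer (fw is assumed to point to a struct firmware of at least 8 bytes):

	#include <asm/unaligned.h>

	u32 rom_version, build_version;

	/* safe on architectures that fault on unaligned loads, and correct
	 * on big-endian hosts, unlike *(int *)(fw->data + ...) */
	rom_version   = get_unaligned_le32(fw->data + fw->size - 8);
	build_version = get_unaligned_le32(fw->data + fw->size - 4);
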
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index caf6841..38ad662 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -91,6 +91,7 @@
 
 /* Vendor specific Bluetooth commands */
 #define BT_CMD_PSCAN_WIN_REPORT_ENABLE	0xFC03
+#define BT_CMD_SET_BDADDR		0xFC22
 #define BT_CMD_AUTO_SLEEP_MODE		0xFC23
 #define BT_CMD_HOST_SLEEP_CONFIG	0xFC59
 #define BT_CMD_HOST_SLEEP_ENABLE	0xFC5A
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index cc65fd2..bae8e6a 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -539,6 +539,29 @@
 	return 0;
 }
 
+static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+	struct sk_buff *skb;
+	long ret;
+	u8 buf[8];
+
+	buf[0] = MRVL_VENDOR_PKT;
+	buf[1] = sizeof(bdaddr_t);
+	memcpy(buf + 2, bdaddr, sizeof(bdaddr_t));
+
+	skb = __hci_cmd_sync(hdev, BT_CMD_SET_BDADDR, sizeof(buf), buf,
+			     HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		ret = PTR_ERR(skb);
+		BT_ERR("%s: changing btmrvl device address failed (%ld)",
+		       hdev->name, ret);
+		return ret;
+	}
+	kfree_skb(skb);
+
+	return 0;
+}
+
 /*
  * This function handles the event generated by firmware, rx data
  * received from firmware, and tx data sent from kernel.
@@ -632,6 +655,7 @@
 	hdev->flush = btmrvl_flush;
 	hdev->send  = btmrvl_send_frame;
 	hdev->setup = btmrvl_setup;
+	hdev->set_bdaddr = btmrvl_set_bdaddr;
 
 	hdev->dev_type = priv->btmrvl_dev.dev_type;
 
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index efff064..3e683b1 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1169,6 +1169,10 @@
 	}
 
 	priv = card->priv;
+	hcidev = priv->btmrvl_dev.hcidev;
+	BT_DBG("%s: SDIO suspend", hcidev->name);
+	hci_suspend_dev(hcidev);
+	skb_queue_purge(&priv->adapter->tx_queue);
 
 	if (priv->adapter->hs_state != HS_ACTIVATED) {
 		if (btmrvl_enable_hs(priv)) {
@@ -1176,10 +1180,6 @@
 			return -EBUSY;
 		}
 	}
-	hcidev = priv->btmrvl_dev.hcidev;
-	BT_DBG("%s: SDIO suspend", hcidev->name);
-	hci_suspend_dev(hcidev);
-	skb_queue_purge(&priv->adapter->tx_queue);
 
 	priv->adapter->is_suspended = true;
 
@@ -1221,13 +1221,13 @@
 		return 0;
 	}
 
-	priv->adapter->is_suspended = false;
-	hcidev = priv->btmrvl_dev.hcidev;
-	BT_DBG("%s: SDIO resume", hcidev->name);
-	hci_resume_dev(hcidev);
 	priv->hw_wakeup_firmware(priv);
 	priv->adapter->hs_state = HS_DEACTIVATED;
+	hcidev = priv->btmrvl_dev.hcidev;
 	BT_DBG("%s: HS DEACTIVATED in resume!", hcidev->name);
+	priv->adapter->is_suspended = false;
+	BT_DBG("%s: SDIO resume", hcidev->name);
+	hci_resume_dev(hcidev);
 
 	return 0;
 }
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index ed7b33b..292c38e8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -48,6 +48,7 @@
 #define BTUSB_INTEL		0x100
 #define BTUSB_INTEL_BOOT	0x200
 #define BTUSB_BCM_PATCHRAM	0x400
+#define BTUSB_MARVELL		0x800
 
 static const struct usb_device_id btusb_table[] = {
 	/* Generic Bluetooth USB device */
@@ -113,6 +114,9 @@
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
 	  .driver_info = BTUSB_BCM_PATCHRAM },
 
+	/* ASUSTek Computer - Broadcom based */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
+
 	/* Belkin F8065bf - Broadcom based */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
 
@@ -242,6 +246,10 @@
 	{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
 	{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
 
+	/* Marvell device */
+	{ USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
+	{ USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
+
 	{ }	/* Terminating entry */
 };
 
@@ -1455,6 +1463,29 @@
 	return 0;
 }
 
+static int btusb_set_bdaddr_marvell(struct hci_dev *hdev,
+				    const bdaddr_t *bdaddr)
+{
+	struct sk_buff *skb;
+	u8 buf[8];
+	long ret;
+
+	buf[0] = 0xfe;
+	buf[1] = sizeof(bdaddr_t);
+	memcpy(buf + 2, bdaddr, sizeof(bdaddr_t));
+
+	skb = __hci_cmd_sync(hdev, 0xfc22, sizeof(buf), buf, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		ret = PTR_ERR(skb);
+		BT_ERR("%s: changing Marvell device address failed (%ld)",
+		       hdev->name, ret);
+		return ret;
+	}
+	kfree_skb(skb);
+
+	return 0;
+}
+
 #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
 
 static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
@@ -1766,6 +1797,9 @@
 		hdev->set_bdaddr = btusb_set_bdaddr_intel;
 	}
 
+	if (id->driver_info & BTUSB_MARVELL)
+		hdev->set_bdaddr = btusb_set_bdaddr_marvell;
+
 	if (id->driver_info & BTUSB_INTEL_BOOT)
 		set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
 
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index e00f8f5..dc487b5 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -431,6 +431,9 @@
 	if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
 		set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
 
+	if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
+		set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+
 	if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
 		set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
 
@@ -477,6 +480,22 @@
 	return 0;
 }
 
+static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
+{
+	unsigned long valid_flags = BIT(HCI_UART_RAW_DEVICE) |
+				    BIT(HCI_UART_RESET_ON_INIT) |
+				    BIT(HCI_UART_CREATE_AMP) |
+				    BIT(HCI_UART_INIT_PENDING) |
+				    BIT(HCI_UART_EXT_CONFIG);
+
+	if ((flags & ~valid_flags))
+		return -EINVAL;
+
+	hu->hdev_flags = flags;
+
+	return 0;
+}
+
 /* hci_uart_tty_ioctl()
  *
  *    Process IOCTL system call for the tty device.
@@ -520,14 +539,16 @@
 		return -EUNATCH;
 
 	case HCIUARTGETDEVICE:
-		if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+		if (test_bit(HCI_UART_REGISTERED, &hu->flags))
 			return hu->hdev->id;
 		return -EUNATCH;
 
 	case HCIUARTSETFLAGS:
 		if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
 			return -EBUSY;
-		hu->hdev_flags = arg;
+		err = hci_uart_set_flags(hu, arg);
+		if (err)
+			return err;
 		break;
 
 	case HCIUARTGETFLAGS:
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 12df101..247488e 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -48,6 +48,7 @@
 #define HCI_UART_RESET_ON_INIT	1
 #define HCI_UART_CREATE_AMP	2
 #define HCI_UART_INIT_PENDING	3
+#define HCI_UART_EXT_CONFIG	4
 
 struct hci_uart;
 
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 5798541..a66a321 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -336,10 +336,10 @@
 		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
 
 	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
-		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
 
 	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
-		0},
+		QUIRK_NO_MSI},
 
 	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
 		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0c9f803..b6ae89e 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -284,6 +284,7 @@
 
 static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
 	.map	= gpio_rcar_irq_domain_map,
+	.xlate	= irq_domain_xlate_twocell,
 };
 
 struct gpio_rcar_info {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f361263..d893e4d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1616,22 +1616,6 @@
 	return ret;
 }
 
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
-	struct i915_vma *vma;
-
-	/*
-	 * Only the global gtt is relevant for gtt memory mappings, so restrict
-	 * list traversal to objects bound into the global address space. Note
-	 * that the active list should be empty, but better safe than sorry.
-	 */
-	WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
-	list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
-		i915_gem_release_mmap(vma->obj);
-	list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
-		i915_gem_release_mmap(vma->obj);
-}
-
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -1657,6 +1641,15 @@
 	obj->fault_mappable = false;
 }
 
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+		i915_gem_release_mmap(obj);
+}
+
 uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3521f99..34894b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -31,7 +31,7 @@
 struct i915_render_state {
 	struct drm_i915_gem_object *obj;
 	unsigned long ggtt_offset;
-	void *batch;
+	u32 *batch;
 	u32 size;
 	u32 len;
 };
@@ -80,7 +80,7 @@
 
 static void render_state_free(struct i915_render_state *so)
 {
-	kunmap(so->batch);
+	kunmap(kmap_to_page(so->batch));
 	i915_gem_object_ggtt_unpin(so->obj);
 	drm_gem_object_unreference(&so->obj->base);
 	kfree(so);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 267f069..c05c84f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2845,7 +2845,7 @@
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct intel_engine_cs *signaller;
-	u32 seqno, ctl;
+	u32 seqno;
 
 	ring->hangcheck.deadlock++;
 
@@ -2857,15 +2857,12 @@
 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
 		return -1;
 
-	/* cursory check for an unkickable deadlock */
-	ctl = I915_READ_CTL(signaller);
-	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
-		return -1;
-
 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
 		return 1;
 
-	if (signaller->hangcheck.deadlock)
+	/* cursory check for an unkickable deadlock */
+	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+	    semaphore_passed(signaller) < 0)
 		return -1;
 
 	return 0;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0b24711..c0ea661 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2291,6 +2291,7 @@
 				gb_tile_moden = 0;
 				break;
 			}
+			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
 		}
 	} else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
+		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
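The one-line additions to the cik/evergreen/r600/si *_get_ih_wptr() helpers all cure the same bug: the hardware write pointer has RB_OVERFLOW or'ed into it when the IH ring wraps, so the bit must be stripped after acknowledging the overflow and before masking the pointer. The shape of the fixed path (the wptr read from the writeback buffer or IH_RB_WPTR is elided):

	if (wptr & RB_OVERFLOW) {
		/* warn, then set IH_WPTR_OVERFLOW_CLEAR in IH_RB_CNTL */
		wptr &= ~RB_OVERFLOW;	/* otherwise the stale bit corrupts
					 * the masked pointer on every pass */
	}
	return wptr & rdev->ih.ptr_mask;
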
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 250bac3..15e4f28 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4756,6 +4756,7 @@
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
+		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c66952d..3c69f58 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3795,6 +3795,7 @@
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
+		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b720450..60c47f8 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -449,6 +449,7 @@
 
 	/* protected by vm mutex */
 	struct list_head		vm_list;
+	struct list_head		vm_status;
 
 	/* constant after initialization */
 	struct radeon_vm		*vm;
@@ -867,6 +868,9 @@
 	struct list_head		va;
 	unsigned			id;
 
+	/* BOs freed, but not yet updated in the PT */
+	struct list_head		freed;
+
 	/* contains the page directory */
 	struct radeon_bo		*page_directory;
 	uint64_t			pd_gpu_addr;
@@ -875,6 +879,8 @@
 	/* array of page tables, one for each page directory entry */
 	struct radeon_vm_pt		*page_tables;
 
+	struct radeon_bo_va		*ib_bo_va;
+
 	struct mutex			mutex;
 	/* last fence for cs using this vm */
 	struct radeon_fence		*fence;
@@ -2832,9 +2838,10 @@
 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
 int radeon_vm_update_page_directory(struct radeon_device *rdev,
 				    struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+			  struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
-			struct radeon_vm *vm,
-			struct radeon_bo *bo,
+			struct radeon_bo_va *bo_va,
 			struct ttm_mem_reg *mem);
 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 			     struct radeon_bo *bo);
@@ -2847,8 +2854,8 @@
 			  struct radeon_bo_va *bo_va,
 			  uint64_t offset,
 			  uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-		     struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+		      struct radeon_bo_va *bo_va);
 
 /* audio */
 void r600_audio_update_hdmi(struct work_struct *work);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 71a1434..ae763f6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -461,13 +461,23 @@
 				   struct radeon_vm *vm)
 {
 	struct radeon_device *rdev = p->rdev;
+	struct radeon_bo_va *bo_va;
 	int i, r;
 
 	r = radeon_vm_update_page_directory(rdev, vm);
 	if (r)
 		return r;
 
-	r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+	r = radeon_vm_clear_freed(rdev, vm);
+	if (r)
+		return r;
+
+	if (vm->ib_bo_va == NULL) {
+		DRM_ERROR("Tmp BO not in VM!\n");
+		return -EINVAL;
+	}
+
+	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
 				&rdev->ring_tmp_bo.bo->tbo.mem);
 	if (r)
 		return r;
@@ -480,7 +490,13 @@
 			continue;
 
 		bo = p->relocs[i].robj;
-		r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+		bo_va = radeon_vm_bo_find(vm, bo);
+		if (bo_va == NULL) {
+			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+			return -EINVAL;
+		}
+
+		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
 		if (r)
 			return r;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 03686fa..697add2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1056,36 +1056,36 @@
 	if (!radeon_check_pot_argument(radeon_vm_size)) {
 		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
 			 radeon_vm_size);
-		radeon_vm_size = 4096;
+		radeon_vm_size = 4;
 	}
 
-	if (radeon_vm_size < 4) {
-		dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+	if (radeon_vm_size < 1) {
+		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
 			 radeon_vm_size);
-		radeon_vm_size = 4096;
+		radeon_vm_size = 4;
 	}
 
        /*
         * Max GPUVM size for Cayman, SI and CI are 40 bits.
         */
-	if (radeon_vm_size > 1024*1024) {
-		dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+	if (radeon_vm_size > 1024) {
+		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
 			 radeon_vm_size);
-		radeon_vm_size = 4096;
+		radeon_vm_size = 4;
 	}
 
 	/* defines number of bits in page table versus page directory,
 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 	 * page table and the remaining bits are in the page directory */
 	if (radeon_vm_block_size < 9) {
-		dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
 			 radeon_vm_block_size);
 		radeon_vm_block_size = 9;
 	}
 
 	if (radeon_vm_block_size > 24 ||
-	    radeon_vm_size < (1ull << radeon_vm_block_size)) {
-		dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
 			 radeon_vm_block_size);
 		radeon_vm_block_size = 9;
 	}
@@ -1238,7 +1238,7 @@
 	/* Adjust VM size here.
 	 * Max GPUVM size for cayman+ is 40 bits.
 	 */
-	rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
 
 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
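radeon_vm_size is now expressed in gigabytes instead of megabytes, which is why the conversion to pages changes from << 8 to << 18. The arithmetic for the default value:

	/* pages = (size_GiB << 30) >> 12 (4 KiB pages) = size_GiB << 18
	 * old MiB units: (size_MiB << 20) >> 12        = size_MiB << 8
	 * default radeon_vm_size = 4  ->  max_pfn = 4 << 18 = 1048576 pages */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
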
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb14213..e9e3610 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -173,7 +173,7 @@
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
 int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
 int radeon_vm_block_size = 9;
 int radeon_deep_color = 0;
 
@@ -243,7 +243,7 @@
 MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
 module_param_named(hard_reset, radeon_hard_reset, int, 0444);
 
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
 module_param_named(vm_size, radeon_vm_size, int, 0444);
 
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 35d9318..d25ae6a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -579,7 +579,7 @@
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN) {
 		struct radeon_fpriv *fpriv;
-		struct radeon_bo_va *bo_va;
+		struct radeon_vm *vm;
 		int r;
 
 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@
 			return -ENOMEM;
 		}
 
-		r = radeon_vm_init(rdev, &fpriv->vm);
+		vm = &fpriv->vm;
+		r = radeon_vm_init(rdev, vm);
 		if (r) {
 			kfree(fpriv);
 			return r;
@@ -596,22 +597,23 @@
 		if (rdev->accel_working) {
 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 			if (r) {
-				radeon_vm_fini(rdev, &fpriv->vm);
+				radeon_vm_fini(rdev, vm);
 				kfree(fpriv);
 				return r;
 			}
 
 			/* map the ib pool buffer read only into
 			 * virtual address space */
-			bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-						 rdev->ring_tmp_bo.bo);
-			r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+							rdev->ring_tmp_bo.bo);
+			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+						  RADEON_VA_IB_OFFSET,
 						  RADEON_VM_PAGE_READABLE |
 						  RADEON_VM_PAGE_SNOOPED);
 
 			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 			if (r) {
-				radeon_vm_fini(rdev, &fpriv->vm);
+				radeon_vm_fini(rdev, vm);
 				kfree(fpriv);
 				return r;
 			}
@@ -640,21 +642,19 @@
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
 		struct radeon_fpriv *fpriv = file_priv->driver_priv;
-		struct radeon_bo_va *bo_va;
+		struct radeon_vm *vm = &fpriv->vm;
 		int r;
 
 		if (rdev->accel_working) {
 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 			if (!r) {
-				bo_va = radeon_vm_bo_find(&fpriv->vm,
-							  rdev->ring_tmp_bo.bo);
-				if (bo_va)
-					radeon_vm_bo_rmv(rdev, bo_va);
+				if (vm->ib_bo_va)
+					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
 				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 			}
 		}
 
-		radeon_vm_fini(rdev, &fpriv->vm);
+		radeon_vm_fini(rdev, vm);
 		kfree(fpriv);
 		file_priv->driver_priv = NULL;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index eecff6b..725d366 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -332,6 +332,7 @@
 	bo_va->ref_count = 1;
 	INIT_LIST_HEAD(&bo_va->bo_list);
 	INIT_LIST_HEAD(&bo_va->vm_list);
+	INIT_LIST_HEAD(&bo_va->vm_status);
 
 	mutex_lock(&vm->mutex);
 	list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@
 		head = &tmp->vm_list;
 	}
 
+	if (bo_va->soffset) {
+		/* add a clone of the bo_va to clear the old address */
+		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+		if (!tmp) {
+			mutex_unlock(&vm->mutex);
+			return -ENOMEM;
+		}
+		tmp->soffset = bo_va->soffset;
+		tmp->eoffset = bo_va->eoffset;
+		tmp->vm = vm;
+		list_add(&tmp->vm_status, &vm->freed);
+	}
+
 	bo_va->soffset = soffset;
 	bo_va->eoffset = eoffset;
 	bo_va->flags = flags;
@@ -823,25 +837,19 @@
  * Object have to be reserved and mutex must be locked!
  */
 int radeon_vm_bo_update(struct radeon_device *rdev,
-			struct radeon_vm *vm,
-			struct radeon_bo *bo,
+			struct radeon_bo_va *bo_va,
 			struct ttm_mem_reg *mem)
 {
+	struct radeon_vm *vm = bo_va->vm;
 	struct radeon_ib ib;
-	struct radeon_bo_va *bo_va;
 	unsigned nptes, ndw;
 	uint64_t addr;
 	int r;
 
-	bo_va = radeon_vm_bo_find(vm, bo);
-	if (bo_va == NULL) {
-		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
-		return -EINVAL;
-	}
 
 	if (!bo_va->soffset) {
 		dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
-			bo, vm);
+			bo_va->bo, vm);
 		return -EINVAL;
 	}
 
@@ -868,7 +876,7 @@
 
 	trace_radeon_vm_bo_update(bo_va);
 
-	nptes = radeon_bo_ngpu_pages(bo);
+	nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
 
 	/* padding, etc. */
 	ndw = 64;
@@ -911,33 +919,61 @@
 }
 
 /**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+			  struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+	int r;
+
+	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+		list_del(&bo_va->vm_status);
+		r = radeon_vm_bo_update(rdev, bo_va, NULL);
+		kfree(bo_va);
+		if (r)
+			return r;
+	}
+	return 0;
+
+}
+
+/**
  * radeon_vm_bo_rmv - remove a bo to a specific vm
  *
  * @rdev: radeon_device pointer
  * @bo_va: requested bo_va
  *
  * Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
  *
  * Object have to be reserved!
  */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-		     struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+		      struct radeon_bo_va *bo_va)
 {
-	int r = 0;
+	struct radeon_vm *vm = bo_va->vm;
 
-	mutex_lock(&bo_va->vm->mutex);
-	if (bo_va->soffset)
-		r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
-
-	list_del(&bo_va->vm_list);
-	mutex_unlock(&bo_va->vm->mutex);
 	list_del(&bo_va->bo_list);
 
-	kfree(bo_va);
-	return r;
+	mutex_lock(&vm->mutex);
+	list_del(&bo_va->vm_list);
+
+	if (bo_va->soffset) {
+		bo_va->bo = NULL;
+		list_add(&bo_va->vm_status, &vm->freed);
+	} else {
+		kfree(bo_va);
+	}
+
+	mutex_unlock(&vm->mutex);
 }
 
 /**
@@ -975,11 +1011,13 @@
 	int r;
 
 	vm->id = 0;
+	vm->ib_bo_va = NULL;
 	vm->fence = NULL;
 	vm->last_flush = NULL;
 	vm->last_id_use = NULL;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->va);
+	INIT_LIST_HEAD(&vm->freed);
 
 	pd_size = radeon_vm_directory_size(rdev);
 	pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@
 			kfree(bo_va);
 		}
 	}
-
+	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+		kfree(bo_va);
 
 	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
 		radeon_bo_unref(&vm->page_tables[i].bo);
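radeon_vm_bo_rmv() above no longer clears page-table entries itself: it parks the mapping on vm->freed (with bo_va->bo set to NULL, since the BO may be gone by then) and radeon_vm_clear_freed() flushes the list the next time the page tables are reserved. The flush has to use the _safe list iterator because each entry is unlinked and freed during the walk:

	/* list_for_each_entry_safe() caches the next node in 'tmp', so the
	 * current bo_va can be deleted and kfree()d without derailing the
	 * iteration -- the plain iterator would dereference freed memory. */
	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
		list_del(&bo_va->vm_status);
		r = radeon_vm_bo_update(rdev, bo_va, NULL);	/* clear old PTEs */
		kfree(bo_va);
		if (r)
			return r;
	}
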
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index eba0225..9e854fd 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6103,6 +6103,7 @@
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
+		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 20da6ff..32e50be 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,15 +1874,16 @@
 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
 		pi->at[i] = TRINITY_AT_DFLT;
 
-	/* There are stability issues reported on latops with
-	 * bapm installed when switching between AC and battery
-	 * power.  At the same time, some desktop boards hang
-	 * if it's not enabled and dpm is enabled.
+	/* There are stability issues reported with
+	 * bapm enabled when switching between AC and battery
+	 * power.  At the same time, some MSI boards hang
+	 * if it's not enabled and dpm is enabled.  Just enable
+	 * it for MSI boards right now.
 	 */
-	if (rdev->flags & RADEON_IS_MOBILITY)
-		pi->enable_bapm = false;
-	else
+	if (rdev->pdev->subsystem_vendor == 0x1462)
 		pi->enable_bapm = true;
+	else
+		pi->enable_bapm = false;
 	pi->enable_nbps_policy = true;
 	pi->enable_sclk_ds = true;
 	pi->enable_gfx_power_gating = true;
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index efee4c5..34b9a60 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -86,7 +86,7 @@
  */
 static inline s8 TEMP_TO_REG(int val)
 {
-	return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
+	return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
 }
 
 static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@
 	err = kstrtoul(buf, 10, &val);
 	if (err)
 		return err;
+	if (val > 255)
+		return -EINVAL;
 
 	data->vrm = val;
 	return count;
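TEMP_TO_REG() has to clamp in millidegrees before scaling down to the signed 8-bit register value; clamping after SCALE() is a no-op (the scaled value can never reach the ±128000 bounds) and out-of-range inputs then wrap in the s8 cast. A worked example:

	/* val = 200000 (200 degC, beyond the register range)
	 * old: SCALE(200000, 1, 1000) = 200; clamp(200, -128000, 127000) = 200;
	 *      (s8)200 wraps to -56                       -> bogus limit stored
	 * new: clamp_val(200000, -128000, 127000) = 127000;
	 *      SCALE(127000, 1, 1000) = 127               -> saturates correctly */
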
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 8ae4f89..e405627 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -180,7 +180,7 @@
 		struct mlx5_core_srq *msrq = NULL;
 
 		if (qp->ibqp.xrcd) {
-			msrq = mlx5_core_get_srq(&dev->mdev,
+			msrq = mlx5_core_get_srq(dev->mdev,
 						 be32_to_cpu(cqe->srqn));
 			srq = to_mibsrq(msrq);
 		} else {
@@ -348,7 +348,7 @@
 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 			   u16 tail, u16 head)
 {
-	int idx;
+	u16 idx;
 
 	do {
 		idx = tail & (qp->sq.wqe_cnt - 1);
@@ -364,7 +364,7 @@
 
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-	mlx5_buf_free(&dev->mdev, &buf->buf);
+	mlx5_buf_free(dev->mdev, &buf->buf);
 }
 
 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -450,7 +450,7 @@
 		 * because CQs will be locked while QPs are removed
 		 * from the table.
 		 */
-		mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
+		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
 		if (unlikely(!mqp)) {
 			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
 				     cq->mcq.cqn, qpn);
@@ -514,11 +514,11 @@
 	case MLX5_CQE_SIG_ERR:
 		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
 
-		read_lock(&dev->mdev.priv.mr_table.lock);
-		mmr = __mlx5_mr_lookup(&dev->mdev,
+		read_lock(&dev->mdev->priv.mr_table.lock);
+		mmr = __mlx5_mr_lookup(dev->mdev,
 				       mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
 		if (unlikely(!mmr)) {
-			read_unlock(&dev->mdev.priv.mr_table.lock);
+			read_unlock(&dev->mdev->priv.mr_table.lock);
 			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
 				     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
 			return -EINVAL;
@@ -536,7 +536,7 @@
 			     mr->sig->err_item.expected,
 			     mr->sig->err_item.actual);
 
-		read_unlock(&dev->mdev.priv.mr_table.lock);
+		read_unlock(&dev->mdev->priv.mr_table.lock);
 		goto repoll;
 	}
 
@@ -575,8 +575,8 @@
 	mlx5_cq_arm(&to_mcq(ibcq)->mcq,
 		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-		    to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
-		    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
+		    to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
+		    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));
 
 	return 0;
 }
@@ -586,7 +586,7 @@
 {
 	int err;
 
-	err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
+	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
 			     PAGE_SIZE * 2, &buf->buf);
 	if (err)
 		return err;
@@ -691,7 +691,7 @@
 {
 	int err;
 
-	err = mlx5_db_alloc(&dev->mdev, &cq->db);
+	err = mlx5_db_alloc(dev->mdev, &cq->db);
 	if (err)
 		return err;
 
@@ -716,7 +716,7 @@
 	mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
 
 	(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-	*index = dev->mdev.priv.uuari.uars[0].index;
+	*index = dev->mdev->priv.uuari.uars[0].index;
 
 	return 0;
 
@@ -724,14 +724,14 @@
 	free_cq_buf(dev, &cq->buf);
 
 err_db:
-	mlx5_db_free(&dev->mdev, &cq->db);
+	mlx5_db_free(dev->mdev, &cq->db);
 	return err;
 }
 
 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 {
 	free_cq_buf(dev, &cq->buf);
-	mlx5_db_free(&dev->mdev, &cq->db);
+	mlx5_db_free(dev->mdev, &cq->db);
 }
 
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
@@ -752,7 +752,7 @@
 		return ERR_PTR(-EINVAL);
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev.caps.max_cqes)
+	if (entries > dev->mdev->caps.max_cqes)
 		return ERR_PTR(-EINVAL);
 
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -789,7 +789,7 @@
 	cqb->ctx.c_eqn = cpu_to_be16(eqn);
 	cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
 
-	err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
+	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
 	if (err)
 		goto err_cqb;
 
@@ -809,7 +809,7 @@
 	return &cq->ibcq;
 
 err_cmd:
-	mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
+	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
 
 err_cqb:
 	mlx5_vfree(cqb);
@@ -834,7 +834,7 @@
 	if (cq->uobject)
 		context = cq->uobject->context;
 
-	mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
+	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
 	if (context)
 		destroy_cq_user(mcq, context);
 	else
@@ -919,7 +919,7 @@
 	int err;
 	u32 fsel;
 
-	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
 		return -ENOSYS;
 
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -931,7 +931,7 @@
 	in->ctx.cq_period = cpu_to_be16(cq_period);
 	in->ctx.cq_max_count = cpu_to_be16(cq_count);
 	in->field_select = cpu_to_be32(fsel);
-	err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+	err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
 	kfree(in);
 
 	if (err)
@@ -1074,7 +1074,7 @@
 	int uninitialized_var(cqe_size);
 	unsigned long flags;
 
-	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
 		pr_info("Firmware does not support resize CQ\n");
 		return -ENOSYS;
 	}
@@ -1083,7 +1083,7 @@
 		return -EINVAL;
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev.caps.max_cqes + 1)
+	if (entries > dev->mdev->caps.max_cqes + 1)
 		return -EINVAL;
 
 	if (entries == ibcq->cqe + 1)
@@ -1128,7 +1128,7 @@
 	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
 	in->cqn = cpu_to_be32(cq->mcq.cqn);
 
-	err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
 	if (err)
 		goto ex_alloc;
 
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 5c8938b..b514bbb 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -41,7 +41,7 @@
 };
 
 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
-		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+		 u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
 		 void *in_mad, void *response_mad)
 {
 	u8 op_modifier = 0;
@@ -54,7 +54,7 @@
 	if (ignore_bkey || !in_wc)
 		op_modifier |= 0x2;
 
-	return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
+	return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
 }
 
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -129,7 +129,7 @@
 
 	packet_error = be16_to_cpu(out_mad->status);
 
-	dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
+	dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
 		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
 
 out:
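The mlx5_ib churn in cq.c, mad.c and main.c is one mechanical conversion: struct mlx5_core_dev is no longer embedded in struct mlx5_ib_dev but referenced through a pointer shared with the mlx5_core driver (the profile table moves there too), so every &dev->mdev becomes dev->mdev. An illustrative, not complete, view of the struct change:

	struct mlx5_ib_dev {
		struct ib_device	ib_dev;
		struct mlx5_core_dev	*mdev;	/* was: struct mlx5_core_dev mdev; */
		/* ... remaining members unchanged ... */
	};
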
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 364d4b6..d8907b2 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -54,96 +54,17 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
-static int prof_sel = 2;
-module_param_named(prof_sel, prof_sel, int, 0444);
-MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+static int deprecated_prof_sel = 2;
+module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");
 
 static char mlx5_version[] =
 	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
 	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
 
-static struct mlx5_profile profile[] = {
-	[0] = {
-		.mask		= 0,
-	},
-	[1] = {
-		.mask		= MLX5_PROF_MASK_QP_SIZE,
-		.log_max_qp	= 12,
-	},
-	[2] = {
-		.mask		= MLX5_PROF_MASK_QP_SIZE |
-				  MLX5_PROF_MASK_MR_CACHE,
-		.log_max_qp	= 17,
-		.mr_cache[0]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[1]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[2]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[3]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[4]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[5]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[6]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[7]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[8]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[9]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[10]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[11]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[12]	= {
-			.size	= 64,
-			.limit	= 32
-		},
-		.mr_cache[13]	= {
-			.size	= 32,
-			.limit	= 16
-		},
-		.mr_cache[14]	= {
-			.size	= 16,
-			.limit	= 8
-		},
-		.mr_cache[15]	= {
-			.size	= 8,
-			.limit	= 4
-		},
-	},
-};
-
 int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 {
-	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+	struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
 	struct mlx5_eq *eq, *n;
 	int err = -ENOENT;
 
@@ -163,7 +84,7 @@
 
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+	struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
 	char name[MLX5_MAX_EQ_NAME];
 	struct mlx5_eq *eq, *n;
 	int ncomp_vec;
@@ -182,9 +103,9 @@
 		}
 
 		snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
-		err = mlx5_create_map_eq(&dev->mdev, eq,
+		err = mlx5_create_map_eq(dev->mdev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-					 name, &dev->mdev.priv.uuari.uars[0]);
+					 name, &dev->mdev->priv.uuari.uars[0]);
 		if (err) {
 			kfree(eq);
 			goto clean;
@@ -204,7 +125,7 @@
 	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
 		list_del(&eq->list);
 		spin_unlock(&table->lock);
-		if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
+		if (mlx5_destroy_unmap_eq(dev->mdev, eq))
 			mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
 		kfree(eq);
 		spin_lock(&table->lock);
@@ -215,14 +136,14 @@
 
 static void free_comp_eqs(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+	struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
 	struct mlx5_eq *eq, *n;
 
 	spin_lock(&table->lock);
 	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
 		list_del(&eq->list);
 		spin_unlock(&table->lock);
-		if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
+		if (mlx5_destroy_unmap_eq(dev->mdev, eq))
 			mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
 		kfree(eq);
 		spin_lock(&table->lock);
@@ -255,14 +176,14 @@
 
 	memset(props, 0, sizeof(*props));
 
-	props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) |
-		(fw_rev_min(&dev->mdev) << 16) |
-		fw_rev_sub(&dev->mdev);
+	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
+		(fw_rev_min(dev->mdev) << 16) |
+		fw_rev_sub(dev->mdev);
 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT		|
 		IB_DEVICE_SYS_IMAGE_GUID		|
 		IB_DEVICE_RC_RNR_NAK_GEN;
-	flags = dev->mdev.caps.flags;
+	flags = dev->mdev->caps.flags;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -292,30 +213,30 @@
 	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);
 
 	props->max_mr_size	   = ~0ull;
-	props->page_size_cap	   = dev->mdev.caps.min_page_sz;
-	props->max_qp		   = 1 << dev->mdev.caps.log_max_qp;
-	props->max_qp_wr	   = dev->mdev.caps.max_wqes;
-	max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
-	max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
+	props->page_size_cap	   = dev->mdev->caps.min_page_sz;
+	props->max_qp		   = 1 << dev->mdev->caps.log_max_qp;
+	props->max_qp_wr	   = dev->mdev->caps.max_wqes;
+	max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
+	max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	props->max_sge = min(max_rq_sg, max_sq_sg);
-	props->max_cq		   = 1 << dev->mdev.caps.log_max_cq;
-	props->max_cqe		   = dev->mdev.caps.max_cqes - 1;
-	props->max_mr		   = 1 << dev->mdev.caps.log_max_mkey;
-	props->max_pd		   = 1 << dev->mdev.caps.log_max_pd;
-	props->max_qp_rd_atom	   = dev->mdev.caps.max_ra_req_qp;
-	props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp;
+	props->max_cq		   = 1 << dev->mdev->caps.log_max_cq;
+	props->max_cqe		   = dev->mdev->caps.max_cqes - 1;
+	props->max_mr		   = 1 << dev->mdev->caps.log_max_mkey;
+	props->max_pd		   = 1 << dev->mdev->caps.log_max_pd;
+	props->max_qp_rd_atom	   = dev->mdev->caps.max_ra_req_qp;
+	props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp;
 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
-	props->max_srq		   = 1 << dev->mdev.caps.log_max_srq;
-	props->max_srq_wr	   = dev->mdev.caps.max_srq_wqes - 1;
+	props->max_srq		   = 1 << dev->mdev->caps.log_max_srq;
+	props->max_srq_wr	   = dev->mdev->caps.max_srq_wqes - 1;
 	props->max_srq_sge	   = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
-	props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
+	props->local_ca_ack_delay  = dev->mdev->caps.local_ca_ack_delay;
 	props->atomic_cap	   = IB_ATOMIC_NONE;
 	props->masked_atomic_cap   = IB_ATOMIC_NONE;
 	props->max_pkeys	   = be16_to_cpup((__be16 *)(out_mad->data + 28));
-	props->max_mcast_grp	   = 1 << dev->mdev.caps.log_max_mcg;
-	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
+	props->max_mcast_grp	   = 1 << dev->mdev->caps.log_max_mcg;
+	props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
@@ -336,7 +257,7 @@
 	int ext_active_speed;
 	int err = -ENOMEM;
 
-	if (port < 1 || port > dev->mdev.caps.num_ports) {
+	if (port < 1 || port > dev->mdev->caps.num_ports) {
 		mlx5_ib_warn(dev, "invalid port number %d\n", port);
 		return -EINVAL;
 	}
@@ -367,8 +288,8 @@
 	props->phys_state	= out_mad->data[33] >> 4;
 	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
 	props->gid_tbl_len	= out_mad->data[50];
-	props->max_msg_sz	= 1 << to_mdev(ibdev)->mdev.caps.log_max_msg;
-	props->pkey_tbl_len	= to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len;
+	props->max_msg_sz	= 1 << to_mdev(ibdev)->mdev->caps.log_max_msg;
+	props->pkey_tbl_len	= to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len;
 	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
 	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
 	props->active_width	= out_mad->data[31] & 0xf;
@@ -395,7 +316,7 @@
 
 	/* If reported active speed is QDR, check if is FDR-10 */
 	if (props->active_speed == 4) {
-		if (dev->mdev.caps.ext_port_cap[port - 1] &
+		if (dev->mdev->caps.ext_port_cap[port - 1] &
 		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
 			init_query_mad(in_mad);
 			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -508,7 +429,7 @@
 	 * a 144 trap.  If cmd fails, just ignore.
 	 */
 	memcpy(&in, props->node_desc, 64);
-	err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out,
+	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
 				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
 	if (err)
 		return err;
@@ -535,7 +456,7 @@
 	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
 		~props->clr_port_cap_mask;
 
-	err = mlx5_set_port_caps(&dev->mdev, port, tmp);
+	err = mlx5_set_port_caps(dev->mdev, port, tmp);
 
 out:
 	mutex_unlock(&dev->cap_mask_mutex);
@@ -557,7 +478,7 @@
 	int uuarn;
 	int err;
 	int i;
-	int reqlen;
+	size_t reqlen;
 
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
@@ -591,14 +512,14 @@
 
 	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
 	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
-	resp.qp_tab_size      = 1 << dev->mdev.caps.log_max_qp;
-	resp.bf_reg_size      = dev->mdev.caps.bf_reg_size;
+	resp.qp_tab_size      = 1 << dev->mdev->caps.log_max_qp;
+	resp.bf_reg_size      = dev->mdev->caps.bf_reg_size;
 	resp.cache_line_size  = L1_CACHE_BYTES;
-	resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz;
-	resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz;
-	resp.max_send_wqebb = dev->mdev.caps.max_wqes;
-	resp.max_recv_wr = dev->mdev.caps.max_wqes;
-	resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes;
+	resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz;
+	resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz;
+	resp.max_send_wqebb = dev->mdev->caps.max_wqes;
+	resp.max_recv_wr = dev->mdev->caps.max_wqes;
+	resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes;
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
@@ -635,7 +556,7 @@
 	}
 
 	for (i = 0; i < num_uars; i++) {
-		err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index);
+		err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
 		if (err)
 			goto out_count;
 	}
@@ -644,7 +565,7 @@
 	mutex_init(&context->db_page_mutex);
 
 	resp.tot_uuars = req.total_num_uuars;
-	resp.num_ports = dev->mdev.caps.num_ports;
+	resp.num_ports = dev->mdev->caps.num_ports;
 	err = ib_copy_to_udata(udata, &resp,
 			       sizeof(resp) - sizeof(resp.reserved));
 	if (err)
@@ -658,7 +579,7 @@
 
 out_uars:
 	for (i--; i >= 0; i--)
-		mlx5_cmd_free_uar(&dev->mdev, uars[i].index);
+		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
 out_count:
 	kfree(uuari->count);
 
@@ -681,7 +602,7 @@
 	int i;
 
 	for (i = 0; i < uuari->num_uars; i++) {
-		if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index))
+		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
 			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
 	}
 
@@ -695,7 +616,7 @@
 
 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
 {
-	return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index;
+	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
 }
 
 static int get_command(unsigned long offset)
@@ -773,7 +694,7 @@
 	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	seg->start_addr = 0;
 
-	err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in),
+	err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
 				    NULL, NULL, NULL);
 	if (err) {
 		mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
@@ -798,7 +719,7 @@
 
 	memset(&mr, 0, sizeof(mr));
 	mr.key = key;
-	err = mlx5_core_destroy_mkey(&dev->mdev, &mr);
+	err = mlx5_core_destroy_mkey(dev->mdev, &mr);
 	if (err)
 		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
 }
@@ -815,7 +736,7 @@
 	if (!pd)
 		return ERR_PTR(-ENOMEM);
 
-	err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn);
+	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
 	if (err) {
 		kfree(pd);
 		return ERR_PTR(err);
@@ -824,14 +745,14 @@
 	if (context) {
 		resp.pdn = pd->pdn;
 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
-			mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
+			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
 			kfree(pd);
 			return ERR_PTR(-EFAULT);
 		}
 	} else {
 		err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
 		if (err) {
-			mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
+			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
 			kfree(pd);
 			return ERR_PTR(err);
 		}
@@ -848,7 +769,7 @@
 	if (!pd->uobject)
 		free_pa_mkey(mdev, mpd->pa_lkey);
 
-	mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn);
+	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
 	kfree(mpd);
 
 	return 0;
@@ -859,7 +780,7 @@
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	int err;
 
-	err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num);
+	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
 	if (err)
 		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
 			     ibqp->qp_num, gid->raw);
@@ -872,7 +793,7 @@
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	int err;
 
-	err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num);
+	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
 	if (err)
 		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
 			     ibqp->qp_num, gid->raw);
@@ -906,7 +827,7 @@
 	if (err)
 		goto out;
 
-	dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
+	dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
 	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
 
 out:
@@ -921,7 +842,7 @@
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
 
-	return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages);
+	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
 }
 
 static ssize_t show_reg_pages(struct device *device,
@@ -930,7 +851,7 @@
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
 
-	return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages);
+	return sprintf(buf, "%d\n", dev->mdev->priv.reg_pages);
 }
 
 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
@@ -938,7 +859,7 @@
 {
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-	return sprintf(buf, "MT%d\n", dev->mdev.pdev->device);
+	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
 }
 
 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -946,8 +867,8 @@
 {
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev),
-		       fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev));
+	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
@@ -955,7 +876,7 @@
 {
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-	return sprintf(buf, "%x\n", dev->mdev.rev_id);
+	return sprintf(buf, "%x\n", dev->mdev->rev_id);
 }
 
 static ssize_t show_board(struct device *device, struct device_attribute *attr,
@@ -964,7 +885,7 @@
 	struct mlx5_ib_dev *dev =
 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
 	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
-		       dev->mdev.board_id);
+		       dev->mdev->board_id);
 }
 
 static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
@@ -983,11 +904,12 @@
 	&dev_attr_reg_pages,
 };
 
-static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
-			  void *data)
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+			  enum mlx5_dev_event event, unsigned long param)
 {
-	struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
+	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
 	struct ib_event ibev;
+
 	u8 port = 0;
 
 	switch (event) {
@@ -998,12 +920,12 @@
 
 	case MLX5_DEV_EVENT_PORT_UP:
 		ibev.event = IB_EVENT_PORT_ACTIVE;
-		port = *(u8 *)data;
+		port = (u8)param;
 		break;
 
 	case MLX5_DEV_EVENT_PORT_DOWN:
 		ibev.event = IB_EVENT_PORT_ERR;
-		port = *(u8 *)data;
+		port = (u8)param;
 		break;
 
 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
@@ -1012,22 +934,22 @@
 
 	case MLX5_DEV_EVENT_LID_CHANGE:
 		ibev.event = IB_EVENT_LID_CHANGE;
-		port = *(u8 *)data;
+		port = (u8)param;
 		break;
 
 	case MLX5_DEV_EVENT_PKEY_CHANGE:
 		ibev.event = IB_EVENT_PKEY_CHANGE;
-		port = *(u8 *)data;
+		port = (u8)param;
 		break;
 
 	case MLX5_DEV_EVENT_GUID_CHANGE:
 		ibev.event = IB_EVENT_GID_CHANGE;
-		port = *(u8 *)data;
+		port = (u8)param;
 		break;
 
 	case MLX5_DEV_EVENT_CLIENT_REREG:
 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
-		port = *(u8 *)data;
+		port = (u8)param;
 		break;
 	}
 
@@ -1047,7 +969,7 @@
 {
 	int port;
 
-	for (port = 1; port <= dev->mdev.caps.num_ports; port++)
+	for (port = 1; port <= dev->mdev->caps.num_ports; port++)
 		mlx5_query_ext_port_caps(dev, port);
 }
 
@@ -1072,14 +994,14 @@
 		goto out;
 	}
 
-	for (port = 1; port <= dev->mdev.caps.num_ports; port++) {
+	for (port = 1; port <= dev->mdev->caps.num_ports; port++) {
 		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
 		if (err) {
 			mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
 			break;
 		}
-		dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
-		dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
+		dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
+		dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
 		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
 			    dprops->max_pkeys, pprops->gid_tbl_len);
 	}
@@ -1328,10 +1250,8 @@
 	mlx5_ib_dealloc_pd(devr->p0);
 }
 
-static int init_one(struct pci_dev *pdev,
-		    const struct pci_device_id *id)
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_core_dev *mdev;
 	struct mlx5_ib_dev *dev;
 	int err;
 	int i;
@@ -1340,28 +1260,19 @@
 
 	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
 	if (!dev)
-		return -ENOMEM;
+		return NULL;
 
-	mdev = &dev->mdev;
-	mdev->event = mlx5_ib_event;
-	if (prof_sel >= ARRAY_SIZE(profile)) {
-		pr_warn("selected pofile out of range, selceting default\n");
-		prof_sel = 0;
-	}
-	mdev->profile = &profile[prof_sel];
-	err = mlx5_dev_init(mdev, pdev);
-	if (err)
-		goto err_free;
+	dev->mdev = mdev;
 
 	err = get_port_caps(dev);
 	if (err)
-		goto err_cleanup;
+		goto err_dealloc;
 
 	get_ext_port_caps(dev);
 
 	err = alloc_comp_eqs(dev);
 	if (err)
-		goto err_cleanup;
+		goto err_dealloc;
 
 	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
 
@@ -1480,7 +1391,7 @@
 
 	dev->ib_active = true;
 
-	return 0;
+	return dev;
 
 err_umrc:
 	destroy_umrc_res(dev);
@@ -1494,49 +1405,39 @@
 err_eqs:
 	free_comp_eqs(dev);
 
-err_cleanup:
-	mlx5_dev_cleanup(mdev);
-
-err_free:
+err_dealloc:
 	ib_dealloc_device((struct ib_device *)dev);
 
-	return err;
+	return NULL;
 }
 
-static void remove_one(struct pci_dev *pdev)
+static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 {
-	struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);
-
+	struct mlx5_ib_dev *dev = context;
 	destroy_umrc_res(dev);
 	ib_unregister_device(&dev->ib_dev);
 	destroy_dev_resources(&dev->devr);
 	free_comp_eqs(dev);
-	mlx5_dev_cleanup(&dev->mdev);
 	ib_dealloc_device(&dev->ib_dev);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = {
-	{ PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
-	{ 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table);
-
-static struct pci_driver mlx5_ib_driver = {
-	.name		= DRIVER_NAME,
-	.id_table	= mlx5_ib_pci_table,
-	.probe		= init_one,
-	.remove		= remove_one
+static struct mlx5_interface mlx5_ib_interface = {
+	.add            = mlx5_ib_add,
+	.remove         = mlx5_ib_remove,
+	.event          = mlx5_ib_event,
 };
 
 static int __init mlx5_ib_init(void)
 {
-	return pci_register_driver(&mlx5_ib_driver);
+	if (deprecated_prof_sel != 2)
+		pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
+
+	return mlx5_register_interface(&mlx5_ib_interface);
 }
 
 static void __exit mlx5_ib_cleanup(void)
 {
-	pci_unregister_driver(&mlx5_ib_driver);
+	mlx5_unregister_interface(&mlx5_ib_interface);
 }
 
 module_init(mlx5_ib_init);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 8499aec..a3e8144 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -148,7 +148,7 @@
 	u64 off_mask;
 	u64 buf_off;
 
-	page_size = 1 << page_shift;
+	page_size = (u64)1 << page_shift;
 	page_mask = page_size - 1;
 	buf_off = addr & page_mask;
 	off_size = page_size >> 6;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f2ccf1a..386780f 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -360,7 +360,7 @@
 
 struct mlx5_ib_dev {
 	struct ib_device		ib_dev;
-	struct mlx5_core_dev		mdev;
+	struct mlx5_core_dev		*mdev;
 	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
 	struct list_head		eqs_list;
 	int				num_ports;
@@ -454,16 +454,6 @@
 	return container_of(ibah, struct mlx5_ib_ah, ibah);
 }
 
-static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
-{
-	return container_of(dev, struct mlx5_ib_dev, mdev);
-}
-
-static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
-{
-	return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
-}
-
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
 			struct mlx5_db *db);
 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
@@ -471,7 +461,7 @@
 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
-		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+		 u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
 		 void *in_mad, void *response_mad);
 struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
 			   struct mlx5_ib_ah *ah);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index afa873b..80b3c63 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -73,7 +73,7 @@
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u8 key;
 	unsigned long flags;
-	struct mlx5_mr_table *table = &dev->mdev.priv.mr_table;
+	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
 	int err;
 
 	spin_lock_irqsave(&ent->lock, flags);
@@ -97,9 +97,9 @@
 		return;
 	}
 
-	spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
-	key = dev->mdev.priv.mkey_key++;
-	spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
+	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
+	key = dev->mdev->priv.mkey_key++;
+	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
 	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
 
 	cache->last_add = jiffies;
@@ -155,7 +155,7 @@
 		spin_lock_irq(&ent->lock);
 		ent->pending++;
 		spin_unlock_irq(&ent->lock);
-		err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
+		err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
 					    sizeof(*in), reg_mr_callback,
 					    mr, &mr->out);
 		if (err) {
@@ -188,7 +188,7 @@
 		ent->cur--;
 		ent->size--;
 		spin_unlock_irq(&ent->lock);
-		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
 		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
 		else
@@ -479,7 +479,7 @@
 		ent->cur--;
 		ent->size--;
 		spin_unlock_irq(&ent->lock);
-		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
 		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
 		else
@@ -496,7 +496,7 @@
 	if (!mlx5_debugfs_root)
 		return 0;
 
-	cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root);
+	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
 	if (!cache->root)
 		return -ENOMEM;
 
@@ -571,8 +571,8 @@
 		ent->order = i + 2;
 		ent->dev = dev;
 
-		if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE)
-			limit = dev->mdev.profile->mr_cache[i].limit;
+		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
+			limit = dev->mdev->profile->mr_cache[i].limit;
 		else
 			limit = 0;
 
@@ -610,7 +610,7 @@
 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_core_dev *mdev = &dev->mdev;
+	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_mkey_seg *seg;
 	struct mlx5_ib_mr *mr;
@@ -846,7 +846,7 @@
 	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
 							 1 << page_shift));
-	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
 				    NULL, NULL);
 	if (err) {
 		mlx5_ib_warn(dev, "create mkey failed\n");
@@ -923,7 +923,7 @@
 	mr->umem = umem;
 	mr->npages = npages;
 	spin_lock(&dev->mr_lock);
-	dev->mdev.priv.reg_pages += npages;
+	dev->mdev->priv.reg_pages += npages;
 	spin_unlock(&dev->mr_lock);
 	mr->ibmr.lkey = mr->mmr.key;
 	mr->ibmr.rkey = mr->mmr.key;
@@ -978,7 +978,7 @@
 	int err;
 
 	if (!umred) {
-		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
 				     mr->mmr.key, err);
@@ -996,7 +996,7 @@
 	if (umem) {
 		ib_umem_release(umem);
 		spin_lock(&dev->mr_lock);
-		dev->mdev.priv.reg_pages -= npages;
+		dev->mdev->priv.reg_pages -= npages;
 		spin_unlock(&dev->mr_lock);
 	}
 
@@ -1044,7 +1044,7 @@
 		}
 
 		/* create mem & wire PSVs */
-		err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn,
+		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
 					   2, psv_index);
 		if (err)
 			goto err_free_sig;
@@ -1060,7 +1060,7 @@
 	}
 
 	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
-	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in),
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
 				    NULL, NULL, NULL);
 	if (err)
 		goto err_destroy_psv;
@@ -1074,11 +1074,11 @@
 
 err_destroy_psv:
 	if (mr->sig) {
-		if (mlx5_core_destroy_psv(&dev->mdev,
+		if (mlx5_core_destroy_psv(dev->mdev,
 					  mr->sig->psv_memory.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
 				     mr->sig->psv_memory.psv_idx);
-		if (mlx5_core_destroy_psv(&dev->mdev,
+		if (mlx5_core_destroy_psv(dev->mdev,
 					  mr->sig->psv_wire.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
 				     mr->sig->psv_wire.psv_idx);
@@ -1099,18 +1099,18 @@
 	int err;
 
 	if (mr->sig) {
-		if (mlx5_core_destroy_psv(&dev->mdev,
+		if (mlx5_core_destroy_psv(dev->mdev,
 					  mr->sig->psv_memory.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
 				     mr->sig->psv_memory.psv_idx);
-		if (mlx5_core_destroy_psv(&dev->mdev,
+		if (mlx5_core_destroy_psv(dev->mdev,
 					  mr->sig->psv_wire.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
 				     mr->sig->psv_wire.psv_idx);
 		kfree(mr->sig);
 	}
 
-	err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+	err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
 	if (err) {
 		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
 			     mr->mmr.key, err);
@@ -1149,7 +1149,7 @@
 	 * TBD not needed - issue 197292 */
 	in->seg.log2_page_size = PAGE_SHIFT;
 
-	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
 				    NULL, NULL);
 	kfree(in);
 	if (err)
@@ -1202,7 +1202,7 @@
 	struct mlx5_ib_dev *dev = to_mdev(page_list->device);
 	int size = page_list->max_page_list_len * sizeof(u64);
 
-	dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list,
+	dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
 			  mfrpl->map);
 	kfree(mfrpl->ibfrpl.page_list);
 	kfree(mfrpl);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index bbbcf38..7efe6e3f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -162,7 +162,7 @@
 	int wq_size;
 
 	/* Sanity check RQ size before proceeding */
-	if (cap->max_recv_wr  > dev->mdev.caps.max_wqes)
+	if (cap->max_recv_wr  > dev->mdev->caps.max_wqes)
 		return -EINVAL;
 
 	if (!has_rq) {
@@ -182,10 +182,10 @@
 			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
 			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
 			qp->rq.wqe_cnt = wq_size / wqe_size;
-			if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
+			if (wqe_size > dev->mdev->caps.max_rq_desc_sz) {
 				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
 					    wqe_size,
-					    dev->mdev.caps.max_rq_desc_sz);
+					    dev->mdev->caps.max_rq_desc_sz);
 				return -EINVAL;
 			}
 			qp->rq.wqe_shift = ilog2(wqe_size);
@@ -277,9 +277,9 @@
 	if (wqe_size < 0)
 		return wqe_size;
 
-	if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
+	if (wqe_size > dev->mdev->caps.max_sq_desc_sz) {
 		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
-			    wqe_size, dev->mdev.caps.max_sq_desc_sz);
+			    wqe_size, dev->mdev->caps.max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -292,9 +292,9 @@
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
-	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
 		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
-			    qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+			    qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
 		return -ENOMEM;
 	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -311,9 +311,9 @@
 {
 	int desc_sz = 1 << qp->sq.wqe_shift;
 
-	if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
+	if (desc_sz > dev->mdev->caps.max_sq_desc_sz) {
 		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
-			     desc_sz, dev->mdev.caps.max_sq_desc_sz);
+			     desc_sz, dev->mdev->caps.max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -325,9 +325,9 @@
 
 	qp->sq.wqe_cnt = ucmd->sq_wqe_count;
 
-	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
 		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
-			     qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+			     qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
 		return -EINVAL;
 	}
 
@@ -674,7 +674,7 @@
 	int uuarn;
 	int err;
 
-	uuari = &dev->mdev.priv.uuari;
+	uuari = &dev->mdev->priv.uuari;
 	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
 		return -EINVAL;
 
@@ -700,7 +700,7 @@
 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
 	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
 
-	err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
+	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
 	if (err) {
 		mlx5_ib_dbg(dev, "err %d\n", err);
 		goto err_uuar;
@@ -722,7 +722,7 @@
 
 	mlx5_fill_page_array(&qp->buf, (*in)->pas);
 
-	err = mlx5_db_alloc(&dev->mdev, &qp->db);
+	err = mlx5_db_alloc(dev->mdev, &qp->db);
 	if (err) {
 		mlx5_ib_dbg(dev, "err %d\n", err);
 		goto err_free;
@@ -747,7 +747,7 @@
 	return 0;
 
 err_wrid:
-	mlx5_db_free(&dev->mdev, &qp->db);
+	mlx5_db_free(dev->mdev, &qp->db);
 	kfree(qp->sq.wqe_head);
 	kfree(qp->sq.w_list);
 	kfree(qp->sq.wrid);
@@ -758,23 +758,23 @@
 	mlx5_vfree(*in);
 
 err_buf:
-	mlx5_buf_free(&dev->mdev, &qp->buf);
+	mlx5_buf_free(dev->mdev, &qp->buf);
 
 err_uuar:
-	free_uuar(&dev->mdev.priv.uuari, uuarn);
+	free_uuar(&dev->mdev->priv.uuari, uuarn);
 	return err;
 }
 
 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
-	mlx5_db_free(&dev->mdev, &qp->db);
+	mlx5_db_free(dev->mdev, &qp->db);
 	kfree(qp->sq.wqe_head);
 	kfree(qp->sq.w_list);
 	kfree(qp->sq.wrid);
 	kfree(qp->sq.wr_data);
 	kfree(qp->rq.wrid);
-	mlx5_buf_free(&dev->mdev, &qp->buf);
-	free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn);
+	mlx5_buf_free(dev->mdev, &qp->buf);
+	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
 }
 
 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
@@ -812,7 +812,7 @@
 	spin_lock_init(&qp->rq.lock);
 
 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-		if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
 			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
 			return -EINVAL;
 		} else {
@@ -851,9 +851,9 @@
 				mlx5_ib_dbg(dev, "invalid rq params\n");
 				return -EINVAL;
 			}
-			if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) {
+			if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) {
 				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
-					    ucmd.sq_wqe_count, dev->mdev.caps.max_wqes);
+					    ucmd.sq_wqe_count, dev->mdev->caps.max_wqes);
 				return -EINVAL;
 			}
 			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -957,7 +957,7 @@
 
 	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
 
-	err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen);
+	err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
 	if (err) {
 		mlx5_ib_dbg(dev, "create qp failed\n");
 		goto err_create;
@@ -1081,7 +1081,7 @@
 	if (!in)
 		return;
 	if (qp->state != IB_QPS_RESET)
-		if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state),
+		if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
 					MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
 			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
 				     qp->mqp.qpn);
@@ -1097,7 +1097,7 @@
 		mlx5_ib_unlock_cqs(send_cq, recv_cq);
 	}
 
-	err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp);
+	err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
 	if (err)
 		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
 	kfree(in);
@@ -1165,7 +1165,7 @@
 	switch (init_attr->qp_type) {
 	case IB_QPT_XRC_TGT:
 	case IB_QPT_XRC_INI:
-		if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
+		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
 			mlx5_ib_dbg(dev, "XRC not supported\n");
 			return ERR_PTR(-ENOSYS);
 		}
@@ -1279,7 +1279,7 @@
 	} else {
 		while (rate != IB_RATE_2_5_GBPS &&
 		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-			 dev->mdev.caps.stat_rate_support))
+			 dev->mdev->caps.stat_rate_support))
 			--rate;
 	}
 
@@ -1318,9 +1318,9 @@
 	path->port = port;
 
 	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
+		if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) {
 			pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
-			       ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len);
+			       ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len);
 			return -EINVAL;
 		}
 
@@ -1539,7 +1539,7 @@
 			err = -EINVAL;
 			goto out;
 		}
-		context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg;
+		context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg;
 	}
 
 	if (attr_mask & IB_QP_DEST_QPN)
@@ -1637,7 +1637,7 @@
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
 	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
 	in->optparam = cpu_to_be32(optpar);
-	err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state),
+	err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
 				  to_mlx5_state(new_state), in, sqd_event,
 				  &qp->mqp);
 	if (err)
@@ -1699,21 +1699,21 @@
 		goto out;
 
 	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports))
+	    (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports))
 		goto out;
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-		if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len)
+		if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len)
 			goto out;
 	}
 
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp)
+	    attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp)
 		goto out;
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp)
+	    attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp)
 		goto out;
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -2479,7 +2479,7 @@
 {
 	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
-	struct mlx5_core_dev *mdev = &dev->mdev;
+	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_ib_mr *mr;
 	struct mlx5_wqe_data_seg *dpseg;
@@ -2539,7 +2539,7 @@
 			case IB_WR_RDMA_WRITE_WITH_IMM:
 				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
 					      wr->wr.rdma.rkey);
-				seg  += sizeof(struct mlx5_wqe_raddr_seg);
+				seg += sizeof(struct mlx5_wqe_raddr_seg);
 				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
 				break;
 
@@ -2668,7 +2668,7 @@
 		case IB_QPT_SMI:
 		case IB_QPT_GSI:
 			set_datagram_seg(seg, wr);
-			seg  += sizeof(struct mlx5_wqe_datagram_seg);
+			seg += sizeof(struct mlx5_wqe_datagram_seg);
 			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
 			if (unlikely((seg == qend)))
 				seg = mlx5_get_send_wqe(qp, 0);
@@ -2888,7 +2888,7 @@
 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
 				struct mlx5_qp_path *path)
 {
-	struct mlx5_core_dev *dev = &ibdev->mdev;
+	struct mlx5_core_dev *dev = ibdev->mdev;
 
 	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
 	ib_ah_attr->port_num	  = path->port;
@@ -2931,7 +2931,7 @@
 		goto out;
 	}
 	context = &outb->ctx;
-	err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb));
+	err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
 	if (err)
 		goto out_free;
 
@@ -3014,14 +3014,14 @@
 	struct mlx5_ib_xrcd *xrcd;
 	int err;
 
-	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
+	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC))
 		return ERR_PTR(-ENOSYS);
 
 	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
 	if (!xrcd)
 		return ERR_PTR(-ENOMEM);
 
-	err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
+	err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
 	if (err) {
 		kfree(xrcd);
 		return ERR_PTR(-ENOMEM);
@@ -3036,7 +3036,7 @@
 	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
 	int err;
 
-	err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
+	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
 	if (err) {
 		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
 		return err;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 384af6d..70bd131 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -159,7 +159,7 @@
 	int page_shift;
 	int npages;
 
-	err = mlx5_db_alloc(&dev->mdev, &srq->db);
+	err = mlx5_db_alloc(dev->mdev, &srq->db);
 	if (err) {
 		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
 		return err;
@@ -167,7 +167,7 @@
 
 	*srq->db.db = 0;
 
-	if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+	if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
 		mlx5_ib_dbg(dev, "buf alloc failed\n");
 		err = -ENOMEM;
 		goto err_db;
@@ -212,10 +212,10 @@
 	mlx5_vfree(*in);
 
 err_buf:
-	mlx5_buf_free(&dev->mdev, &srq->buf);
+	mlx5_buf_free(dev->mdev, &srq->buf);
 
 err_db:
-	mlx5_db_free(&dev->mdev, &srq->db);
+	mlx5_db_free(dev->mdev, &srq->db);
 	return err;
 }
 
@@ -229,8 +229,8 @@
 static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
 {
 	kfree(srq->wrid);
-	mlx5_buf_free(&dev->mdev, &srq->buf);
-	mlx5_db_free(&dev->mdev, &srq->db);
+	mlx5_buf_free(dev->mdev, &srq->buf);
+	mlx5_db_free(dev->mdev, &srq->db);
 }
 
 struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
@@ -248,10 +248,10 @@
 	u32 flgs, xrcdn;
 
 	/* Sanity check SRQ size before proceeding */
-	if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) {
+	if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) {
 		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
 			    init_attr->attr.max_wr,
-			    dev->mdev.caps.max_srq_wqes);
+			    dev->mdev->caps.max_srq_wqes);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -303,7 +303,7 @@
 
 	in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
 	in->ctx.db_record = cpu_to_be64(srq->db.dma);
-	err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen);
+	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
 	mlx5_vfree(in);
 	if (err) {
 		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
@@ -327,7 +327,7 @@
 	return &srq->ibsrq;
 
 err_core:
-	mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+	mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
 
 err_usr_kern_srq:
 	if (pd->uobject)
@@ -357,7 +357,7 @@
 			return -EINVAL;
 
 		mutex_lock(&srq->mutex);
-		ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1);
+		ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
 		mutex_unlock(&srq->mutex);
 
 		if (ret)
@@ -378,7 +378,7 @@
 	if (!out)
 		return -ENOMEM;
 
-	ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out);
+	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
 	if (ret)
 		goto out_box;
 
@@ -396,7 +396,7 @@
 	struct mlx5_ib_dev *dev = to_mdev(srq->device);
 	struct mlx5_ib_srq *msrq = to_msrq(srq);
 
-	mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq);
+	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);
 
 	if (srq->uobject) {
 		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1c4c0db..29ca0bb 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -257,9 +257,10 @@
 }
 
 static int input_get_disposition(struct input_dev *dev,
-			  unsigned int type, unsigned int code, int value)
+			  unsigned int type, unsigned int code, int *pval)
 {
 	int disposition = INPUT_IGNORE_EVENT;
+	int value = *pval;
 
 	switch (type) {
 
@@ -357,6 +358,7 @@
 		break;
 	}
 
+	*pval = value;
 	return disposition;
 }
 
@@ -365,7 +367,7 @@
 {
 	int disposition;
 
-	disposition = input_get_disposition(dev, type, code, value);
+	disposition = input_get_disposition(dev, type, code, &value);
 
 	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
 		dev->event(dev, type, code, value);
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index 758b487..de7be4f 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -215,6 +215,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int keyscan_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@
 	mutex_unlock(&input->mutex);
 	return retval;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
 
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index e4104f9..fed5102 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -213,7 +213,7 @@
 
 module_platform_driver(sirfsoc_pwrc_driver);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
 MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
 MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ec772d9..ef9e0b8 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -132,7 +132,8 @@
 		1232, 5710, 1156, 4696
 	},
 	{
-		(const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+		(const char * const []){"LEN0034", "LEN0036", "LEN2002",
+					"LEN2004", NULL},
 		1024, 5112, 2024, 4832
 	},
 	{
@@ -168,7 +169,7 @@
 	"LEN0049",
 	"LEN2000",
 	"LEN2001", /* Edge E431 */
-	"LEN2002",
+	"LEN2002", /* Edge E531 */
 	"LEN2003",
 	"LEN2004", /* L440 */
 	"LEN2005",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 381b20d..136b7b20 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -402,6 +402,13 @@
 		},
 	},
 	{
+		/* Acer Aspire 5710 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+		},
+	},
+	{
 		/* Gericom Bellagio */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 977d05c..e73cf2c 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1217,9 +1217,9 @@
 			 * a=(pi*r^2)/C.
 			 */
 			int a = data[5];
-			int x_res  = input_abs_get_res(input, ABS_X);
-			int y_res  = input_abs_get_res(input, ABS_Y);
-			width  = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+			int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
+			int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
+			width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
 			height = width * y_res / x_res;
 		}
 
@@ -1587,7 +1587,7 @@
 		input_abs_set_res(input_dev, ABS_X, features->x_resolution);
 		input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
 	} else {
-		if (features->touch_max <= 2) {
+		if (features->touch_max == 1) {
 			input_set_abs_params(input_dev, ABS_X, 0,
 				features->x_max, features->x_fuzz, 0);
 			input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@
 	case MTTPC:
 	case MTTPC_B:
 	case TABLETPC2FG:
-		if (features->device_type == BTN_TOOL_FINGER) {
-			unsigned int flags = INPUT_MT_DIRECT;
-
-			if (wacom_wac->features.type == TABLETPC2FG)
-				flags = 0;
-
-			input_mt_init_slots(input_dev, features->touch_max, flags);
-		}
+		if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
+			input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
 		/* fall through */
 
 	case TABLETPC:
@@ -1883,10 +1877,6 @@
 			__set_bit(BTN_RIGHT, input_dev->keybit);
 
 			if (features->touch_max) {
-				/* touch interface */
-				unsigned int flags = INPUT_MT_POINTER;
-
-				__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
 				if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
 					input_set_abs_params(input_dev,
 						     ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@
 					input_set_abs_params(input_dev,
 						     ABS_MT_TOUCH_MINOR,
 						     0, features->y_max, 0, 0);
-				} else {
-					__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
-					__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
-					flags = 0;
 				}
-				input_mt_init_slots(input_dev, features->touch_max, flags);
+				input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
 			} else {
 				/* buttons/keys only interface */
 				__clear_bit(ABS_X, input_dev->absbit);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 4e793a1..2ce6495 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -359,9 +359,12 @@
 	 */
 	err = of_property_read_u32(node, "ti,coordinate-readouts",
 			&ts_dev->coordinate_readouts);
-	if (err < 0)
+	if (err < 0) {
+		dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
 		err = of_property_read_u32(node, "ti,coordiante-readouts",
 				&ts_dev->coordinate_readouts);
+	}
+
 	if (err < 0)
 		return err;
 
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index f9a87ed..6a2df32 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1260,7 +1260,7 @@
 	if (capi_ttyminors <= 0)
 		capi_ttyminors = CAPINC_NR_PORTS;
 
-	capiminors = kzalloc(sizeof(struct capi_minor *) * capi_ttyminors,
+	capiminors = kzalloc(sizeof(struct capiminor *) * capi_ttyminors,
 			     GFP_KERNEL);
 	if (!capiminors)
 		return -ENOMEM;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index c44950d..b7ae0a0 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2400,6 +2400,7 @@
 error:
 	freeurbs(cs);
 	usb_set_intfdata(interface, NULL);
+	usb_put_dev(udev);
 	gigaset_freecs(cs);
 	return rc;
 }
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index c2ed624..94affa5 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2918,8 +2918,8 @@
 			cfg->callback = 2;
 		cfg->cbhup = (lp->flags & ISDN_NET_CBHUP) ? 1 : 0;
 		cfg->dialmode = lp->flags & ISDN_NET_DIALMODE_MASK;
-		cfg->chargehup = (lp->hupflags & 4) ? 1 : 0;
-		cfg->ihup = (lp->hupflags & 8) ? 1 : 0;
+		cfg->chargehup = (lp->hupflags & ISDN_CHARGEHUP) ? 1 : 0;
+		cfg->ihup = (lp->hupflags & ISDN_INHUP) ? 1 : 0;
 		cfg->cbdelay = lp->cbdelay;
 		cfg->dialmax = lp->dialmax;
 		cfg->triggercps = lp->triggercps;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 023ec36..f0f5eab 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2287,8 +2287,7 @@
 		ip_rt_put(rt);
 		bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
 			      addr, tags);
-		if (!tags)
-			kfree(tags);
+		kfree(tags);
 	}
 }
 
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index e29b6d0..5dede6e 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -278,7 +278,8 @@
 			break;
 		}
 
-		priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
+		priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
+						     resource_size(res));
 		if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
 			dev_info(&pdev->dev, "control memory is not used for raminit\n");
 		else
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index f952fff..c9cd359 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -52,8 +52,7 @@
 config BFIN_RX_DESC_NUM
 	int "Number of receive buffer packets"
 	depends on BFIN_MAC
-	range 20 100 if BFIN_MAC_USE_L1
-	range 20 800
+	range 20 64
 	default "20"
 	---help---
 	  Set the number of buffer packets used in driver.
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 7ae74d4..afa6684 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1218,11 +1218,11 @@
 #define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
 	RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
 
-static void bfin_mac_rx(struct net_device *dev)
+static void bfin_mac_rx(struct bfin_mac_local *lp)
 {
+	struct net_device *dev = lp->ndev;
 	struct sk_buff *skb, *new_skb;
 	unsigned short len;
-	struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
 #if defined(BFIN_MAC_CSUM_OFFLOAD)
 	unsigned int i;
 	unsigned char fcs[ETH_FCS_LEN + 1];
@@ -1256,7 +1256,7 @@
 	current_rx_ptr->skb = new_skb;
 	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
 
-	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
+	len = (unsigned short)(current_rx_ptr->status.status_word & RX_FRLEN);
 	/* Deduce Ethernet FCS length from Ethernet payload length */
 	len -= ETH_FCS_LEN;
 	skb_put(skb, len);
@@ -1294,7 +1294,8 @@
 	}
 #endif
 
-	netif_rx(skb);
+	napi_gro_receive(&lp->napi, skb);
+
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += len;
 out:
@@ -1302,41 +1303,52 @@
 	current_rx_ptr = current_rx_ptr->next;
 }
 
+static int bfin_mac_poll(struct napi_struct *napi, int budget)
+{
+	int i = 0;
+	struct bfin_mac_local *lp = container_of(napi,
+						 struct bfin_mac_local,
+						 napi);
+
+	while (current_rx_ptr->status.status_word != 0 && i < budget) {
+		bfin_mac_rx(lp);
+		i++;
+	}
+
+	if (i < budget) {
+		napi_complete(napi);
+		if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
+			enable_irq(IRQ_MAC_RX);
+	}
+
+	return i;
+}
+
 /* interrupt routine to handle rx and error signal */
 static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
 {
-	struct net_device *dev = dev_id;
-	int number = 0;
+	struct bfin_mac_local *lp = netdev_priv(dev_id);
+	u32 status;
 
-get_one_packet:
-	if (current_rx_ptr->status.status_word == 0) {
-		/* no more new packet received */
-		if (number == 0) {
-			if (current_rx_ptr->next->status.status_word != 0) {
-				current_rx_ptr = current_rx_ptr->next;
-				goto real_rx;
-			}
-		}
-		bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
-					   DMA_DONE | DMA_ERR);
-		return IRQ_HANDLED;
+	status = bfin_read_DMA1_IRQ_STATUS();
+
+	bfin_write_DMA1_IRQ_STATUS(status | DMA_DONE | DMA_ERR);
+	if (status & DMA_DONE) {
+		disable_irq_nosync(IRQ_MAC_RX);
+		set_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags);
+		napi_schedule(&lp->napi);
 	}
 
-real_rx:
-	bfin_mac_rx(dev);
-	number++;
-	goto get_one_packet;
+	return IRQ_HANDLED;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static void bfin_mac_poll(struct net_device *dev)
+static void bfin_mac_poll_controller(struct net_device *dev)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
 
-	disable_irq(IRQ_MAC_RX);
 	bfin_mac_interrupt(IRQ_MAC_RX, dev);
 	tx_reclaim_skb(lp);
-	enable_irq(IRQ_MAC_RX);
 }
 #endif				/* CONFIG_NET_POLL_CONTROLLER */
 
@@ -1428,14 +1440,13 @@
 		tx_list_head = tx_list_head->next;
 	}
 
-	if (netif_queue_stopped(lp->ndev))
-		netif_wake_queue(lp->ndev);
+	if (netif_queue_stopped(dev))
+		netif_wake_queue(dev);
 
 	bfin_mac_enable(lp->phydev);
 
 	/* We can accept TX packets again */
 	dev->trans_start = jiffies; /* prevent tx timeout */
-	netif_wake_queue(dev);
 }
 
 static void bfin_mac_multicast_hash(struct net_device *dev)
@@ -1562,6 +1573,7 @@
 		return ret;
 	pr_debug("hardware init finished\n");
 
+	napi_enable(&lp->napi);
 	netif_start_queue(dev);
 	netif_carrier_on(dev);
 
@@ -1579,6 +1591,7 @@
 	pr_debug("%s: %s\n", dev->name, __func__);
 
 	netif_stop_queue(dev);
+	napi_disable(&lp->napi);
 	netif_carrier_off(dev);
 
 	phy_stop(lp->phydev);
@@ -1604,7 +1617,7 @@
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= bfin_mac_poll,
+	.ndo_poll_controller	= bfin_mac_poll_controller,
 #endif
 };
 
@@ -1689,6 +1702,9 @@
 	lp->tx_reclaim_timer.data = (unsigned long)lp;
 	lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
 
+	lp->flags = 0;
+	netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);
+
 	spin_lock_init(&lp->lock);
 
 	/* now, enable interrupts */
@@ -1723,6 +1739,7 @@
 out_err_reg_ndev:
 	free_irq(IRQ_MAC_RX, ndev);
 out_err_request_irq:
+	netif_napi_del(&lp->napi);
 out_err_mii_probe:
 	mdiobus_unregister(lp->mii_bus);
 	mdiobus_free(lp->mii_bus);
@@ -1743,6 +1760,8 @@
 
 	unregister_netdev(ndev);
 
+	netif_napi_del(&lp->napi);
+
 	free_irq(IRQ_MAC_RX, ndev);
 
 	free_netdev(ndev);
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 6dec86a..d1217db 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -26,6 +26,7 @@
 #endif
 
 #define TX_RECLAIM_JIFFIES (HZ / 5)
+#define BFIN_MAC_RX_IRQ_DISABLED	1
 
 struct dma_descriptor {
 	struct dma_descriptor *next_dma_desc;
@@ -80,6 +81,8 @@
 	int irq_wake_requested;
 	struct timer_list tx_reclaim_timer;
 	struct net_device *ndev;
+	struct napi_struct napi;
+	unsigned long flags;
 
 	/* Data for EMAC_VLAN1 regs */
 	u16 vlan1_mask, vlan2_mask;
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 6e314db..8319c99 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -184,6 +184,7 @@
 	select AMD_XGBE_PHY
 	select BITREVERSE
 	select CRC32
+	select PTP_1588_CLOCK
 	---help---
 	  This driver supports the AMD 10GbE Ethernet device found on an
 	  AMD SoC.
@@ -191,4 +192,14 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called amd-xgbe.
 
+config AMD_XGBE_DCB
+	bool "Data Center Bridging (DCB) support"
+	default n
+	depends on AMD_XGBE && DCB
+	---help---
+	  Say Y here to enable Data Center Bridging (DCB) support in the
+	  driver.
+
+	  If unsure, say N.
+
 endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 26cf9af..171a7e6 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -1,6 +1,8 @@
 obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
 
 amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
-		 xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o
+		 xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
+		 xgbe-ptp.o
 
+amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
 amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 7ec80ac..cc25a3a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -307,11 +307,24 @@
 #define MAC_MACA0LR			0x0304
 #define MAC_MACA1HR			0x0308
 #define MAC_MACA1LR			0x030c
+#define MAC_TSCR			0x0d00
+#define MAC_SSIR			0x0d04
+#define MAC_STSR			0x0d08
+#define MAC_STNR			0x0d0c
+#define MAC_STSUR			0x0d10
+#define MAC_STNUR			0x0d14
+#define MAC_TSAR			0x0d18
+#define MAC_TSSR			0x0d20
+#define MAC_TXSNR			0x0d30
+#define MAC_TXSSR			0x0d34
 
 #define MAC_QTFCR_INC			4
 #define MAC_MACA_INC			4
 #define MAC_HTR_INC			4
 
+#define MAC_RQC2_INC			4
+#define MAC_RQC2_Q_PER_REG		4
+
 /* MAC register entry bit positions and sizes */
 #define MAC_HWF0R_ADDMACADRSEL_INDEX	18
 #define MAC_HWF0R_ADDMACADRSEL_WIDTH	5
@@ -351,6 +364,8 @@
 #define MAC_HWF1R_HASHTBLSZ_WIDTH	3
 #define MAC_HWF1R_L3L4FNUM_INDEX	27
 #define MAC_HWF1R_L3L4FNUM_WIDTH	4
+#define MAC_HWF1R_NUMTC_INDEX		21
+#define MAC_HWF1R_NUMTC_WIDTH		3
 #define MAC_HWF1R_RSSEN_INDEX		20
 #define MAC_HWF1R_RSSEN_WIDTH		1
 #define MAC_HWF1R_RXFIFOSIZE_INDEX	0
@@ -373,12 +388,16 @@
 #define MAC_HWF2R_TXCHCNT_WIDTH		4
 #define MAC_HWF2R_TXQCNT_INDEX		6
 #define MAC_HWF2R_TXQCNT_WIDTH		4
+#define MAC_IER_TSIE_INDEX		12
+#define MAC_IER_TSIE_WIDTH		1
 #define MAC_ISR_MMCRXIS_INDEX		9
 #define MAC_ISR_MMCRXIS_WIDTH		1
 #define MAC_ISR_MMCTXIS_INDEX		10
 #define MAC_ISR_MMCTXIS_WIDTH		1
 #define MAC_ISR_PMTIS_INDEX		4
 #define MAC_ISR_PMTIS_WIDTH		1
+#define MAC_ISR_TSIS_INDEX		12
+#define MAC_ISR_TSIS_WIDTH		1
 #define MAC_MACA1HR_AE_INDEX		31
 #define MAC_MACA1HR_AE_WIDTH		1
 #define MAC_PFR_HMC_INDEX		2
@@ -419,14 +438,56 @@
 #define MAC_RCR_LM_WIDTH		1
 #define MAC_RCR_RE_INDEX		0
 #define MAC_RCR_RE_WIDTH		1
+#define MAC_RFCR_PFCE_INDEX		8
+#define MAC_RFCR_PFCE_WIDTH		1
 #define MAC_RFCR_RFE_INDEX		0
 #define MAC_RFCR_RFE_WIDTH		1
+#define MAC_RFCR_UP_INDEX		1
+#define MAC_RFCR_UP_WIDTH		1
 #define MAC_RQC0R_RXQ0EN_INDEX		0
 #define MAC_RQC0R_RXQ0EN_WIDTH		2
+#define MAC_SSIR_SNSINC_INDEX		8
+#define MAC_SSIR_SNSINC_WIDTH		8
+#define MAC_SSIR_SSINC_INDEX		16
+#define MAC_SSIR_SSINC_WIDTH		8
 #define MAC_TCR_SS_INDEX		29
 #define MAC_TCR_SS_WIDTH		2
 #define MAC_TCR_TE_INDEX		0
 #define MAC_TCR_TE_WIDTH		1
+#define MAC_TSCR_AV8021ASMEN_INDEX	28
+#define MAC_TSCR_AV8021ASMEN_WIDTH	1
+#define MAC_TSCR_SNAPTYPSEL_INDEX	16
+#define MAC_TSCR_SNAPTYPSEL_WIDTH	2
+#define MAC_TSCR_TSADDREG_INDEX		5
+#define MAC_TSCR_TSADDREG_WIDTH		1
+#define MAC_TSCR_TSCFUPDT_INDEX		1
+#define MAC_TSCR_TSCFUPDT_WIDTH		1
+#define MAC_TSCR_TSCTRLSSR_INDEX	9
+#define MAC_TSCR_TSCTRLSSR_WIDTH	1
+#define MAC_TSCR_TSENA_INDEX		0
+#define MAC_TSCR_TSENA_WIDTH		1
+#define MAC_TSCR_TSENALL_INDEX		8
+#define MAC_TSCR_TSENALL_WIDTH		1
+#define MAC_TSCR_TSEVNTENA_INDEX	14
+#define MAC_TSCR_TSEVNTENA_WIDTH	1
+#define MAC_TSCR_TSINIT_INDEX		2
+#define MAC_TSCR_TSINIT_WIDTH		1
+#define MAC_TSCR_TSIPENA_INDEX		11
+#define MAC_TSCR_TSIPENA_WIDTH		1
+#define MAC_TSCR_TSIPV4ENA_INDEX	13
+#define MAC_TSCR_TSIPV4ENA_WIDTH	1
+#define MAC_TSCR_TSIPV6ENA_INDEX	12
+#define MAC_TSCR_TSIPV6ENA_WIDTH	1
+#define MAC_TSCR_TSMSTRENA_INDEX	15
+#define MAC_TSCR_TSMSTRENA_WIDTH	1
+#define MAC_TSCR_TSVER2ENA_INDEX	10
+#define MAC_TSCR_TSVER2ENA_WIDTH	1
+#define MAC_TSCR_TXTSSTSM_INDEX		24
+#define MAC_TSCR_TXTSSTSM_WIDTH		1
+#define MAC_TSSR_TXTSC_INDEX		15
+#define MAC_TSSR_TXTSC_WIDTH		1
+#define MAC_TXSNR_TXTSSTSMIS_INDEX	31
+#define MAC_TXSNR_TXTSSTSMIS_WIDTH	1
 #define MAC_VLANHTR_VLHT_INDEX		0
 #define MAC_VLANHTR_VLHT_WIDTH		16
 #define MAC_VLANIR_VLTI_INDEX		20
@@ -652,6 +713,8 @@
 
 #define MTL_RQDCM_INC			4
 #define MTL_RQDCM_Q_PER_REG		4
+#define MTL_TCPM_INC			4
+#define MTL_TCPM_TC_PER_REG		4
 
 /* MTL register entry bit positions and sizes */
 #define MTL_OMR_ETSALG_INDEX		5
@@ -670,9 +733,6 @@
 #define MTL_Q_TQOMR			0x00
 #define MTL_Q_TQUR			0x04
 #define MTL_Q_TQDR			0x08
-#define MTL_Q_TCECR			0x10
-#define MTL_Q_TCESR			0x14
-#define MTL_Q_TCQWR			0x18
 #define MTL_Q_RQOMR			0x40
 #define MTL_Q_RQMPOCR			0x44
 #define MTL_Q_RQDR			0x4c
@@ -680,8 +740,6 @@
 #define MTL_Q_ISR			0x74
 
 /* MTL queue register entry bit positions and sizes */
-#define MTL_Q_TCQWR_QW_INDEX		0
-#define MTL_Q_TCQWR_QW_WIDTH		21
 #define MTL_Q_RQOMR_EHFC_INDEX		7
 #define MTL_Q_RQOMR_EHFC_WIDTH		1
 #define MTL_Q_RQOMR_RFA_INDEX		8
@@ -696,6 +754,8 @@
 #define MTL_Q_RQOMR_RTC_WIDTH		2
 #define MTL_Q_TQOMR_FTQ_INDEX		0
 #define MTL_Q_TQOMR_FTQ_WIDTH		1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX	8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH	3
 #define MTL_Q_TQOMR_TQS_INDEX		16
 #define MTL_Q_TQOMR_TQS_WIDTH		10
 #define MTL_Q_TQOMR_TSF_INDEX		1
@@ -742,10 +802,14 @@
 #define MTL_TC_INC			MTL_Q_INC
 
 #define MTL_TC_ETSCR			0x10
+#define MTL_TC_ETSSR			0x14
+#define MTL_TC_QWR			0x18
 
 /* MTL traffic class register entry bit positions and sizes */
 #define MTL_TC_ETSCR_TSA_INDEX		0
 #define MTL_TC_ETSCR_TSA_WIDTH		2
+#define MTL_TC_QWR_QW_INDEX		0
+#define MTL_TC_QWR_QW_WIDTH		21
 
 /* MTL traffic class register value */
 #define MTL_TSA_SP			0x00
@@ -778,9 +842,19 @@
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX	2
 #define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX	3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX	4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX	5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH	1
 
 #define RX_NORMAL_DESC0_OVT_INDEX		0
 #define RX_NORMAL_DESC0_OVT_WIDTH		16
+#define RX_NORMAL_DESC3_CDA_INDEX		27
+#define RX_NORMAL_DESC3_CDA_WIDTH		1
+#define RX_NORMAL_DESC3_CTXT_INDEX		30
+#define RX_NORMAL_DESC3_CTXT_WIDTH		1
 #define RX_NORMAL_DESC3_ES_INDEX		15
 #define RX_NORMAL_DESC3_ES_WIDTH		1
 #define RX_NORMAL_DESC3_ETLT_INDEX		16
@@ -794,12 +868,19 @@
 #define RX_NORMAL_DESC3_PL_INDEX		0
 #define RX_NORMAL_DESC3_PL_WIDTH		14
 
+#define RX_CONTEXT_DESC3_TSA_INDEX		4
+#define RX_CONTEXT_DESC3_TSA_WIDTH		1
+#define RX_CONTEXT_DESC3_TSD_INDEX		6
+#define RX_CONTEXT_DESC3_TSD_WIDTH		1
+
 #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX	0
 #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH	1
 #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX	1
 #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH	1
 #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX	2
 #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH	1
+#define TX_PACKET_ATTRIBUTES_PTP_INDEX		3
+#define TX_PACKET_ATTRIBUTES_PTP_WIDTH		1
 
 #define TX_CONTEXT_DESC2_MSS_INDEX		0
 #define TX_CONTEXT_DESC2_MSS_WIDTH		15
@@ -816,6 +897,8 @@
 #define TX_NORMAL_DESC2_HL_B1L_WIDTH		14
 #define TX_NORMAL_DESC2_IC_INDEX		31
 #define TX_NORMAL_DESC2_IC_WIDTH		1
+#define TX_NORMAL_DESC2_TTSE_INDEX		30
+#define TX_NORMAL_DESC2_TTSE_WIDTH		1
 #define TX_NORMAL_DESC2_VTIR_INDEX		14
 #define TX_NORMAL_DESC2_VTIR_WIDTH		2
 #define TX_NORMAL_DESC3_CIC_INDEX		16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
new file mode 100644
index 0000000..7d6a49b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -0,0 +1,270 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto.  Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc. nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto.  Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+#include <net/dcbnl.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static int xgbe_dcb_ieee_getets(struct net_device *netdev,
+				struct ieee_ets *ets)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	/* Set number of supported traffic classes */
+	ets->ets_cap = pdata->hw_feat.tc_cnt;
+
+	if (pdata->ets) {
+		ets->cbs = pdata->ets->cbs;
+		memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw,
+		       sizeof(ets->tc_tx_bw));
+		memcpy(ets->tc_tsa, pdata->ets->tc_tsa,
+		       sizeof(ets->tc_tsa));
+		memcpy(ets->prio_tc, pdata->ets->prio_tc,
+		       sizeof(ets->prio_tc));
+	}
+
+	return 0;
+}
+
+static int xgbe_dcb_ieee_setets(struct net_device *netdev,
+				struct ieee_ets *ets)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	unsigned int i, tc_ets, tc_ets_weight;
+
+	tc_ets = 0;
+	tc_ets_weight = 0;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		DBGPR("  TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+		      ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
+		DBGPR("  PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+
+		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
+		    (i >= pdata->hw_feat.tc_cnt))
+			return -EINVAL;
+
+		if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt)
+			return -EINVAL;
+
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			tc_ets = 1;
+			tc_ets_weight += ets->tc_tx_bw[i];
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* Weights must add up to 100% */
+	if (tc_ets && (tc_ets_weight != 100))
+		return -EINVAL;
+
+	if (!pdata->ets) {
+		pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets),
+					  GFP_KERNEL);
+		if (!pdata->ets)
+			return -ENOMEM;
+	}
+
+	memcpy(pdata->ets, ets, sizeof(*pdata->ets));
+
+	pdata->hw_if.config_dcb_tc(pdata);
+
+	return 0;
+}
+
+static int xgbe_dcb_ieee_getpfc(struct net_device *netdev,
+				struct ieee_pfc *pfc)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	/* Set number of supported PFC traffic classes */
+	pfc->pfc_cap = pdata->hw_feat.tc_cnt;
+
+	if (pdata->pfc) {
+		pfc->pfc_en = pdata->pfc->pfc_en;
+		pfc->mbc = pdata->pfc->mbc;
+		pfc->delay = pdata->pfc->delay;
+	}
+
+	return 0;
+}
+
+static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
+				struct ieee_pfc *pfc)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	DBGPR("  cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
+	      pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+
+	if (!pdata->pfc) {
+		pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
+					  GFP_KERNEL);
+		if (!pdata->pfc)
+			return -ENOMEM;
+	}
+
+	memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
+
+	pdata->hw_if.config_dcb_pfc(pdata);
+
+	return 0;
+}
+
+static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
+{
+	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
+{
+	u8 support = xgbe_dcb_getdcbx(netdev);
+
+	DBGPR("  DCBX=%#hhx\n", dcbx);
+
+	if (dcbx & ~support)
+		return 1;
+
+	if ((dcbx & support) != support)
+		return 1;
+
+	return 0;
+}
+
+static const struct dcbnl_rtnl_ops xgbe_dcbnl_ops = {
+	/* IEEE 802.1Qaz std */
+	.ieee_getets = xgbe_dcb_ieee_getets,
+	.ieee_setets = xgbe_dcb_ieee_setets,
+	.ieee_getpfc = xgbe_dcb_ieee_getpfc,
+	.ieee_setpfc = xgbe_dcb_ieee_setpfc,
+
+	/* DCBX configuration */
+	.getdcbx     = xgbe_dcb_getdcbx,
+	.setdcbx     = xgbe_dcb_setdcbx,
+};
+
+const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void)
+{
+	return &xgbe_dcbnl_ops;
+}
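For context, a minimal sketch of an ieee_ets configuration that the xgbe_dcb_ieee_setets() checks above would accept, assuming a device reporting two traffic classes; the helper is not part of the driver and all values are illustrative.

#include <linux/string.h>
#include <net/dcbnl.h>

static void example_build_ets(struct ieee_ets *ets)
{
	memset(ets, 0, sizeof(*ets));

	/* TC0 strict priority, TC1 ETS with all of the ETS bandwidth;
	 * ETS weights must total exactly 100 or setets returns -EINVAL.
	 */
	ets->tc_tsa[0] = IEEE_8021QAZ_TSA_STRICT;
	ets->tc_tsa[1] = IEEE_8021QAZ_TSA_ETS;
	ets->tc_tx_bw[1] = 100;

	/* every priority must map to a traffic class below tc_cnt */
	ets->prio_tc[7] = 1;	/* highest priority onto TC1, the rest stay on TC0 */
}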
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index a9ce56d..1c5d62e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -359,6 +359,15 @@
 	rdata->len = 0;
 	rdata->interrupt = 0;
 	rdata->mapped_as_page = 0;
+
+	if (rdata->state_saved) {
+		rdata->state_saved = 0;
+		rdata->state.incomplete = 0;
+		rdata->state.context_next = 0;
+		rdata->state.skb = NULL;
+		rdata->state.len = 0;
+		rdata->state.error = 0;
+	}
 }
 
 static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 699cff5..edaca44 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -131,7 +131,7 @@
 
 	DBGPR("-->xgbe_usec_to_riwt\n");
 
-	rate = clk_get_rate(pdata->sysclock);
+	rate = clk_get_rate(pdata->sysclk);
 
 	/*
 	 * Convert the input usec value to the watchdog timer value. Each
@@ -154,7 +154,7 @@
 
 	DBGPR("-->xgbe_riwt_to_usec\n");
 
-	rate = clk_get_rate(pdata->sysclock);
+	rate = clk_get_rate(pdata->sysclk);
 
 	/*
 	 * Convert the input watchdog timer value to the usec value. Each
@@ -247,7 +247,7 @@
 {
 	unsigned int i;
 
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
 
 	return 0;
@@ -257,7 +257,7 @@
 {
 	unsigned int i;
 
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
 
 	return 0;
@@ -268,7 +268,7 @@
 {
 	unsigned int i;
 
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
 
 	return 0;
@@ -279,7 +279,7 @@
 {
 	unsigned int i;
 
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
 
 	return 0;
@@ -343,12 +343,12 @@
 	unsigned int i;
 
 	/* Clear MTL flow control */
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
 
 	/* Clear MAC flow control */
 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+	q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -368,12 +368,12 @@
 	unsigned int i;
 
 	/* Set MTL flow control */
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
 
 	/* Set MAC flow control */
 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+	q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -407,7 +407,9 @@
 
 static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
 {
-	if (pdata->tx_pause)
+	struct ieee_pfc *pfc = pdata->pfc;
+
+	if (pdata->tx_pause || (pfc && pfc->pfc_en))
 		xgbe_enable_tx_flow_control(pdata);
 	else
 		xgbe_disable_tx_flow_control(pdata);
@@ -417,7 +419,9 @@
 
 static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
 {
-	if (pdata->rx_pause)
+	struct ieee_pfc *pfc = pdata->pfc;
+
+	if (pdata->rx_pause || (pfc && pfc->pfc_en))
 		xgbe_enable_rx_flow_control(pdata);
 	else
 		xgbe_disable_rx_flow_control(pdata);
@@ -427,8 +431,13 @@
 
 static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
 {
+	struct ieee_pfc *pfc = pdata->pfc;
+
 	xgbe_config_tx_flow_control(pdata);
 	xgbe_config_rx_flow_control(pdata);
+
+	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
+			   (pfc && pfc->pfc_en) ? 1 : 0);
 }
 
 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
@@ -492,8 +501,12 @@
 
 static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
 {
-	/* No MAC interrupts to be enabled */
-	XGMAC_IOWRITE(pdata, MAC_IER, 0);
+	unsigned int mac_ier = 0;
+
+	/* Enable Timestamp interrupt */
+	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);
+
+	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
 
 	/* Enable all counter interrupts */
 	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
@@ -1012,6 +1025,180 @@
 	DBGPR("<--rx_desc_init\n");
 }
 
+static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+				      unsigned int addend)
+{
+	/* Set the addend register value and tell the device */
+	XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+	/* Wait for addend update to complete */
+	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+		udelay(5);
+}
+
+static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+				 unsigned int nsec)
+{
+	/* Set the time values and tell the device */
+	XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+	XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+	/* Wait for time update to complete */
+	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+		udelay(5);
+}
+
+static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+	u64 nsec;
+
+	nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+	nsec *= NSEC_PER_SEC;
+	nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+	return nsec;
+}
+
+static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+	unsigned int tx_snr;
+	u64 nsec;
+
+	tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+	if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+		return 0;
+
+	nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
+	nsec *= NSEC_PER_SEC;
+	nsec += tx_snr;
+
+	return nsec;
+}
+
+static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+			       struct xgbe_ring_desc *rdesc)
+{
+	u64 nsec;
+
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+	    !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+		nsec = le32_to_cpu(rdesc->desc1);
+		nsec <<= 32;
+		nsec |= le32_to_cpu(rdesc->desc0);
+		if (nsec != 0xffffffffffffffffULL) {
+			packet->rx_tstamp = nsec;
+			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+				       RX_TSTAMP, 1);
+		}
+	}
+}
+
+static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
+			      unsigned int mac_tscr)
+{
+	/* Set one nano-second accuracy */
+	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+	/* Set fine timestamp update */
+	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+	/* Overwrite earlier timestamps */
+	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+	XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
+
+	/* Exit if timestamping is not enabled */
+	if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+		return 0;
+
+	/* Initialize time registers */
+	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+	xgbe_set_tstamp_time(pdata, 0, 0);
+
+	/* Initialize the timecounter */
+	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+			 ktime_to_ns(ktime_get_real()));
+
+	return 0;
+}
+
+static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
+{
+	struct ieee_ets *ets = pdata->ets;
+	unsigned int total_weight, min_weight, weight;
+	unsigned int i;
+
+	if (!ets)
+		return;
+
+	/* Set Tx to deficit weighted round robin scheduling algorithm (when
+	 * traffic class is using ETS algorithm)
+	 */
+	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
+
+	/* Set Traffic Class algorithms */
+	total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
+	min_weight = total_weight / 100;
+	if (!min_weight)
+		min_weight = 1;
+
+	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			DBGPR("  TC%u using SP\n", i);
+			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+					       MTL_TSA_SP);
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			weight = total_weight * ets->tc_tx_bw[i] / 100;
+			weight = clamp(weight, min_weight, total_weight);
+
+			DBGPR("  TC%u using DWRR (weight %u)\n", i, weight);
+			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+					       MTL_TSA_ETS);
+			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
+					       weight);
+			break;
+		}
+	}
+}
+
+static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
+{
+	struct ieee_pfc *pfc = pdata->pfc;
+	struct ieee_ets *ets = pdata->ets;
+	unsigned int mask, reg, reg_val;
+	unsigned int tc, prio;
+
+	if (!pfc || !ets)
+		return;
+
+	for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
+		mask = 0;
+		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+			if ((pfc->pfc_en & (1 << prio)) &&
+			    (ets->prio_tc[prio] == tc))
+				mask |= (1 << prio);
+		}
+		mask &= 0xff;
+
+		DBGPR("  TC%u PFC mask=%#x\n", tc, mask);
+		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
+		reg_val = XGMAC_IOREAD(pdata, reg);
+
+		reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
+		reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));
+
+		XGMAC_IOWRITE(pdata, reg, reg_val);
+	}
+
+	xgbe_config_flow_control(pdata);
+}
+
 static void xgbe_pre_xmit(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
@@ -1110,6 +1297,10 @@
 		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
 				  TX_NORMAL_DESC2_VLAN_INSERT);
 
+	/* Timestamp enablement check */
+	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
 	/* Set IC bit based on Tx coalescing settings */
 	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
 	if (tx_coalesce && (!tx_frames ||
@@ -1245,6 +1436,25 @@
 	xgbe_dump_rx_desc(ring, rdesc, ring->cur);
 #endif
 
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
+		/* Timestamp Context Descriptor */
+		xgbe_get_rx_tstamp(packet, rdesc);
+
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       CONTEXT, 1);
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       CONTEXT_NEXT, 0);
+		return 0;
+	}
+
+	/* Normal Descriptor, be sure Context Descriptor bit is off */
+	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
+
+	/* Indicate if a Context Descriptor is next */
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       CONTEXT_NEXT, 1);
+
 	/* Get the packet length */
 	rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
@@ -1423,11 +1633,11 @@
 {
 	unsigned int i, count;
 
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
 
 	/* Poll Until Poll Condition */
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
+	for (i = 0; i < pdata->tx_q_count; i++) {
 		count = 2000;
 		while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
 							MTL_Q_TQOMR, FTQ))
@@ -1479,14 +1689,15 @@
 {
 	unsigned int i;
 
-	/* Set Tx to weighted round robin scheduling algorithm (when
-	 * traffic class is using ETS algorithm)
-	 */
+	/* Set Tx to weighted round robin scheduling algorithm */
 	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
 
-	/* Set Tx traffic classes to strict priority algorithm */
-	for (i = 0; i < XGBE_TC_CNT; i++)
-		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);
+	/* Set Tx traffic classes to use WRR algorithm with equal weights */
+	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+				       MTL_TSA_ETS);
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+	}
 
 	/* Set Rx to strict priority algorithm */
 	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
@@ -1572,13 +1783,13 @@
 	unsigned int i;
 
 	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
-						  pdata->hw_feat.tx_q_cnt);
+						  pdata->tx_q_count);
 
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
 	netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
-		      pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
+		      pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -1587,27 +1798,84 @@
 	unsigned int i;
 
 	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
-						  pdata->hw_feat.rx_q_cnt);
+						  pdata->rx_q_count);
 
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
 
 	netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
-		      pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
+		      pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
 
-static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
+static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
 {
-	unsigned int i, reg, reg_val;
-	unsigned int q_count = pdata->hw_feat.rx_q_cnt;
+	unsigned int qptc, qptc_extra, queue;
+	unsigned int prio_queues;
+	unsigned int ppq, ppq_extra, prio;
+	unsigned int mask;
+	unsigned int i, j, reg, reg_val;
+
+	/* Map the MTL Tx Queues to Traffic Classes
+	 *   Note: Tx Queues >= Traffic Classes
+	 */
+	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		for (j = 0; j < qptc; j++) {
+			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+					       Q2TCMAP, i);
+			pdata->q2tc_map[queue++] = i;
+		}
+
+		if (i < qptc_extra) {
+			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+					       Q2TCMAP, i);
+			pdata->q2tc_map[queue++] = i;
+		}
+	}
+
+	/* Map the 8 VLAN priority values to available MTL Rx queues */
+	prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
+			    pdata->rx_q_count);
+	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
+	reg = MAC_RQC2R;
+	reg_val = 0;
+	for (i = 0, prio = 0; i < prio_queues;) {
+		mask = 0;
+		for (j = 0; j < ppq; j++) {
+			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+			mask |= (1 << prio);
+			pdata->prio2q_map[prio++] = i;
+		}
+
+		if (i < ppq_extra) {
+			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+			mask |= (1 << prio);
+			pdata->prio2q_map[prio++] = i;
+		}
+
+		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+			continue;
+
+		XGMAC_IOWRITE(pdata, reg, reg_val);
+		reg += MAC_RQC2_INC;
+		reg_val = 0;
+	}
 
 	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
 	reg = MTL_RQDCM0R;
 	reg_val = 0;
-	for (i = 0; i < q_count;) {
+	for (i = 0; i < pdata->rx_q_count;) {
 		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
 
-		if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
+		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
 			continue;
 
 		XGMAC_IOWRITE(pdata, reg, reg_val);
@@ -1621,7 +1889,7 @@
 {
 	unsigned int i;
 
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
+	for (i = 0; i < pdata->rx_q_count; i++) {
 		/* Activate flow control when less than 4k left in fifo */
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
 
@@ -2013,7 +2281,7 @@
 	}
 
 	/* Enable each Tx queue */
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
 				       MTL_Q_ENABLED);
 
@@ -2030,7 +2298,7 @@
 	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
 
 	/* Disable each Tx queue */
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
 
 	/* Disable each Tx DMA channel */
@@ -2059,7 +2327,7 @@
 
 	/* Enable each Rx queue */
 	reg_val = 0;
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		reg_val |= (0x02 << (i << 1));
 	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
 
@@ -2193,9 +2461,7 @@
 	 * Initialize MTL related features
 	 */
 	xgbe_config_mtl_mode(pdata);
-	xgbe_config_rx_queue_mapping(pdata);
-	/*TODO: Program the priorities mapped to the Selected Traffic Classes
-		in MTL_TC_Prty_Map0-3 registers */
+	xgbe_config_queue_mapping(pdata);
 	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
 	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
 	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
@@ -2203,15 +2469,13 @@
 	xgbe_config_tx_fifo_size(pdata);
 	xgbe_config_rx_fifo_size(pdata);
 	xgbe_config_flow_control_threshold(pdata);
-	/*TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
 	/*TODO: Error Packet and undersized good Packet forwarding enable
 		(FEP and FUP)
 	 */
+	xgbe_config_dcb_tc(pdata);
+	xgbe_config_dcb_pfc(pdata);
 	xgbe_enable_mtl_interrupts(pdata);
 
-	/* Transmit Class Weight */
-	XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);
-
 	/*
 	 * Initialize MAC related features
 	 */
@@ -2313,5 +2577,16 @@
 	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
 	hw_if->read_mmc_stats = xgbe_read_mmc_stats;
 
+	/* For PTP config */
+	hw_if->config_tstamp = xgbe_config_tstamp;
+	hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
+	hw_if->set_tstamp_time = xgbe_set_tstamp_time;
+	hw_if->get_tstamp_time = xgbe_get_tstamp_time;
+	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
+
+	/* For Data Center Bridging config */
+	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
+	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+
 	DBGPR("<--xgbe_init_function_ptrs\n");
 }
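The DWRR weights programmed by xgbe_config_dcb_tc() above scale with the MTU; a worked example of that arithmetic, using assumed values (MTU 1500, four traffic classes, one class granted 25% ETS bandwidth):

/*   total_weight = mtu * tc_cnt           = 1500 * 4 = 6000
 *   min_weight   = total_weight / 100     = 60
 *   weight(TC)   = total_weight * 25 / 100 = 1500
 *   clamp(1500, 60, 6000)                 = 1500
 * so MTL_TC_QWR.QW for that class is written with 1500.
 */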
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 344e6b1..3bf3c01 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -121,6 +121,7 @@
 #include <net/busy_poll.h>
 #include <linux/clk.h>
 #include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -202,7 +203,7 @@
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_channel *channel;
 	unsigned int dma_isr, dma_ch_isr;
-	unsigned int mac_isr;
+	unsigned int mac_isr, mac_tssr;
 	unsigned int i;
 
 	/* The DMA interrupt status register also reports MAC and MTL
@@ -255,6 +256,17 @@
 
 		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
 			hw_if->rx_mmc_int(pdata);
+
+		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
+			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
+
+			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
+				/* Read Tx Timestamp to clear interrupt */
+				pdata->tx_tstamp =
+					hw_if->get_tx_tstamp(pdata);
+				schedule_work(&pdata->tx_tstamp_work);
+			}
+		}
 	}
 
 	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
@@ -375,6 +387,7 @@
 	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
 	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
 	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+	hw_feat->tc_cnt	       = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
 	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
 						  HASHTBLSZ);
 	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
@@ -668,6 +681,197 @@
 	rtnl_unlock();
 }
 
+static void xgbe_tx_tstamp(struct work_struct *work)
+{
+	struct xgbe_prv_data *pdata = container_of(work,
+						   struct xgbe_prv_data,
+						   tx_tstamp_work);
+	struct skb_shared_hwtstamps hwtstamps;
+	u64 nsec;
+	unsigned long flags;
+
+	if (pdata->tx_tstamp) {
+		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+					    pdata->tx_tstamp);
+
+		memset(&hwtstamps, 0, sizeof(hwtstamps));
+		hwtstamps.hwtstamp = ns_to_ktime(nsec);
+		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+	}
+
+	dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+	spin_lock_irqsave(&pdata->tstamp_lock, flags);
+	pdata->tx_tstamp_skb = NULL;
+	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
+				      struct ifreq *ifreq)
+{
+	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
+			 sizeof(pdata->tstamp_config)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
+				      struct ifreq *ifreq)
+{
+	struct hwtstamp_config config;
+	unsigned int mac_tscr;
+
+	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	if (config.flags)
+		return -EINVAL;
+
+	mac_tscr = 0;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		break;
+
+	case HWTSTAMP_TX_ON:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		break;
+
+	case HWTSTAMP_FILTER_ALL:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	/* PTP v2, UDP, any kind of event packet */
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+	/* PTP v1, UDP, any kind of event packet */
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	/* PTP v2, UDP, Sync packet */
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+	/* PTP v1, UDP, Sync packet */
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	/* PTP v2, UDP, Delay_req packet */
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+	/* PTP v1, UDP, Delay_req packet */
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	/* 802.AS1, Ethernet, any kind of event packet */
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	/* 802.AS1, Ethernet, Sync packet */
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	/* 802.AS1, Ethernet, Delay_req packet */
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	/* PTP v2/802.AS1, any layer, any kind of event packet */
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	/* PTP v2/802.AS1, any layer, Sync packet */
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	/* PTP v2/802.AS1, any layer, Delay_req packet */
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+		break;
+
+	default:
+		return -ERANGE;
+	}
+
+	pdata->hw_if.config_tstamp(pdata, mac_tscr);
+
+	memcpy(&pdata->tstamp_config, &config, sizeof(config));
+
+	return 0;
+}
+
+static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+				struct sk_buff *skb,
+				struct xgbe_packet_data *packet)
+{
+	unsigned long flags;
+
+	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+		spin_lock_irqsave(&pdata->tstamp_lock, flags);
+		if (pdata->tx_tstamp_skb) {
+			/* Another timestamp in progress, ignore this one */
+			XGMAC_SET_BITS(packet->attributes,
+				       TX_PACKET_ATTRIBUTES, PTP, 0);
+		} else {
+			pdata->tx_tstamp_skb = skb_get(skb);
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		}
+		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+	}
+
+	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+		skb_tx_timestamp(skb);
+}
+
 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
 {
 	if (vlan_tx_tag_present(skb))
@@ -711,7 +915,8 @@
 	return 1;
 }
 
-static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
+static void xgbe_packet_info(struct xgbe_prv_data *pdata,
+			     struct xgbe_ring *ring, struct sk_buff *skb,
 			     struct xgbe_packet_data *packet)
 {
 	struct skb_frag_struct *frag;
@@ -753,6 +958,11 @@
 			       VLAN_CTAG, 1);
 	}
 
+	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
+		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+			       PTP, 1);
+
 	for (len = skb_headlen(skb); len;) {
 		packet->rdesc_count++;
 		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
@@ -776,26 +986,33 @@
 
 	DBGPR("-->xgbe_open\n");
 
-	/* Enable the clock */
-	ret = clk_prepare_enable(pdata->sysclock);
+	/* Enable the clocks */
+	ret = clk_prepare_enable(pdata->sysclk);
 	if (ret) {
-		netdev_alert(netdev, "clk_prepare_enable failed\n");
+		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
 		return ret;
 	}
 
+	ret = clk_prepare_enable(pdata->ptpclk);
+	if (ret) {
+		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
+		goto err_sysclk;
+	}
+
 	/* Calculate the Rx buffer size before allocating rings */
 	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
 	if (ret < 0)
-		goto err_clk;
+		goto err_ptpclk;
 	pdata->rx_buf_size = ret;
 
 	/* Allocate the ring descriptors and buffers */
 	ret = desc_if->alloc_ring_resources(pdata);
 	if (ret)
-		goto err_clk;
+		goto err_ptpclk;
 
-	/* Initialize the device restart work struct */
+	/* Initialize the device restart and Tx timestamp work struct */
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
+	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
 	/* Request interrupts */
 	ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
@@ -824,8 +1041,11 @@
 err_irq:
 	desc_if->free_ring_resources(pdata);
 
-err_clk:
-	clk_disable_unprepare(pdata->sysclock);
+err_ptpclk:
+	clk_disable_unprepare(pdata->ptpclk);
+
+err_sysclk:
+	clk_disable_unprepare(pdata->sysclk);
 
 	return ret;
 }
@@ -853,8 +1073,9 @@
 		pdata->irq_number = 0;
 	}
 
-	/* Disable the clock */
-	clk_disable_unprepare(pdata->sysclock);
+	/* Disable the clocks */
+	clk_disable_unprepare(pdata->ptpclk);
+	clk_disable_unprepare(pdata->sysclk);
 
 	DBGPR("<--xgbe_close\n");
 
@@ -890,7 +1111,7 @@
 
 	/* Calculate preliminary packet info */
 	memset(packet, 0, sizeof(*packet));
-	xgbe_packet_info(ring, skb, packet);
+	xgbe_packet_info(pdata, ring, skb, packet);
 
 	/* Check that there are enough descriptors available */
 	if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
@@ -914,6 +1135,8 @@
 		goto tx_netdev_return;
 	}
 
+	xgbe_prep_tx_tstamp(pdata, skb, packet);
+
 	/* Configure required descriptor fields for transmission */
 	hw_if->pre_xmit(channel);
 
@@ -968,6 +1191,27 @@
 	return 0;
 }
 
+static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	int ret;
+
+	switch (cmd) {
+	case SIOCGHWTSTAMP:
+		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
+		break;
+
+	case SIOCSHWTSTAMP:
+		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
 static int xgbe_change_mtu(struct net_device *netdev, int mtu)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -1069,6 +1313,33 @@
 }
 #endif /* End CONFIG_NET_POLL_CONTROLLER */
 
+static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	unsigned int offset, queue;
+	u8 i;
+
+	if (tc && (tc != pdata->hw_feat.tc_cnt))
+		return -EINVAL;
+
+	if (tc) {
+		netdev_set_num_tc(netdev, tc);
+		for (i = 0, queue = 0, offset = 0; i < tc; i++) {
+			while ((queue < pdata->tx_q_count) &&
+			       (pdata->q2tc_map[queue] == i))
+				queue++;
+
+			DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
+			netdev_set_tc_queue(netdev, i, queue - offset, offset);
+			offset = queue;
+		}
+	} else {
+		netdev_reset_tc(netdev);
+	}
+
+	return 0;
+}
+
 static int xgbe_set_features(struct net_device *netdev,
 			     netdev_features_t features)
 {
@@ -1109,6 +1380,7 @@
 	.ndo_set_rx_mode	= xgbe_set_rx_mode,
 	.ndo_set_mac_address	= xgbe_set_mac_address,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= xgbe_ioctl,
 	.ndo_change_mtu		= xgbe_change_mtu,
 	.ndo_get_stats64	= xgbe_get_stats64,
 	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
@@ -1116,6 +1388,7 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= xgbe_poll_controller,
 #endif
+	.ndo_setup_tc		= xgbe_setup_tc,
 	.ndo_set_features	= xgbe_set_features,
 };
 
@@ -1202,8 +1475,9 @@
 	struct xgbe_packet_data *packet;
 	struct net_device *netdev = pdata->netdev;
 	struct sk_buff *skb;
-	unsigned int incomplete, error;
-	unsigned int cur_len, put_len, max_len;
+	struct skb_shared_hwtstamps *hwtstamps;
+	unsigned int incomplete, error, context_next, context;
+	unsigned int len, put_len, max_len;
 	int received = 0;
 
 	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
@@ -1212,22 +1486,33 @@
 	if (!ring)
 		return 0;
 
+	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 	packet = &ring->packet_data;
 	while (received < budget) {
 		DBGPR("  cur = %d\n", ring->cur);
 
-		/* Clear the packet data information */
-		memset(packet, 0, sizeof(*packet));
-		skb = NULL;
-		error = 0;
-		cur_len = 0;
+		/* First time in loop see if we need to restore state */
+		if (!received && rdata->state_saved) {
+			incomplete = rdata->state.incomplete;
+			context_next = rdata->state.context_next;
+			skb = rdata->state.skb;
+			error = rdata->state.error;
+			len = rdata->state.len;
+		} else {
+			memset(packet, 0, sizeof(*packet));
+			incomplete = 0;
+			context_next = 0;
+			skb = NULL;
+			error = 0;
+			len = 0;
+		}
 
 read_again:
+		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+
 		if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
 			xgbe_rx_refresh(channel);
 
-		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
-
 		if (hw_if->dev_read(channel))
 			break;
 
@@ -1242,9 +1527,15 @@
 		incomplete = XGMAC_GET_BITS(packet->attributes,
 					    RX_PACKET_ATTRIBUTES,
 					    INCOMPLETE);
+		context_next = XGMAC_GET_BITS(packet->attributes,
+					      RX_PACKET_ATTRIBUTES,
+					      CONTEXT_NEXT);
+		context = XGMAC_GET_BITS(packet->attributes,
+					 RX_PACKET_ATTRIBUTES,
+					 CONTEXT);
 
 		/* Earlier error, just drain the remaining data */
-		if (incomplete && error)
+		if ((incomplete || context_next) && error)
 			goto read_again;
 
 		if (error || packet->errors) {
@@ -1254,30 +1545,37 @@
 			continue;
 		}
 
-		put_len = rdata->len - cur_len;
-		if (skb) {
-			if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
-				DBGPR("pskb_expand_head error\n");
-				if (incomplete) {
-					error = 1;
-					goto read_again;
+		if (!context) {
+			put_len = rdata->len - len;
+			if (skb) {
+				if (pskb_expand_head(skb, 0, put_len,
+						     GFP_ATOMIC)) {
+					DBGPR("pskb_expand_head error\n");
+					if (incomplete) {
+						error = 1;
+						goto read_again;
+					}
+
+					dev_kfree_skb(skb);
+					continue;
 				}
-
-				dev_kfree_skb(skb);
-				continue;
+				memcpy(skb_tail_pointer(skb), rdata->skb->data,
+				       put_len);
+			} else {
+				skb = rdata->skb;
+				rdata->skb = NULL;
 			}
-			memcpy(skb_tail_pointer(skb), rdata->skb->data,
-			       put_len);
-		} else {
-			skb = rdata->skb;
-			rdata->skb = NULL;
+			skb_put(skb, put_len);
+			len += put_len;
 		}
-		skb_put(skb, put_len);
-		cur_len += put_len;
 
-		if (incomplete)
+		if (incomplete || context_next)
 			goto read_again;
 
+		/* Stray Context Descriptor? */
+		if (!skb)
+			continue;
+
 		/* Be sure we don't exceed the configured MTU */
 		max_len = netdev->mtu + ETH_HLEN;
 		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
@@ -1304,6 +1602,16 @@
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 					       packet->vlan_ctag);
 
+		if (XGMAC_GET_BITS(packet->attributes,
+				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
+			u64 nsec;
+
+			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+						    packet->rx_tstamp);
+			hwtstamps = skb_hwtstamps(skb);
+			hwtstamps->hwtstamp = ns_to_ktime(nsec);
+		}
+
 		skb->dev = netdev;
 		skb->protocol = eth_type_trans(skb, netdev);
 		skb_record_rx_queue(skb, channel->queue_index);
@@ -1313,6 +1621,17 @@
 		napi_gro_receive(&pdata->napi, skb);
 	}
 
+	/* Check if we need to save state before leaving */
+	if (received && (incomplete || context_next)) {
+		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+		rdata->state_saved = 1;
+		rdata->state.incomplete = incomplete;
+		rdata->state.context_next = context_next;
+		rdata->state.skb = skb;
+		rdata->state.len = len;
+		rdata->state.error = error;
+	}
+
 	DBGPR("<--xgbe_rx_poll: received = %d\n", received);
 
 	return received;
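A hedged sketch of the Rx timestamp delivery added above, shown in isolation: the raw device timestamp carried in the packet attributes is run through the shared timecounter and attached to the skb. The function name is illustrative only; the fields match those used in the hunk.

static void example_attach_rx_tstamp(struct xgbe_prv_data *pdata,
				     struct sk_buff *skb, u64 raw_tstamp)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
	u64 nsec;

	/* device cycles -> nanoseconds via the timecounter */
	nsec = timecounter_cyc2time(&pdata->tstamp_tc, raw_tstamp);
	hwtstamps->hwtstamp = ns_to_ktime(nsec);
}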
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index f740526..6005b60 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -116,6 +116,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/phy.h>
+#include <linux/net_tstamp.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -326,10 +327,19 @@
 	    (cmd->autoneg != AUTONEG_DISABLE))
 		goto unlock;
 
-	if ((cmd->autoneg == AUTONEG_DISABLE) &&
-	    (((speed != SPEED_10000) && (speed != SPEED_1000)) ||
-	     (cmd->duplex != DUPLEX_FULL)))
-		goto unlock;
+	if (cmd->autoneg == AUTONEG_DISABLE) {
+		switch (speed) {
+		case SPEED_10000:
+		case SPEED_2500:
+		case SPEED_1000:
+			break;
+		default:
+			goto unlock;
+		}
+
+		if (cmd->duplex != DUPLEX_FULL)
+			goto unlock;
+	}
 
 	cmd->advertising &= phydev->supported;
 	if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
@@ -480,6 +490,39 @@
 	return 0;
 }
 
+static int xgbe_get_ts_info(struct net_device *netdev,
+			    struct ethtool_ts_info *ts_info)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+				   SOF_TIMESTAMPING_RX_SOFTWARE |
+				   SOF_TIMESTAMPING_SOFTWARE |
+				   SOF_TIMESTAMPING_TX_HARDWARE |
+				   SOF_TIMESTAMPING_RX_HARDWARE |
+				   SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	if (pdata->ptp_clock)
+		ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
+	else
+		ts_info->phc_index = -1;
+
+	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+			      (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+			      (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+			      (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+			      (1 << HWTSTAMP_FILTER_ALL);
+
+	return 0;
+}
+
 static const struct ethtool_ops xgbe_ethtool_ops = {
 	.get_settings = xgbe_get_settings,
 	.set_settings = xgbe_set_settings,
@@ -492,6 +535,7 @@
 	.get_strings = xgbe_get_strings,
 	.get_ethtool_stats = xgbe_get_ethtool_stats,
 	.get_sset_count = xgbe_get_sset_count,
+	.get_ts_info = xgbe_get_ts_info,
 };
 
 struct ethtool_ops *xgbe_get_ethtool_ops(void)
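The capabilities reported by xgbe_get_ts_info() above pair with the SIOCSHWTSTAMP path handled by xgbe_ioctl() earlier in this patch. A hedged userspace sketch of enabling those modes (error handling trimmed; "eth0" is a placeholder interface name):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int example_enable_hwtstamp(int sock)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}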
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index d5a4f76..ec977d3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -245,6 +245,7 @@
 
 	spin_lock_init(&pdata->lock);
 	mutex_init(&pdata->xpcs_mutex);
+	spin_lock_init(&pdata->tstamp_lock);
 
 	/* Set and validate the number of descriptors for a ring */
 	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
@@ -265,10 +266,18 @@
 	}
 
 	/* Obtain the system clock setting */
-	pdata->sysclock = devm_clk_get(dev, NULL);
-	if (IS_ERR(pdata->sysclock)) {
-		dev_err(dev, "devm_clk_get failed\n");
-		ret = PTR_ERR(pdata->sysclock);
+	pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+	if (IS_ERR(pdata->sysclk)) {
+		dev_err(dev, "dma devm_clk_get failed\n");
+		ret = PTR_ERR(pdata->sysclk);
+		goto err_io;
+	}
+
+	/* Obtain the PTP clock setting */
+	pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+	if (IS_ERR(pdata->ptpclk)) {
+		dev_err(dev, "ptp devm_clk_get failed\n");
+		ret = PTR_ERR(pdata->ptpclk);
 		goto err_io;
 	}
 
@@ -346,10 +355,18 @@
 	/* Set default configuration data */
 	xgbe_default_config(pdata);
 
-	/* Calculate the number of Tx and Rx rings to be created */
+	/* Calculate the number of Tx and Rx rings to be created
+	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+	 *   the number of Tx queues to the number of Tx channels
+	 *   enabled
+	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
+	 *   number of Rx queues
+	 */
 	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
 				     pdata->hw_feat.tx_ch_cnt);
-	if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) {
+	pdata->tx_q_count = pdata->tx_ring_count;
+	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+	if (ret) {
 		dev_err(dev, "error setting real tx queue count\n");
 		goto err_io;
 	}
@@ -357,6 +374,7 @@
 	pdata->rx_ring_count = min_t(unsigned int,
 				     netif_get_num_default_rss_queues(),
 				     pdata->hw_feat.rx_ch_cnt);
+	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
 	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
 	if (ret) {
 		dev_err(dev, "error setting real rx queue count\n");
@@ -382,9 +400,12 @@
 	if (ret)
 		goto err_bus_id;
 
-	/* Set network and ethtool operations */
+	/* Set device operations */
 	netdev->netdev_ops = xgbe_get_netdev_ops();
 	netdev->ethtool_ops = xgbe_get_ethtool_ops();
+#ifdef CONFIG_AMD_XGBE_DCB
+	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
+#endif
 
 	/* Set device features */
 	netdev->hw_features = NETIF_F_SG |
@@ -419,6 +440,8 @@
 		goto err_reg_netdev;
 	}
 
+	xgbe_ptp_register(pdata);
+
 	xgbe_debugfs_init(pdata);
 
 	netdev_notice(netdev, "net device enabled\n");
@@ -451,6 +474,8 @@
 
 	xgbe_debugfs_exit(pdata);
 
+	xgbe_ptp_unregister(pdata);
+
 	unregister_netdev(netdev);
 
 	xgbe_mdio_unregister(pdata);
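A worked example of the ring/queue sizing introduced above, under assumed hardware capabilities (8 online CPUs, tx_ch_cnt = 4, rx_ch_cnt = 4, rx_q_cnt = 8); the numbers are illustrative only:

/*   tx_ring_count = min(num_online_cpus, tx_ch_cnt)       = min(8, 4) = 4
 *   tx_q_count    = tx_ring_count                          = 4
 *   rx_ring_count = min(default_rss_queues, rx_ch_cnt)     = min(8, 4) = 4
 *   rx_q_count    = rx_q_cnt                                = 8
 *
 * Tx queues track the Tx DMA channels one-to-one, while all eight MTL
 * Rx queues remain available for the VLAN-priority mapping even though
 * only four Rx DMA channels (and NAPI rings) are in use.
 */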
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
new file mode 100644
index 0000000..37e64cf
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -0,0 +1,285 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto.  Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc. nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto.  Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
+{
+	struct xgbe_prv_data *pdata = container_of(cc,
+						   struct xgbe_prv_data,
+						   tstamp_cc);
+	u64 nsec;
+
+	nsec = pdata->hw_if.get_tstamp_time(pdata);
+
+	return nsec;
+}
+
+static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+	struct xgbe_prv_data *pdata = container_of(info,
+						   struct xgbe_prv_data,
+						   ptp_clock_info);
+	unsigned long flags;
+	u64 adjust;
+	u32 addend, diff;
+	unsigned int neg_adjust = 0;
+
+	if (delta < 0) {
+		neg_adjust = 1;
+		delta = -delta;
+	}
+
+	adjust = pdata->tstamp_addend;
+	adjust *= delta;
+	diff = div_u64(adjust, 1000000000UL);
+
+	addend = (neg_adjust) ? pdata->tstamp_addend - diff :
+				pdata->tstamp_addend + diff;
+
+	spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+	pdata->hw_if.update_tstamp_addend(pdata, addend);
+
+	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+	return 0;
+}
+
+static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+	struct xgbe_prv_data *pdata = container_of(info,
+						   struct xgbe_prv_data,
+						   ptp_clock_info);
+	unsigned long flags;
+	u64 nsec;
+
+	spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+	nsec = timecounter_read(&pdata->tstamp_tc);
+
+	nsec += delta;
+	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+	return 0;
+}
+
+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
+{
+	struct xgbe_prv_data *pdata = container_of(info,
+						   struct xgbe_prv_data,
+						   ptp_clock_info);
+	unsigned long flags;
+	u64 nsec;
+
+	spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+	nsec = timecounter_read(&pdata->tstamp_tc);
+
+	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+	*ts = ns_to_timespec(nsec);
+
+	return 0;
+}
+
+static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
+{
+	struct xgbe_prv_data *pdata = container_of(info,
+						   struct xgbe_prv_data,
+						   ptp_clock_info);
+	unsigned long flags;
+	u64 nsec;
+
+	nsec = timespec_to_ns(ts);
+
+	spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+	return 0;
+}
+
+static int xgbe_enable(struct ptp_clock_info *info,
+		       struct ptp_clock_request *request, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+void xgbe_ptp_register(struct xgbe_prv_data *pdata)
+{
+	struct ptp_clock_info *info = &pdata->ptp_clock_info;
+	struct ptp_clock *clock;
+	struct cyclecounter *cc = &pdata->tstamp_cc;
+	u64 dividend;
+
+	snprintf(info->name, sizeof(info->name), "%s",
+		 netdev_name(pdata->netdev));
+	info->owner = THIS_MODULE;
+	info->max_adj = clk_get_rate(pdata->ptpclk);
+	info->adjfreq = xgbe_adjfreq;
+	info->adjtime = xgbe_adjtime;
+	info->gettime = xgbe_gettime;
+	info->settime = xgbe_settime;
+	info->enable = xgbe_enable;
+
+	clock = ptp_clock_register(info, pdata->dev);
+	if (IS_ERR(clock)) {
+		dev_err(pdata->dev, "ptp_clock_register failed\n");
+		return;
+	}
+
+	pdata->ptp_clock = clock;
+
+	/* Calculate the addend:
+	 *   addend = 2^32 / (PTP ref clock / 50MHz)
+	 *          = (2^32 * 50MHz) / PTP ref clock
+	 */
+	dividend = 50000000;
+	dividend <<= 32;
+	pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk));
+
+	/* Setup the timecounter */
+	cc->read = xgbe_cc_read;
+	cc->mask = CLOCKSOURCE_MASK(64);
+	cc->mult = 1;
+	cc->shift = 0;
+
+	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+			 ktime_to_ns(ktime_get_real()));
+
+	/* Disable all timestamping to start */
+	XGMAC_IOWRITE(pdata, MAC_TCR, 0);
+	pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+	pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+}
+
+void xgbe_ptp_unregister(struct xgbe_prv_data *pdata)
+{
+	if (pdata->ptp_clock)
+		ptp_clock_unregister(pdata->ptp_clock);
+}
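
The adjfreq arithmetic above works purely on the addend register value: the
base addend is (2^32 * 50MHz) / PTP ref clock, and a delta given in parts per
billion scales it by addend * |delta| / 10^9. A minimal standalone sketch of
that arithmetic, assuming a 250 MHz PTP reference clock purely for
illustration (the driver reads the real rate via clk_get_rate(pdata->ptpclk)):

#include <stdint.h>
#include <stdio.h>

static uint32_t base_addend(uint64_t ptp_refclk_hz)
{
	/* addend = (2^32 * 50MHz) / PTP ref clock */
	return (uint32_t)((50000000ULL << 32) / ptp_refclk_hz);
}

static uint32_t adjust_addend(uint32_t addend, int32_t delta_ppb)
{
	uint64_t diff;
	int neg = delta_ppb < 0;

	if (neg)
		delta_ppb = -delta_ppb;

	/* scale the base addend by the requested ppb offset */
	diff = ((uint64_t)addend * (uint32_t)delta_ppb) / 1000000000ULL;

	return neg ? addend - (uint32_t)diff : addend + (uint32_t)diff;
}

int main(void)
{
	uint32_t addend = base_addend(250000000ULL);	/* assumed 250 MHz */

	printf("base addend: 0x%08x\n", (unsigned int)addend);
	printf("+100 ppb:    0x%08x\n", (unsigned int)adjust_addend(addend, 100));
	printf("-100 ppb:    0x%08x\n", (unsigned int)adjust_addend(addend, -100));
	return 0;
}

With these assumed numbers the base addend comes out to 0x33333333, and a
+/-100 ppb request moves it by 85 counts per tick.
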
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 9e24b29..07bf70a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -123,6 +123,10 @@
 #include <linux/phy.h>
 #include <linux/if_vlan.h>
 #include <linux/bitops.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <net/dcbnl.h>
 
 
 #define XGBE_DRV_NAME		"amd-xgbe"
@@ -141,6 +145,7 @@
 #define XGBE_RX_BUF_ALIGN	64
 
 #define XGBE_MAX_DMA_CHANNELS	16
+#define XGBE_MAX_QUEUES		16
 
 /* DMA cache settings - Outer sharable, write-back, write-allocate */
 #define XGBE_DMA_OS_AXDOMAIN	0x2
@@ -164,6 +169,16 @@
 #define XGBE_PHY_NAME		"amd_xgbe_phy"
 #define XGBE_PRTAD		0
 
+/* Device-tree clock names */
+#define XGBE_DMA_CLOCK		"dma_clk"
+#define XGBE_PTP_CLOCK		"ptp_clk"
+
+/* Timestamp support - values based on 50MHz PTP clock
+ *   50MHz => 20 nsec
+ */
+#define XGBE_TSTAMP_SSINC	20
+#define XGBE_TSTAMP_SNSINC	0
+
 /* Driver PMT macros */
 #define XGMAC_DRIVER_CONTEXT	1
 #define XGMAC_IOCTL_CONTEXT	2
@@ -171,7 +186,7 @@
 #define XGBE_FIFO_SIZE_B(x)	(x)
 #define XGBE_FIFO_SIZE_KB(x)	(x * 1024)
 
-#define XGBE_TC_CNT		2
+#define XGBE_TC_MIN_QUANTUM	10
 
 /* Helper macro for descriptor handling
  *  Always use XGBE_GET_DESC_DATA to access the descriptor data
@@ -214,6 +229,8 @@
 	unsigned short mss;
 
 	unsigned short vlan_ctag;
+
+	u64 rx_tstamp;
 };
 
 /* Common Rx and Tx descriptor mapping */
@@ -242,6 +259,20 @@
 	unsigned int interrupt;		/* Interrupt indicator */
 
 	unsigned int mapped_as_page;
+
+	/* Incomplete receive save location.  If the budget is exhausted
+	 * or the last descriptor (last normal descriptor or a following
+	 * context descriptor) has not been DMA'd yet, the current state
+	 * of the receive processing needs to be saved.
+	 */
+	unsigned int state_saved;
+	struct {
+		unsigned int incomplete;
+		unsigned int context_next;
+		struct sk_buff *skb;
+		unsigned int len;
+		unsigned int error;
+	} state;
 };
 
 struct xgbe_ring {
@@ -467,6 +498,18 @@
 	void (*rx_mmc_int)(struct xgbe_prv_data *);
 	void (*tx_mmc_int)(struct xgbe_prv_data *);
 	void (*read_mmc_stats)(struct xgbe_prv_data *);
+
+	/* For Timestamp config */
+	int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
+	void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
+	void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
+				unsigned int nsec);
+	u64 (*get_tstamp_time)(struct xgbe_prv_data *);
+	u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
+
+	/* For Data Center Bridging config */
+	void (*config_dcb_tc)(struct xgbe_prv_data *);
+	void (*config_dcb_pfc)(struct xgbe_prv_data *);
 };
 
 struct xgbe_desc_if {
@@ -508,6 +551,7 @@
 	unsigned int tso;		/* TCP Segmentation Offload */
 	unsigned int dma_debug;		/* DMA Debug Registers */
 	unsigned int rss;		/* Receive Side Scaling */
+	unsigned int tc_cnt;		/* Number of Traffic Classes */
 	unsigned int hash_table_size;	/* Hash Table Size */
 	unsigned int l3l4_filter_num;	/* Number of L3-L4 Filters */
 
@@ -553,6 +597,9 @@
 	unsigned int rx_ring_count;
 	unsigned int rx_desc_count;
 
+	unsigned int tx_q_count;
+	unsigned int rx_q_count;
+
 	/* Tx/Rx common settings */
 	unsigned int pblx8;
 
@@ -607,8 +654,27 @@
 	/* Filtering support */
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 
-	/* System clock value used for Rx watchdog */
-	struct clk *sysclock;
+	/* Device clocks */
+	struct clk *sysclk;
+	struct clk *ptpclk;
+
+	/* Timestamp support */
+	spinlock_t tstamp_lock;
+	struct ptp_clock_info ptp_clock_info;
+	struct ptp_clock *ptp_clock;
+	struct hwtstamp_config tstamp_config;
+	struct cyclecounter tstamp_cc;
+	struct timecounter tstamp_tc;
+	unsigned int tstamp_addend;
+	struct work_struct tx_tstamp_work;
+	struct sk_buff *tx_tstamp_skb;
+	u64 tx_tstamp;
+
+	/* DCB support */
+	struct ieee_ets *ets;
+	struct ieee_pfc *pfc;
+	unsigned int q2tc_map[XGBE_MAX_QUEUES];
+	unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
 
 	/* Hardware features of the device */
 	struct xgbe_hw_features hw_feat;
@@ -635,10 +701,15 @@
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
 struct net_device_ops *xgbe_get_netdev_ops(void);
 struct ethtool_ops *xgbe_get_ethtool_ops(void);
+#ifdef CONFIG_AMD_XGBE_DCB
+const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
+#endif
 
 int xgbe_mdio_register(struct xgbe_prv_data *);
 void xgbe_mdio_unregister(struct xgbe_prv_data *);
 void xgbe_dump_phy_registers(struct xgbe_prv_data *);
+void xgbe_ptp_register(struct xgbe_prv_data *);
+void xgbe_ptp_unregister(struct xgbe_prv_data *);
 void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
 		       unsigned int);
 void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ce8f869..d777fae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -346,6 +346,7 @@
 	u8		flags;
 /* Set on the first BD descriptor when there is a split BD */
 #define BNX2X_TSO_SPLIT_BD		(1<<0)
+#define BNX2X_HAS_SECOND_PBD		(1<<1)
 };
 
 struct sw_rx_page {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dca1236..4e6c82e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -227,6 +227,12 @@
 	--nbd;
 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
+	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+		/* Skip second parse bd... */
+		--nbd;
+		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+	}
+
 	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -3902,6 +3908,9 @@
 			/* set encapsulation flag in start BD */
 			SET_FLAG(tx_start_bd->general_data,
 				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
 			nbd++;
 		} else if (xmit_type & XMIT_CSUM) {
 			/* Set PBD in checksum offload case w/o encapsulation */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 08ea91c..92fee84 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -416,6 +416,7 @@
 			break;
 		case PORT_FIBRE:
 		case PORT_DA:
+		case PORT_NONE:
 			if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
 			      bp->port.supported[1] & SUPPORTED_FIBRE)) {
 				DP(BNX2X_MSG_ETHTOOL,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0173a6d..ce455ae 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -6,15 +6,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
 #define pr_fmt(fmt)				"bcmgenet: " fmt
@@ -79,13 +70,13 @@
 				TOTAL_DESC * DMA_DESC_SIZE)
 
 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
-						void __iomem *d, u32 value)
+					     void __iomem *d, u32 value)
 {
 	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
 }
 
 static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
-						void __iomem *d)
+					    void __iomem *d)
 {
 	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
 }
@@ -98,7 +89,7 @@
 
 	/* Register writes to GISB bus can take couple hundred nanoseconds
 	 * and are done for each packet, save these expensive writes unless
-	 * the platform is explicitely configured for 64-bits/LPAE.
+	 * the platform is explicitly configured for 64-bits/LPAE.
 	 */
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	if (priv->hw_params->flags & GENET_HAS_40BITS)
@@ -108,7 +99,7 @@
 
 /* Combined address + length/status setter */
 static inline void dmadesc_set(struct bcmgenet_priv *priv,
-				void __iomem *d, dma_addr_t addr, u32 val)
+			       void __iomem *d, dma_addr_t addr, u32 val)
 {
 	dmadesc_set_length_status(priv, d, val);
 	dmadesc_set_addr(priv, d, addr);
@@ -123,7 +114,7 @@
 
 	/* Register writes to GISB bus can take couple hundred nanoseconds
 	 * and are done for each packet, save these expensive writes unless
-	 * the platform is explicitely configured for 64-bits/LPAE.
+	 * the platform is explicitly configured for 64-bits/LPAE.
 	 */
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	if (priv->hw_params->flags & GENET_HAS_40BITS)
@@ -242,7 +233,7 @@
 }
 
 static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
-					enum dma_reg r)
+				      enum dma_reg r)
 {
 	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
 			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
@@ -256,7 +247,7 @@
 }
 
 static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
-					enum dma_reg r)
+				      enum dma_reg r)
 {
 	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
 			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
@@ -333,8 +324,8 @@
 static const u8 *genet_dma_ring_regs;
 
 static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
-						unsigned int ring,
-						enum dma_ring_reg r)
+					   unsigned int ring,
+					   enum dma_ring_reg r)
 {
 	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
 			(DMA_RING_SIZE * ring) +
@@ -342,9 +333,8 @@
 }
 
 static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
-						unsigned int ring,
-						u32 val,
-						enum dma_ring_reg r)
+					     unsigned int ring, u32 val,
+					     enum dma_ring_reg r)
 {
 	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
 			(DMA_RING_SIZE * ring) +
@@ -352,8 +342,8 @@
 }
 
 static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
-						unsigned int ring,
-						enum dma_ring_reg r)
+					   unsigned int ring,
+					   enum dma_ring_reg r)
 {
 	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
 			(DMA_RING_SIZE * ring) +
@@ -361,9 +351,8 @@
 }
 
 static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
-						unsigned int ring,
-						u32 val,
-						enum dma_ring_reg r)
+					     unsigned int ring, u32 val,
+					     enum dma_ring_reg r)
 {
 	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
 			(DMA_RING_SIZE * ring) +
@@ -371,7 +360,7 @@
 }
 
 static int bcmgenet_get_settings(struct net_device *dev,
-		struct ethtool_cmd *cmd)
+				 struct ethtool_cmd *cmd)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 
@@ -385,7 +374,7 @@
 }
 
 static int bcmgenet_set_settings(struct net_device *dev,
-		struct ethtool_cmd *cmd)
+				 struct ethtool_cmd *cmd)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 
@@ -458,7 +447,7 @@
 }
 
 static int bcmgenet_set_features(struct net_device *dev,
-		netdev_features_t features)
+				 netdev_features_t features)
 {
 	netdev_features_t changed = features ^ dev->features;
 	netdev_features_t wanted = dev->wanted_features;
@@ -625,12 +614,11 @@
 #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
 
 static void bcmgenet_get_drvinfo(struct net_device *dev,
-		struct ethtool_drvinfo *info)
+				 struct ethtool_drvinfo *info)
 {
 	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
 	strlcpy(info->version, "v2.0", sizeof(info->version));
 	info->n_stats = BCMGENET_STATS_LEN;
-
 }
 
 static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -643,8 +631,8 @@
 	}
 }
 
-static void bcmgenet_get_strings(struct net_device *dev,
-				u32 stringset, u8 *data)
+static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
+				 u8 *data)
 {
 	int i;
 
@@ -652,8 +640,8 @@
 	case ETH_SS_STATS:
 		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
 			memcpy(data + i * ETH_GSTRING_LEN,
-				bcmgenet_gstrings_stats[i].stat_string,
-				ETH_GSTRING_LEN);
+			       bcmgenet_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
 		}
 		break;
 	}
@@ -678,8 +666,8 @@
 		case BCMGENET_STAT_RUNT:
 			if (s->type != BCMGENET_STAT_MIB_RX)
 				offset = BCMGENET_STAT_OFFSET;
-			val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
-								j + offset);
+			val = bcmgenet_umac_readl(priv,
+						  UMAC_MIB_START + j + offset);
 			break;
 		case BCMGENET_STAT_MISC:
 			val = bcmgenet_umac_readl(priv, s->reg_offset);
@@ -696,8 +684,8 @@
 }
 
 static void bcmgenet_get_ethtool_stats(struct net_device *dev,
-					struct ethtool_stats *stats,
-					u64 *data)
+				       struct ethtool_stats *stats,
+				       u64 *data)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	int i;
@@ -765,7 +753,7 @@
 }
 
 static void bcmgenet_power_up(struct bcmgenet_priv *priv,
-				enum bcmgenet_power_mode mode)
+			      enum bcmgenet_power_mode mode)
 {
 	u32 reg;
 
@@ -850,37 +838,37 @@
 						  struct bcmgenet_tx_ring *ring)
 {
 	bcmgenet_intrl2_0_writel(priv,
-			UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
-			INTRL2_CPU_MASK_SET);
+				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+				 INTRL2_CPU_MASK_SET);
 }
 
 static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
 						 struct bcmgenet_tx_ring *ring)
 {
 	bcmgenet_intrl2_0_writel(priv,
-			UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
-			INTRL2_CPU_MASK_CLEAR);
+				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+				 INTRL2_CPU_MASK_CLEAR);
 }
 
 static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
-						struct bcmgenet_tx_ring *ring)
+					       struct bcmgenet_tx_ring *ring)
 {
-	bcmgenet_intrl2_1_writel(priv,
-			(1 << ring->index), INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+				 INTRL2_CPU_MASK_CLEAR);
 	priv->int1_mask &= ~(1 << ring->index);
 }
 
 static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
 						struct bcmgenet_tx_ring *ring)
 {
-	bcmgenet_intrl2_1_writel(priv,
-			(1 << ring->index), INTRL2_CPU_MASK_SET);
+	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+				 INTRL2_CPU_MASK_SET);
 	priv->int1_mask |= (1 << ring->index);
 }
 
 /* Unlocked version of the reclaim routine */
 static void __bcmgenet_tx_reclaim(struct net_device *dev,
-				struct bcmgenet_tx_ring *ring)
+				  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	int last_tx_cn, last_c_index, num_tx_bds;
@@ -888,7 +876,7 @@
 	struct netdev_queue *txq;
 	unsigned int c_index;
 
-	/* Compute how many buffers are transmited since last xmit call */
+	/* Compute how many buffers are transmitted since last xmit call */
 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
 	txq = netdev_get_tx_queue(dev, ring->queue);
 
@@ -903,9 +891,9 @@
 		last_tx_cn = num_tx_bds - last_c_index + c_index;
 
 	netif_dbg(priv, tx_done, dev,
-			"%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
-			__func__, ring->index,
-			c_index, last_tx_cn, last_c_index);
+		  "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
+		  __func__, ring->index,
+		  c_index, last_tx_cn, last_c_index);
 
 	/* Reclaim transmitted buffers */
 	while (last_tx_cn-- > 0) {
@@ -913,17 +901,17 @@
 		if (tx_cb_ptr->skb) {
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
 			dma_unmap_single(&dev->dev,
-					dma_unmap_addr(tx_cb_ptr, dma_addr),
-					tx_cb_ptr->skb->len,
-					DMA_TO_DEVICE);
+					 dma_unmap_addr(tx_cb_ptr, dma_addr),
+					 tx_cb_ptr->skb->len,
+					 DMA_TO_DEVICE);
 			bcmgenet_free_cb(tx_cb_ptr);
 		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
 			dev->stats.tx_bytes +=
 				dma_unmap_len(tx_cb_ptr, dma_len);
 			dma_unmap_page(&dev->dev,
-					dma_unmap_addr(tx_cb_ptr, dma_addr),
-					dma_unmap_len(tx_cb_ptr, dma_len),
-					DMA_TO_DEVICE);
+				       dma_unmap_addr(tx_cb_ptr, dma_addr),
+				       dma_unmap_len(tx_cb_ptr, dma_len),
+				       DMA_TO_DEVICE);
 			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
 		}
 		dev->stats.tx_packets++;
@@ -943,7 +931,7 @@
 }
 
 static void bcmgenet_tx_reclaim(struct net_device *dev,
-		struct bcmgenet_tx_ring *ring)
+				struct bcmgenet_tx_ring *ring)
 {
 	unsigned long flags;
 
@@ -1017,11 +1005,11 @@
 	return 0;
 }
 
-/* Transmit a SKB fragement */
+/* Transmit a SKB fragment */
 static int bcmgenet_xmit_frag(struct net_device *dev,
-				skb_frag_t *frag,
-				u16 dma_desc_flags,
-				struct bcmgenet_tx_ring *ring)
+			      skb_frag_t *frag,
+			      u16 dma_desc_flags,
+			      struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct device *kdev = &priv->pdev->dev;
@@ -1036,11 +1024,11 @@
 	tx_cb_ptr->skb = NULL;
 
 	mapping = skb_frag_dma_map(kdev, frag, 0,
-		skb_frag_size(frag), DMA_TO_DEVICE);
+				   skb_frag_size(frag), DMA_TO_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
 		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
-				__func__);
+			  __func__);
 		return ret;
 	}
 
@@ -1048,8 +1036,8 @@
 	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
 
 	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
-			(frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
-			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
+		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
 
 
 	ring->free_bds -= 1;
@@ -1112,8 +1100,9 @@
 			tx_csum_info |= STATUS_TX_CSUM_LV;
 			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
 				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
-		} else
+		} else {
 			tx_csum_info = 0;
+		}
 
 		status->tx_csum_info = tx_csum_info;
 	}
@@ -1153,11 +1142,16 @@
 	if (ring->free_bds <= nr_frags + 1) {
 		netif_tx_stop_queue(txq);
 		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
-				__func__, index, ring->queue);
+			   __func__, index, ring->queue);
 		ret = NETDEV_TX_BUSY;
 		goto out;
 	}
 
+	if (skb_padto(skb, ETH_ZLEN)) {
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
 	/* set the SKB transmit checksum */
 	if (priv->desc_64b_en) {
 		ret = bcmgenet_put_tx_csum(dev, skb);
@@ -1181,8 +1175,9 @@
 	/* xmit fragment */
 	for (i = 0; i < nr_frags; i++) {
 		ret = bcmgenet_xmit_frag(dev,
-				&skb_shinfo(skb)->frags[i],
-				(i == nr_frags - 1) ? DMA_EOP : 0, ring);
+					 &skb_shinfo(skb)->frags[i],
+					 (i == nr_frags - 1) ? DMA_EOP : 0,
+					 ring);
 		if (ret) {
 			ret = NETDEV_TX_OK;
 			goto out;
@@ -1195,7 +1190,7 @@
 	 * producer index, now write it down to the hardware
 	 */
 	bcmgenet_tdma_ring_writel(priv, ring->index,
-			ring->prod_index, TDMA_PROD_INDEX);
+				  ring->prod_index, TDMA_PROD_INDEX);
 
 	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
 		netif_tx_stop_queue(txq);
@@ -1209,16 +1204,14 @@
 }
 
 
-static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
-				struct enet_cb *cb)
+static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
 {
 	struct device *kdev = &priv->pdev->dev;
 	struct sk_buff *skb;
 	dma_addr_t mapping;
 	int ret;
 
-	skb = netdev_alloc_skb(priv->dev,
-				priv->rx_buf_len + SKB_ALIGNMENT);
+	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
 	if (!skb)
 		return -ENOMEM;
 
@@ -1226,12 +1219,12 @@
 	WARN_ON(cb->skb != NULL);
 	cb->skb = skb;
 	mapping = dma_map_single(kdev, skb->data,
-			priv->rx_buf_len, DMA_FROM_DEVICE);
+				 priv->rx_buf_len, DMA_FROM_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
 		bcmgenet_free_cb(cb);
 		netif_err(priv, rx_err, priv->dev,
-				"%s DMA map failed\n", __func__);
+			  "%s DMA map failed\n", __func__);
 		return ret;
 	}
 
@@ -1266,8 +1259,7 @@
 	unsigned int p_index;
 	unsigned int chksum_ok = 0;
 
-	p_index = bcmgenet_rdma_ring_readl(priv,
-			DESC_INDEX, RDMA_PROD_INDEX);
+	p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
 	p_index &= DMA_P_INDEX_MASK;
 
 	if (p_index < priv->rx_c_index)
@@ -1277,11 +1269,10 @@
 		rxpkttoprocess = p_index - priv->rx_c_index;
 
 	netif_dbg(priv, rx_status, dev,
-		"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
+		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
 
 	while ((rxpktprocessed < rxpkttoprocess) &&
-			(rxpktprocessed < budget)) {
-
+	       (rxpktprocessed < budget)) {
 		/* Unmap the packet contents such that we can use the
 		 * RSV from the 64 bytes descriptor when enabled and save
 		 * a 32-bits register read
@@ -1289,15 +1280,17 @@
 		cb = &priv->rx_cbs[priv->rx_read_ptr];
 		skb = cb->skb;
 		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
-				priv->rx_buf_len, DMA_FROM_DEVICE);
+				 priv->rx_buf_len, DMA_FROM_DEVICE);
 
 		if (!priv->desc_64b_en) {
-			dma_length_status = dmadesc_get_length_status(priv,
-							priv->rx_bds +
-							(priv->rx_read_ptr *
-							 DMA_DESC_SIZE));
+			dma_length_status =
+				dmadesc_get_length_status(priv,
+							  priv->rx_bds +
+							  (priv->rx_read_ptr *
+							   DMA_DESC_SIZE));
 		} else {
 			struct status_64 *status;
+
 			status = (struct status_64 *)skb->data;
 			dma_length_status = status->length_status;
 		}
@@ -1309,9 +1302,9 @@
 		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
 
 		netif_dbg(priv, rx_status, dev,
-			"%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
-			__func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
-			dma_length_status);
+			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
+			  __func__, p_index, priv->rx_c_index,
+			  priv->rx_read_ptr, dma_length_status);
 
 		rxpktprocessed++;
 
@@ -1327,7 +1320,7 @@
 
 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
 			netif_err(priv, rx_status, dev,
-					"Droping fragmented packet!\n");
+				  "dropping fragmented packet!\n");
 			dev->stats.rx_dropped++;
 			dev->stats.rx_errors++;
 			dev_kfree_skb_any(cb->skb);
@@ -1341,7 +1334,7 @@
 						DMA_RX_LG |
 						DMA_RX_RXER))) {
 			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
-						(unsigned int)dma_flag);
+				  (unsigned int)dma_flag);
 			if (dma_flag & DMA_RX_CRC_ERROR)
 				dev->stats.rx_crc_errors++;
 			if (dma_flag & DMA_RX_OV)
@@ -1360,7 +1353,7 @@
 		} /* error packet */
 
 		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
-				priv->desc_rxchk_en;
+			     priv->desc_rxchk_en;
 
 		skb_put(skb, len);
 		if (priv->desc_64b_en) {
@@ -1420,7 +1413,6 @@
 		ret = bcmgenet_rx_refill(priv, cb);
 		if (ret)
 			break;
-
 	}
 
 	return ret;
@@ -1436,8 +1428,8 @@
 
 		if (dma_unmap_addr(cb, dma_addr)) {
 			dma_unmap_single(&priv->dev->dev,
-					dma_unmap_addr(cb, dma_addr),
-					priv->rx_buf_len, DMA_FROM_DEVICE);
+					 dma_unmap_addr(cb, dma_addr),
+					 priv->rx_buf_len, DMA_FROM_DEVICE);
 			dma_unmap_addr_set(cb, dma_addr, 0);
 		}
 
@@ -1446,8 +1438,7 @@
 	}
 }
 
-static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask,
-				bool enable)
+static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
 {
 	u32 reg;
 
@@ -1490,7 +1481,7 @@
 
 	if (timeout == 1000) {
 		dev_err(kdev,
-			"timeout waiting for MAC to come out of resetn\n");
+			"timeout waiting for MAC to come out of reset\n");
 		return -ETIMEDOUT;
 	}
 
@@ -1523,7 +1514,8 @@
 	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
 	/* clear tx/rx counter */
 	bcmgenet_umac_writel(priv,
-		MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
+			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
+			     UMAC_MIB_CTRL);
 	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
 
 	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
@@ -1542,12 +1534,12 @@
 
 	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
 
-	/* Monitor cable plug/unpluged event for internal PHY */
-	if (phy_is_internal(priv->phydev))
+	/* Monitor cable plug/unplugged event for internal PHY */
+	if (phy_is_internal(priv->phydev)) {
 		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
-	else if (priv->ext_phy)
+	} else if (priv->ext_phy) {
 		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
-	else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
 		reg = bcmgenet_bp_mc_get(priv);
 		reg |= BIT(priv->hw_params->bp_in_en_shift);
 
@@ -1563,8 +1555,7 @@
 	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
 		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
 
-	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
-		INTRL2_CPU_MASK_CLEAR);
+	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
 
 	/* Enable rx/tx engine.*/
 	dev_dbg(kdev, "done init umac\n");
@@ -1613,28 +1604,28 @@
 	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
 	/* Disable rate control for now */
 	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
-			TDMA_FLOW_PERIOD);
+				  TDMA_FLOW_PERIOD);
 	/* Unclassified traffic goes to ring 16 */
 	bcmgenet_tdma_ring_writel(priv, index,
-			((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
-			DMA_RING_BUF_SIZE);
+				  ((size << DMA_RING_SIZE_SHIFT) |
+				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
 
 	first_bd = write_ptr;
 
 	/* Set start and end address, read and write pointers */
 	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
-			DMA_START_ADDR);
+				  DMA_START_ADDR);
 	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
-			TDMA_READ_PTR);
+				  TDMA_READ_PTR);
 	bcmgenet_tdma_ring_writel(priv, index, first_bd,
-			TDMA_WRITE_PTR);
+				  TDMA_WRITE_PTR);
 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
-			DMA_END_ADDR);
+				  DMA_END_ADDR);
 }
 
 /* Initialize a RDMA ring */
 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
-				  unsigned int index, unsigned int size)
+				 unsigned int index, unsigned int size)
 {
 	u32 words_per_bd = WORDS_PER_BD(priv);
 	int ret;
@@ -1645,8 +1636,8 @@
 	priv->rx_bd_assign_index = 0;
 	priv->rx_c_index = 0;
 	priv->rx_read_ptr = 0;
-	priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
-				GFP_KERNEL);
+	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
+			       GFP_KERNEL);
 	if (!priv->rx_cbs)
 		return -ENOMEM;
 
@@ -1660,14 +1651,15 @@
 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
 	bcmgenet_rdma_ring_writel(priv, index,
-		((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
-		DMA_RING_BUF_SIZE);
+				  ((size << DMA_RING_SIZE_SHIFT) |
+				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
 	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
 	bcmgenet_rdma_ring_writel(priv, index,
-		words_per_bd * size - 1, DMA_END_ADDR);
+				  words_per_bd * size - 1, DMA_END_ADDR);
 	bcmgenet_rdma_ring_writel(priv, index,
-			(DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
-			DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+				  (DMA_FC_THRESH_LO <<
+				   DMA_XOFF_THRESHOLD_SHIFT) |
+				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
 
 	return ret;
@@ -1714,10 +1706,10 @@
 		 * (ring 16)
 		 */
 		bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
-					i * priv->hw_params->bds_cnt,
-					(i + 1) * priv->hw_params->bds_cnt);
+				      i * priv->hw_params->bds_cnt,
+				      (i + 1) * priv->hw_params->bds_cnt);
 
-		/* Configure ring as decriptor ring and setup priority */
+		/* Configure ring as descriptor ring and setup priority */
 		ring_cfg |= 1 << i;
 		dma_priority |= ((GENET_Q0_PRIORITY + i) <<
 				(GENET_MAX_MQ_CNT + 1) * i);
@@ -1783,11 +1775,11 @@
 	/* Init tDma */
 	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
 
-	/* Initialize commont TX ring structures */
+	/* Initialize common TX ring structures */
 	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
 	priv->num_tx_bds = TOTAL_DESC;
-	priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
-				GFP_KERNEL);
+	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
+			       GFP_KERNEL);
 	if (!priv->tx_cbs) {
 		bcmgenet_fini_dma(priv);
 		return -ENOMEM;
@@ -1798,8 +1790,9 @@
 
 	/* initialize special ring 16 */
 	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
-			priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
-			TOTAL_DESC);
+			      priv->hw_params->tx_queues *
+			      priv->hw_params->bds_cnt,
+			      TOTAL_DESC);
 
 	return 0;
 }
@@ -1820,11 +1813,11 @@
 	priv->rx_c_index += work_done;
 	priv->rx_c_index &= DMA_C_INDEX_MASK;
 	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
-				priv->rx_c_index, RDMA_CONS_INDEX);
+				  priv->rx_c_index, RDMA_CONS_INDEX);
 	if (work_done < budget) {
 		napi_complete(napi);
-		bcmgenet_intrl2_0_writel(priv,
-			UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
+		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
+					 INTRL2_CPU_MASK_CLEAR);
 	}
 
 	return work_done;
@@ -1847,9 +1840,9 @@
 
 	/* Link UP/DOWN event */
 	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-		(priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+	    (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
 		phy_mac_interrupt(priv->phydev,
-			priv->irq0_stat & UMAC_IRQ_LINK_UP);
+				  priv->irq0_stat & UMAC_IRQ_LINK_UP);
 		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
 	}
 }
@@ -1864,11 +1857,11 @@
 	priv->irq1_stat =
 		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
 		~priv->int1_mask;
-	/* clear inerrupts*/
+	/* clear interrupts */
 	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
-		"%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
 	/* Check the MBDONE interrupts.
 	 * packet is done, reclaim descriptors
 	 */
@@ -1877,7 +1870,7 @@
 		for (index = 0; index < 16; index++) {
 			if (priv->irq1_stat & (1 << index))
 				bcmgenet_tx_reclaim(priv->dev,
-						&priv->tx_rings[index]);
+						    &priv->tx_rings[index]);
 		}
 	}
 	return IRQ_HANDLED;
@@ -1892,11 +1885,11 @@
 	priv->irq0_stat =
 		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
-	/* clear inerrupts*/
+	/* clear interrupts */
 	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
-		"IRQ=0x%x\n", priv->irq0_stat);
+		  "IRQ=0x%x\n", priv->irq0_stat);
 
 	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
 		/* We use NAPI(software interrupt throttling, if
@@ -1904,8 +1897,8 @@
 		 * Disable interrupt, will be enabled in the poll method.
 		 */
 		if (likely(napi_schedule_prep(&priv->napi))) {
-			bcmgenet_intrl2_0_writel(priv,
-				UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
+			bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
+						 INTRL2_CPU_MASK_SET);
 			__napi_schedule(&priv->napi);
 		}
 	}
@@ -1926,7 +1919,7 @@
 	}
 
 	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-		priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
 		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
 		wake_up(&priv->wq);
 	}
@@ -1958,7 +1951,7 @@
 }
 
 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
-				  unsigned char *addr)
+				 unsigned char *addr)
 {
 	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
 			(addr[2] << 8) | addr[3], UMAC_MAC0);
@@ -2079,14 +2072,14 @@
 	bcmgenet_enable_dma(priv, dma_ctrl);
 
 	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
-			dev->name, priv);
+			  dev->name, priv);
 	if (ret < 0) {
 		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
 		goto err_fini_dma;
 	}
 
 	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
-				dev->name, priv);
+			  dev->name, priv);
 	if (ret < 0) {
 		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
 		goto err_irq0;
@@ -2127,8 +2120,7 @@
 	}
 
 	if (timeout == DMA_TIMEOUT_VAL) {
-		netdev_warn(priv->dev,
-			"Timed out while disabling TX DMA\n");
+		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
 		ret = -ETIMEDOUT;
 	}
 
@@ -2151,9 +2143,8 @@
 	}
 
 	if (timeout == DMA_TIMEOUT_VAL) {
-		netdev_warn(priv->dev,
-			"Timed out while disabling RX DMA\n");
-			ret = -ETIMEDOUT;
+		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+		ret = -ETIMEDOUT;
 	}
 
 	return ret;
@@ -2232,12 +2223,11 @@
 {
 	u32 reg;
 
-	bcmgenet_umac_writel(priv,
-			addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
-	bcmgenet_umac_writel(priv,
-			addr[2] << 24 | addr[3] << 16 |
-			addr[4] << 8 | addr[5],
-			UMAC_MDF_ADDR + ((*i + 1) * 4));
+	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
+			     UMAC_MDF_ADDR + (*i * 4));
+	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
+			     addr[4] << 8 | addr[5],
+			     UMAC_MDF_ADDR + ((*i + 1) * 4));
 	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
 	reg |= (1 << (MAX_MC_COUNT - *mc));
 	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
@@ -2254,7 +2244,7 @@
 
 	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
 
-	/* Promiscous mode */
+	/* Promiscuous mode */
 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
 	if (dev->flags & IFF_PROMISC) {
 		reg |= CMD_PROMISC;
@@ -2434,7 +2424,7 @@
 
 	/* Print the GENET core version */
 	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
-		major, (reg >> 16) & 0x0f, reg & 0xffff);
+		 major, (reg >> 16) & 0x0f, reg & 0xffff);
 
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	if (!(params->flags & GENET_HAS_40BITS))
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index c61cd98..c862d06 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -4,18 +4,8 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- *
-*/
+ */
+
 #ifndef __BCMGENET_H__
 #define __BCMGENET_H__
 
@@ -514,9 +504,9 @@
 	unsigned int	cb_ptr;		/* Tx ring initial CB ptr */
 	unsigned int	end_ptr;	/* Tx ring end CB ptr */
 	void (*int_enable)(struct bcmgenet_priv *priv,
-				struct bcmgenet_tx_ring *);
+			   struct bcmgenet_tx_ring *);
 	void (*int_disable)(struct bcmgenet_priv *priv,
-				struct bcmgenet_tx_ring *);
+			    struct bcmgenet_tx_ring *);
 };
 
 /* device context */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index b1338c9..1896161 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -6,15 +6,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
 
@@ -44,15 +35,15 @@
 	u32 reg;
 
 	bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
-			(location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
+			     (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
 	/* Start MDIO transaction*/
 	reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
 	reg |= MDIO_START_BUSY;
 	bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
 	wait_event_timeout(priv->wq,
-			!(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
-				& MDIO_START_BUSY),
-			HZ / 100);
+			   !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
+			   & MDIO_START_BUSY),
+			   HZ / 100);
 	ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
 
 	if (ret & MDIO_READ_FAIL)
@@ -63,22 +54,22 @@
 
 /* write a value to the MII */
 static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
-			int location, u16 val)
+			      int location, u16 val)
 {
 	struct net_device *dev = bus->priv;
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	u32 reg;
 
 	bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
-			(location << MDIO_REG_SHIFT) | (0xffff & val)),
-			UMAC_MDIO_CMD);
+			     (location << MDIO_REG_SHIFT) | (0xffff & val)),
+			     UMAC_MDIO_CMD);
 	reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
 	reg |= MDIO_START_BUSY;
 	bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
 	wait_event_timeout(priv->wq,
-			!(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
-				MDIO_START_BUSY),
-			HZ / 100);
+			   !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
+			   MDIO_START_BUSY),
+			   HZ / 100);
 
 	return 0;
 }
@@ -248,7 +239,7 @@
 		phy_name = "external MII";
 		phydev->supported &= PHY_BASIC_FEATURES;
 		bcmgenet_sys_writel(priv,
-				PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+				    PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
 		break;
 
 	case PHY_INTERFACE_MODE_REVMII:
@@ -284,7 +275,7 @@
 		reg |= RGMII_MODE_EN | id_mode_dis;
 		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 		bcmgenet_sys_writel(priv,
-				PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+				    PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
 		break;
 	default:
 		dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
@@ -363,7 +354,7 @@
 		priv->mii_bus->irq[phydev->addr] = PHY_POLL;
 
 	pr_info("attached PHY at address %d [%s]\n",
-			phydev->addr, phydev->drv->name);
+		phydev->addr, phydev->drv->name);
 
 	return 0;
 }
@@ -388,9 +379,9 @@
 	bus->read = bcmgenet_mii_read;
 	bus->write = bcmgenet_mii_write;
 	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
-			priv->pdev->name, priv->pdev->id);
+		 priv->pdev->name, priv->pdev->id);
 
-	bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
 	if (!bus->irq) {
 		mdiobus_free(priv->mii_bus);
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index e9daa07..ca5d779 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -52,6 +52,9 @@
 					| MACB_BIT(TXERR))
 #define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 
+#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
+#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
+
 /*
  * Graceful stop timeouts in us. We should allow up to
  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
@@ -264,7 +267,8 @@
 				reg |= MACB_BIT(FD);
 			if (phydev->speed == SPEED_100)
 				reg |= MACB_BIT(SPD);
-			if (phydev->speed == SPEED_1000)
+			if (phydev->speed == SPEED_1000 &&
+			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
 				reg |= GEM_BIT(GBE);
 
 			macb_or_gem_writel(bp, NCFGR, reg);
@@ -337,7 +341,7 @@
 	}
 
 	/* mask with MAC supported features */
-	if (macb_is_gem(bp))
+	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
 		phydev->supported &= PHY_GBIT_FEATURES;
 	else
 		phydev->supported &= PHY_BASIC_FEATURES;
@@ -467,6 +471,24 @@
 	return -ETIMEDOUT;
 }
 
+static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
+{
+	if (tx_skb->mapping) {
+		if (tx_skb->mapped_as_page)
+			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
+				       tx_skb->size, DMA_TO_DEVICE);
+		else
+			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
+					 tx_skb->size, DMA_TO_DEVICE);
+		tx_skb->mapping = 0;
+	}
+
+	if (tx_skb->skb) {
+		dev_kfree_skb_any(tx_skb->skb);
+		tx_skb->skb = NULL;
+	}
+}
+
 static void macb_tx_error_task(struct work_struct *work)
 {
 	struct macb	*bp = container_of(work, struct macb, tx_error_task);
@@ -504,10 +526,23 @@
 		skb = tx_skb->skb;
 
 		if (ctrl & MACB_BIT(TX_USED)) {
-			netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
-				    macb_tx_ring_wrap(tail), skb->data);
-			bp->stats.tx_packets++;
-			bp->stats.tx_bytes += skb->len;
+			/* skb is set for the last buffer of the frame */
+			while (!skb) {
+				macb_tx_unmap(bp, tx_skb);
+				tail++;
+				tx_skb = macb_tx_skb(bp, tail);
+				skb = tx_skb->skb;
+			}
+
+			/* ctrl still refers to the first buffer descriptor
+			 * since it's the only one written back by the hardware
+			 */
+			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
+				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+					    macb_tx_ring_wrap(tail), skb->data);
+				bp->stats.tx_packets++;
+				bp->stats.tx_bytes += skb->len;
+			}
 		} else {
 			/*
 			 * "Buffers exhausted mid-frame" errors may only happen
@@ -521,10 +556,7 @@
 			desc->ctrl = ctrl | MACB_BIT(TX_USED);
 		}
 
-		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
-				 DMA_TO_DEVICE);
-		tx_skb->skb = NULL;
-		dev_kfree_skb(skb);
+		macb_tx_unmap(bp, tx_skb);
 	}
 
 	/* Make descriptor updates visible to hardware */
@@ -572,20 +604,35 @@
 
 		ctrl = desc->ctrl;
 
+		/* TX_USED bit is only set by hardware on the very first buffer
+		 * descriptor of the transmitted frame.
+		 */
 		if (!(ctrl & MACB_BIT(TX_USED)))
 			break;
 
-		tx_skb = macb_tx_skb(bp, tail);
-		skb = tx_skb->skb;
+		/* Process all buffers of the current transmitted frame */
+		for (;; tail++) {
+			tx_skb = macb_tx_skb(bp, tail);
+			skb = tx_skb->skb;
 
-		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
-			macb_tx_ring_wrap(tail), skb->data);
-		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
-				 DMA_TO_DEVICE);
-		bp->stats.tx_packets++;
-		bp->stats.tx_bytes += skb->len;
-		tx_skb->skb = NULL;
-		dev_kfree_skb_irq(skb);
+			/* First, update TX stats if needed */
+			if (skb) {
+				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+					    macb_tx_ring_wrap(tail), skb->data);
+				bp->stats.tx_packets++;
+				bp->stats.tx_bytes += skb->len;
+			}
+
+			/* Now we can safely release resources */
+			macb_tx_unmap(bp, tx_skb);
+
+			/* skb is set only for the last buffer of the frame.
+			 * WARNING: at this point skb has been freed by
+			 * macb_tx_unmap().
+			 */
+			if (skb)
+				break;
+		}
 	}
 
 	bp->tx_tail = tail;
@@ -718,6 +765,10 @@
 
 		skb->protocol = eth_type_trans(skb, bp->dev);
 		skb_checksum_none_assert(skb);
+		if (bp->dev->features & NETIF_F_RXCSUM &&
+		    !(bp->dev->flags & IFF_PROMISC) &&
+		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		bp->stats.rx_packets++;
 		bp->stats.rx_bytes += skb->len;
@@ -1001,15 +1052,145 @@
 }
 #endif
 
+static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
+						     unsigned int len)
+{
+	return (len + bp->max_tx_length - 1) / bp->max_tx_length;
+}
+
+static unsigned int macb_tx_map(struct macb *bp,
+				struct sk_buff *skb)
+{
+	dma_addr_t mapping;
+	unsigned int len, entry, i, tx_head = bp->tx_head;
+	struct macb_tx_skb *tx_skb = NULL;
+	struct macb_dma_desc *desc;
+	unsigned int offset, size, count = 0;
+	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int eof = 1;
+	u32 ctrl;
+
+	/* First, map non-paged data */
+	len = skb_headlen(skb);
+	offset = 0;
+	while (len) {
+		size = min(len, bp->max_tx_length);
+		entry = macb_tx_ring_wrap(tx_head);
+		tx_skb = &bp->tx_skb[entry];
+
+		mapping = dma_map_single(&bp->pdev->dev,
+					 skb->data + offset,
+					 size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&bp->pdev->dev, mapping))
+			goto dma_error;
+
+		/* Save info to properly release resources */
+		tx_skb->skb = NULL;
+		tx_skb->mapping = mapping;
+		tx_skb->size = size;
+		tx_skb->mapped_as_page = false;
+
+		len -= size;
+		offset += size;
+		count++;
+		tx_head++;
+	}
+
+	/* Then, map paged data from fragments */
+	for (f = 0; f < nr_frags; f++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		len = skb_frag_size(frag);
+		offset = 0;
+		while (len) {
+			size = min(len, bp->max_tx_length);
+			entry = macb_tx_ring_wrap(tx_head);
+			tx_skb = &bp->tx_skb[entry];
+
+			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
+						   offset, size, DMA_TO_DEVICE);
+			if (dma_mapping_error(&bp->pdev->dev, mapping))
+				goto dma_error;
+
+			/* Save info to properly release resources */
+			tx_skb->skb = NULL;
+			tx_skb->mapping = mapping;
+			tx_skb->size = size;
+			tx_skb->mapped_as_page = true;
+
+			len -= size;
+			offset += size;
+			count++;
+			tx_head++;
+		}
+	}
+
+	/* Should never happen */
+	if (unlikely(tx_skb == NULL)) {
+		netdev_err(bp->dev, "BUG! empty skb!\n");
+		return 0;
+	}
+
+	/* This is the last buffer of the frame: save socket buffer */
+	tx_skb->skb = skb;
+
+	/* Update TX ring: update buffer descriptors in reverse order
+	 * to avoid race condition
+	 */
+
+	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
+	 * to set the end of TX queue
+	 */
+	i = tx_head;
+	entry = macb_tx_ring_wrap(i);
+	ctrl = MACB_BIT(TX_USED);
+	desc = &bp->tx_ring[entry];
+	desc->ctrl = ctrl;
+
+	do {
+		i--;
+		entry = macb_tx_ring_wrap(i);
+		tx_skb = &bp->tx_skb[entry];
+		desc = &bp->tx_ring[entry];
+
+		ctrl = (u32)tx_skb->size;
+		if (eof) {
+			ctrl |= MACB_BIT(TX_LAST);
+			eof = 0;
+		}
+		if (unlikely(entry == (TX_RING_SIZE - 1)))
+			ctrl |= MACB_BIT(TX_WRAP);
+
+		/* Set TX buffer descriptor */
+		desc->addr = tx_skb->mapping;
+		/* desc->addr must be visible to hardware before clearing
+		 * 'TX_USED' bit in desc->ctrl.
+		 */
+		wmb();
+		desc->ctrl = ctrl;
+	} while (i != bp->tx_head);
+
+	bp->tx_head = tx_head;
+
+	return count;
+
+dma_error:
+	netdev_err(bp->dev, "TX DMA map failed\n");
+
+	for (i = bp->tx_head; i != tx_head; i++) {
+		tx_skb = macb_tx_skb(bp, i);
+
+		macb_tx_unmap(bp, tx_skb);
+	}
+
+	return 0;
+}
+
 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
-	dma_addr_t mapping;
-	unsigned int len, entry;
-	struct macb_dma_desc *desc;
-	struct macb_tx_skb *tx_skb;
-	u32 ctrl;
 	unsigned long flags;
+	unsigned int count, nr_frags, frag_size, f;
 
 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
 	netdev_vdbg(bp->dev,
@@ -1020,44 +1201,34 @@
 		       skb->data, 16, true);
 #endif
 
-	len = skb->len;
+	/* Count how many TX buffer descriptors are needed to send this
+	 * socket buffer: skb fragments of jumbo frames may need to be
+	 * split into many buffer descriptors.
+	 */
+	count = macb_count_tx_descriptors(bp, skb_headlen(skb));
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for (f = 0; f < nr_frags; f++) {
+		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
+		count += macb_count_tx_descriptors(bp, frag_size);
+	}
+
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
-	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < count) {
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&bp->lock, flags);
-		netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
 			   bp->tx_head, bp->tx_tail);
 		return NETDEV_TX_BUSY;
 	}
 
-	entry = macb_tx_ring_wrap(bp->tx_head);
-	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
-	mapping = dma_map_single(&bp->pdev->dev, skb->data,
-				 len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+	/* Map socket buffer for DMA transfer */
+	if (!macb_tx_map(bp, skb)) {
 		dev_kfree_skb_any(skb);
 		goto unlock;
 	}
 
-	bp->tx_head++;
-	tx_skb = &bp->tx_skb[entry];
-	tx_skb->skb = skb;
-	tx_skb->mapping = mapping;
-	netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
-		   skb->data, (unsigned long)mapping);
-
-	ctrl = MACB_BF(TX_FRMLEN, len);
-	ctrl |= MACB_BIT(TX_LAST);
-	if (entry == (TX_RING_SIZE - 1))
-		ctrl |= MACB_BIT(TX_WRAP);
-
-	desc = &bp->tx_ring[entry];
-	desc->addr = mapping;
-	desc->ctrl = ctrl;
-
 	/* Make newly initialized descriptor visible to hardware */
 	wmb();
 
@@ -1342,7 +1513,7 @@
 /*
  * Configure the receive DMA engine
  * - use the correct receive buffer size
- * - set the possibility to use INCR16 bursts
+ * - set best burst length for DMA operations
  *   (if not supported by FIFO, it will fallback to default)
  * - set both rx/tx packet buffers to full memory size
  * These are configurable parameters for GEM.
@@ -1354,24 +1525,20 @@
 	if (macb_is_gem(bp)) {
 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
 		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
-		dmacfg |= GEM_BF(FBLDO, 16);
+		if (bp->dma_burst_length)
+			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
 		dmacfg &= ~GEM_BIT(ENDIA);
+		if (bp->dev->features & NETIF_F_HW_CSUM)
+			dmacfg |= GEM_BIT(TXCOEN);
+		else
+			dmacfg &= ~GEM_BIT(TXCOEN);
+		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
+			   dmacfg);
 		gem_writel(bp, DMACFG, dmacfg);
 	}
 }
 
-/*
- * Configure peripheral capacities according to integration options used
- */
-static void macb_configure_caps(struct macb *bp)
-{
-	if (macb_is_gem(bp)) {
-		if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
-			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
-	}
-}
-
 static void macb_init_hw(struct macb *bp)
 {
 	u32 config;
@@ -1386,6 +1553,8 @@
 	config |= MACB_BIT(BIG);		/* Receive oversized frames */
 	if (bp->dev->flags & IFF_PROMISC)
 		config |= MACB_BIT(CAF);	/* Copy All Frames */
+	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
+		config |= GEM_BIT(RXCOEN);
 	if (!(bp->dev->flags & IFF_BROADCAST))
 		config |= MACB_BIT(NBC);	/* No BroadCast */
 	config |= macb_dbw(bp);
@@ -1394,7 +1563,6 @@
 	bp->duplex = DUPLEX_HALF;
 
 	macb_configure_dma(bp);
-	macb_configure_caps(bp);
 
 	/* Initialize TX and RX buffers */
 	macb_writel(bp, RBQP, bp->rx_ring_dma);
@@ -1500,13 +1668,22 @@
 
 	cfg = macb_readl(bp, NCFGR);
 
-	if (dev->flags & IFF_PROMISC)
+	if (dev->flags & IFF_PROMISC) {
 		/* Enable promiscuous mode */
 		cfg |= MACB_BIT(CAF);
-	else if (dev->flags & (~IFF_PROMISC))
-		 /* Disable promiscuous mode */
+
+		/* Disable RX checksum offload */
+		if (macb_is_gem(bp))
+			cfg &= ~GEM_BIT(RXCOEN);
+	} else {
+		/* Disable promiscuous mode */
 		cfg &= ~MACB_BIT(CAF);
 
+		/* Enable RX checksum offload only if requested */
+		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
+			cfg |= GEM_BIT(RXCOEN);
+	}
+
 	if (dev->flags & IFF_ALLMULTI) {
 		/* Enable all multicast mode */
 		macb_or_gem_writel(bp, HRB, -1);
@@ -1767,6 +1944,40 @@
 }
 EXPORT_SYMBOL_GPL(macb_ioctl);
 
+static int macb_set_features(struct net_device *netdev,
+			     netdev_features_t features)
+{
+	struct macb *bp = netdev_priv(netdev);
+	netdev_features_t changed = features ^ netdev->features;
+
+	/* TX checksum offload */
+	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
+		u32 dmacfg;
+
+		dmacfg = gem_readl(bp, DMACFG);
+		if (features & NETIF_F_HW_CSUM)
+			dmacfg |= GEM_BIT(TXCOEN);
+		else
+			dmacfg &= ~GEM_BIT(TXCOEN);
+		gem_writel(bp, DMACFG, dmacfg);
+	}
+
+	/* RX checksum offload */
+	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
+		u32 netcfg;
+
+		netcfg = gem_readl(bp, NCFGR);
+		if (features & NETIF_F_RXCSUM &&
+		    !(netdev->flags & IFF_PROMISC))
+			netcfg |= GEM_BIT(RXCOEN);
+		else
+			netcfg &= ~GEM_BIT(RXCOEN);
+		gem_writel(bp, NCFGR, netcfg);
+	}
+
+	return 0;
+}
+
 static const struct net_device_ops macb_netdev_ops = {
 	.ndo_open		= macb_open,
 	.ndo_stop		= macb_close,
@@ -1780,20 +1991,77 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= macb_poll_controller,
 #endif
+	.ndo_set_features	= macb_set_features,
 };
 
 #if defined(CONFIG_OF)
+static struct macb_config pc302gem_config = {
+	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+	.dma_burst_length = 16,
+};
+
+static struct macb_config sama5d3_config = {
+	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+	.dma_burst_length = 16,
+};
+
+static struct macb_config sama5d4_config = {
+	.caps = 0,
+	.dma_burst_length = 4,
+};
+
 static const struct of_device_id macb_dt_ids[] = {
 	{ .compatible = "cdns,at32ap7000-macb" },
 	{ .compatible = "cdns,at91sam9260-macb" },
 	{ .compatible = "cdns,macb" },
-	{ .compatible = "cdns,pc302-gem" },
-	{ .compatible = "cdns,gem" },
+	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
+	{ .compatible = "cdns,gem", .data = &pc302gem_config },
+	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, macb_dt_ids);
 #endif
 
+/*
+ * Configure peripheral capabilities according to device tree
+ * and integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+	u32 dcfg;
+	const struct of_device_id *match;
+	const struct macb_config *config;
+
+	if (bp->pdev->dev.of_node) {
+		match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
+		if (match && match->data) {
+			config = (const struct macb_config *)match->data;
+
+			bp->caps = config->caps;
+			/*
+			 * As we have access to the matching node, configure
+			 * DMA burst length as well
+			 */
+			bp->dma_burst_length = config->dma_burst_length;
+		}
+	}
+
+	if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
+		bp->caps |= MACB_CAPS_MACB_IS_GEM;
+
+	if (macb_is_gem(bp)) {
+		dcfg = gem_readl(bp, DCFG1);
+		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
+			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+		dcfg = gem_readl(bp, DCFG2);
+		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
+			bp->caps |= MACB_CAPS_FIFO_MODE;
+	}
+
+	netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
+}
+
 static int __init macb_probe(struct platform_device *pdev)
 {
 	struct macb_platform_data *pdata;
@@ -1828,9 +2096,6 @@
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	/* TODO: Actually, we have some interesting features... */
-	dev->features |= 0;
-
 	bp = netdev_priv(dev);
 	bp->pdev = pdev;
 	bp->dev = dev;
@@ -1897,19 +2162,33 @@
 
 	dev->base_addr = regs->start;
 
+	/* setup capabilities */
+	macb_configure_caps(bp);
+
 	/* setup appropriate routines according to adapter type */
 	if (macb_is_gem(bp)) {
+		bp->max_tx_length = GEM_MAX_TX_LEN;
 		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
 		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
 		bp->macbgem_ops.mog_init_rings = gem_init_rings;
 		bp->macbgem_ops.mog_rx = gem_rx;
 	} else {
+		bp->max_tx_length = MACB_MAX_TX_LEN;
 		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
 		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
 		bp->macbgem_ops.mog_init_rings = macb_init_rings;
 		bp->macbgem_ops.mog_rx = macb_rx;
 	}
 
+	/* Set features */
+	dev->hw_features = NETIF_F_SG;
+	/* Checksum offload is only available on gem with packet buffer */
+	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
+		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+	if (bp->caps & MACB_CAPS_SG_DISABLED)
+		dev->hw_features &= ~NETIF_F_SG;
+	dev->features = dev->hw_features;
+
 	/* Set MII management clock divider */
 	config = macb_mdc_clk_div(bp);
 	config |= macb_dbw(bp);
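
The capability mechanism introduced above ties each compatible string to a
struct macb_config; macb_configure_caps() copies the caps mask and DMA burst
length into struct macb before the netdev features are derived from them. A
minimal sketch of how a further SoC entry could be described, assuming a
hypothetical "vendor,foo-gem" binding (the name and values are illustrative
only, not part of this patch):

	static struct macb_config foo_gem_config = {
		.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE,	/* keep SG enabled */
		.dma_burst_length = 8,
	};

	/* added alongside the existing entries in macb_dt_ids[] */
	{ .compatible = "vendor,foo-gem", .data = &foo_gem_config },
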
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 51c0244..517c09d 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -164,6 +164,8 @@
 #define GEM_CLK_SIZE				3
 #define GEM_DBW_OFFSET				21
 #define GEM_DBW_SIZE				2
+#define GEM_RXCOEN_OFFSET			24
+#define GEM_RXCOEN_SIZE				1
 
 /* Constants for data bus width. */
 #define GEM_DBW32				0
@@ -305,6 +307,12 @@
 #define GEM_DBWDEF_OFFSET			25
 #define GEM_DBWDEF_SIZE				3
 
+/* Bitfields in DCFG2. */
+#define GEM_RX_PKT_BUFF_OFFSET			20
+#define GEM_RX_PKT_BUFF_SIZE			1
+#define GEM_TX_PKT_BUFF_OFFSET			21
+#define GEM_TX_PKT_BUFF_SIZE			1
+
 /* Constants for CLK */
 #define MACB_CLK_DIV8				0
 #define MACB_CLK_DIV16				1
@@ -326,7 +334,11 @@
 #define MACB_MAN_CODE				2
 
 /* Capability mask bits */
-#define MACB_CAPS_ISR_CLEAR_ON_WRITE		0x1
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE		0x00000001
+#define MACB_CAPS_FIFO_MODE			0x10000000
+#define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
+#define MACB_CAPS_SG_DISABLED			0x40000000
+#define MACB_CAPS_MACB_IS_GEM			0x80000000
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)					\
@@ -442,6 +454,14 @@
 #define MACB_RX_BROADCAST_OFFSET		31
 #define MACB_RX_BROADCAST_SIZE			1
 
+/* RX checksum offload disabled: bit 24 clear in NCFGR */
+#define GEM_RX_TYPEID_MATCH_OFFSET		22
+#define GEM_RX_TYPEID_MATCH_SIZE		2
+
+/* RX checksum offload enabled: bit 24 set in NCFGR */
+#define GEM_RX_CSUM_OFFSET			22
+#define GEM_RX_CSUM_SIZE			2
+
 #define MACB_TX_FRMLEN_OFFSET			0
 #define MACB_TX_FRMLEN_SIZE			11
 #define MACB_TX_LAST_OFFSET			15
@@ -459,14 +479,32 @@
 #define MACB_TX_USED_OFFSET			31
 #define MACB_TX_USED_SIZE			1
 
+#define GEM_TX_FRMLEN_OFFSET			0
+#define GEM_TX_FRMLEN_SIZE			14
+
+/* Buffer descriptor constants */
+#define GEM_RX_CSUM_NONE			0
+#define GEM_RX_CSUM_IP_ONLY			1
+#define GEM_RX_CSUM_IP_TCP			2
+#define GEM_RX_CSUM_IP_UDP			3
+
+/* limit RX checksum offload to TCP and UDP packets */
+#define GEM_RX_CSUM_CHECKED_MASK		2
+
 /**
  * struct macb_tx_skb - data about an skb which is being transmitted
- * @skb: skb currently being transmitted
- * @mapping: DMA address of the skb's data buffer
+ * @skb: skb currently being transmitted, only set for the last buffer
+ *       of the frame
+ * @mapping: DMA address of the skb's fragment buffer
+ * @size: size of the DMA mapped buffer
+ * @mapped_as_page: true when buffer was mapped with skb_frag_dma_map(),
+ *                  false when buffer was mapped with dma_map_single()
  */
 struct macb_tx_skb {
 	struct sk_buff		*skb;
 	dma_addr_t		mapping;
+	size_t			size;
+	bool			mapped_as_page;
 };
 
 /*
@@ -554,6 +592,11 @@
 	int	(*mog_rx)(struct macb *bp, int budget);
 };
 
+struct macb_config {
+	u32			caps;
+	unsigned int		dma_burst_length;
+};
+
 struct macb {
 	void __iomem		*regs;
 
@@ -595,6 +638,7 @@
 	unsigned int 		duplex;
 
 	u32			caps;
+	unsigned int		dma_burst_length;
 
 	phy_interface_t		phy_interface;
 
@@ -602,6 +646,7 @@
 	struct sk_buff *skb;			/* holds skb until xmit interrupt completes */
 	dma_addr_t skb_physaddr;		/* phys addr from pci_map_single */
 	int skb_length;				/* saved skb length for pci_unmap_single */
+	unsigned int		max_tx_length;
 };
 
 extern const struct ethtool_ops macb_ethtool_ops;
@@ -615,7 +660,7 @@
 
 static inline bool macb_is_gem(struct macb *bp)
 {
-	return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
+	return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
 }
 
 #endif /* _MACB_H */
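
The GEM_RX_CSUM descriptor bits above are only meaningful when RXCOEN is set
in NCFGR; values 2 (IP+TCP) and 3 (IP+UDP) mean the hardware verified the
checksum, which is what GEM_RX_CSUM_CHECKED_MASK selects. A minimal sketch of
how the receive path might consume them, assuming ctrl holds the descriptor
control word and skb the freshly built buffer (the gem_rx() changes themselves
are not shown in this hunk):

	/* flag the skb only when hardware verified a TCP or UDP checksum */
	if (bp->dev->features & NETIF_F_RXCSUM &&
	    !(bp->dev->flags & IFF_PROMISC) &&
	    (GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
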
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e768852..448bec1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -545,7 +545,7 @@
 		unsigned char *bp;
 		int i;
 
-		if (dir == T4_MEMORY_WRITE) {
+		if (dir == T4_MEMORY_READ) {
 			last.word = (__force __be32) t4_read_reg(adap,
 							mem_base + offset);
 			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e0efb21..66fe1f6 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2696,9 +2696,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int
-fec_suspend(struct device *dev)
+static int __maybe_unused fec_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
@@ -2723,8 +2721,7 @@
 	return 0;
 }
 
-static int
-fec_resume(struct device *dev)
+static int __maybe_unused fec_resume(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
@@ -2759,7 +2756,6 @@
 		regulator_disable(fep->reg_phy);
 	return ret;
 }
-#endif /* CONFIG_PM_SLEEP */
 
 static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
 
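
For context on the fec change above: SIMPLE_DEV_PM_OPS() only references its
suspend/resume callbacks when CONFIG_PM_SLEEP is enabled, so dropping the
#ifdef and marking the functions __maybe_unused keeps !CONFIG_PM_SLEEP builds
free of defined-but-unused warnings. A minimal sketch of the pattern with a
hypothetical foo driver:

	static int __maybe_unused foo_suspend(struct device *dev)
	{
		return 0;	/* quiesce the hardware here */
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		return 0;	/* bring the hardware back up here */
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
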
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 0c9d55c..6e7db66 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -46,7 +46,7 @@
 #define MDIO_DATA_BSY		(1 << 31)
 
 /*
- * Wait untill the MDIO bus is free
+ * Wait until the MDIO bus is free
  */
 static int xgmac_wait_until_free(struct device *dev,
 				 struct tgec_mdio_controller __iomem *regs)
@@ -163,7 +163,7 @@
 	/* Return all Fs if nothing was there */
 	if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
 		dev_err(&bus->dev,
-			"Error while reading PHY%d reg at %d.%d\n",
+			"Error while reading PHY%d reg at %d.%hhu\n",
 			phy_id, dev_addr, regnum);
 		return 0xffff;
 	}
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index d18e892..bb7ab3c 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -342,6 +342,7 @@
 #define E1000_TIPG_IPGR2_SHIFT  20
 
 #define MAX_JUMBO_FRAME_SIZE    0x3F00
+#define E1000_TX_PTR_GAP		0x1F
 
 /* Extended Configuration Control and Size */
 #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f236861..48b74a5 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2444,7 +2444,7 @@
 			return ret_val;
 		e1e_rphy(hw, PHY_REG(776, 20), &data);
 		data &= ~(0x3FF << 2);
-		data |= (0x1A << 2);
+		data |= (E1000_TX_PTR_GAP << 2);
 		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
 		if (ret_val)
 			return ret_val;
@@ -4605,14 +4605,23 @@
 
 			/* Disable LPLU if both link partners support 100BaseT
 			 * EEE and 100Full is advertised on both ends of the
-			 * link.
+			 * link, and enable Auto Enable LPI since there will
+			 * be no driver to enable LPI while in Sx.
 			 */
 			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
 			    (dev_spec->eee_lp_ability &
 			     I82579_EEE_100_SUPPORTED) &&
-			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
+			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
 				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
 					      E1000_PHY_CTRL_NOND0A_LPLU);
+
+				/* Set Auto Enable LPI after link up */
+				e1e_rphy_locked(hw,
+						I217_LPI_GPIO_CTRL, &phy_reg);
+				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+				e1e_wphy_locked(hw,
+						I217_LPI_GPIO_CTRL, phy_reg);
+			}
 		}
 
 		/* For i217 Intel Rapid Start Technology support,
@@ -4709,6 +4718,11 @@
 			return;
 		}
 
+		/* Clear Auto Enable LPI after link up */
+		e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
+		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+		e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
+
 		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
 			/* Restore clear on SMB if no manageability engine
 			 * is present
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 5515126..8066a49 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -217,6 +217,10 @@
 #define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK	0x3F00
 #define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT	8
 
+/* Low Power Idle GPIO Control */
+#define I217_LPI_GPIO_CTRL			PHY_REG(772, 18)
+#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI		0x0800
+
 /* PHY Low Power Idle Control */
 #define I82579_LPI_CTRL				PHY_REG(772, 20)
 #define I82579_LPI_CTRL_100_ENABLE		0x2000
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index cb37ff1..5885603 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -327,9 +327,12 @@
 	} else if ((hw->mac.type == e1000_82574) ||
 		   (hw->mac.type == e1000_82583)) {
 		u16 data;
+		s32 ret_val;
 
 		factps = er32(FACTPS);
-		e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+		ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+		if (ret_val)
+			return false;
 
 		if (!(factps & E1000_FACTPS_MNGCG) &&
 		    ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 201cc93..65c3aef 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6033,6 +6033,28 @@
 	return retval;
 }
 
+static void e1000e_flush_lpic(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ret_val;
+
+	pm_runtime_get_sync(netdev->dev.parent);
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto fl_out;
+
+	pr_info("EEE TX LPI TIMER: %08X\n",
+		er32(LPIC) >> E1000_LPIC_LPIET_SHIFT);
+
+	hw->phy.ops.release(hw);
+
+fl_out:
+	pm_runtime_put_sync(netdev->dev.parent);
+}
+
 static int e1000e_pm_freeze(struct device *dev)
 {
 	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -6333,6 +6355,8 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 
+	e1000e_flush_lpic(pdev);
+
 	e1000e_pm_freeze(dev);
 
 	return __e1000_shutdown(pdev, false);
@@ -6357,9 +6381,14 @@
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u16 eee_lp;
 
-	if (!e1000e_has_link(adapter))
+	eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;
+
+	if (!e1000e_has_link(adapter)) {
+		adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
 		pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
+	}
 
 	return -EBUSY;
 }
@@ -6411,6 +6440,8 @@
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
+	e1000e_flush_lpic(pdev);
+
 	e1000e_pm_freeze(&pdev->dev);
 
 	__e1000_shutdown(pdev, false);
@@ -6708,6 +6739,7 @@
 	int bars, i, err, pci_using_dac;
 	u16 eeprom_data = 0;
 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
+	s32 rval = 0;
 
 	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
 		aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -6940,15 +6972,19 @@
 	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
 		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
 		    (adapter->hw.bus.func == 1))
-			e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
-				       1, &eeprom_data);
+			rval = e1000_read_nvm(&adapter->hw,
+					      NVM_INIT_CONTROL3_PORT_B,
+					      1, &eeprom_data);
 		else
-			e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
-				       1, &eeprom_data);
+			rval = e1000_read_nvm(&adapter->hw,
+					      NVM_INIT_CONTROL3_PORT_A,
+					      1, &eeprom_data);
 	}
 
 	/* fetch WoL from EEPROM */
-	if (eeprom_data & eeprom_apme_mask)
+	if (rval)
+		e_dbg("NVM read error getting WoL initial values: %d\n", rval);
+	else if (eeprom_data & eeprom_apme_mask)
 		adapter->eeprom_wol |= E1000_WUFC_MAG;
 
 	/* now that we have the eeprom settings, apply the special cases
@@ -6967,7 +7003,12 @@
 		device_wakeup_enable(&pdev->dev);
 
 	/* save off EEPROM version number */
-	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+	rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+
+	if (rval) {
+		e_dbg("NVM read error getting EEPROM version: %d\n", rval);
+		adapter->eeprom_vers = 0;
+	}
 
 	/* reset the hardware with the new settings */
 	e1000e_reset(adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index b1f212b..fa6b103 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -327,8 +327,10 @@
 
 		ew32(EERD, eerd);
 		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
-		if (ret_val)
+		if (ret_val) {
+			e_dbg("NVM read error: %d\n", ret_val);
 			break;
+		}
 
 		data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
 	}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0e551f2..b29c157 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -38,8 +38,8 @@
  **/
 static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
 {
-	return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
-	       (desc->opcode == i40e_aqc_opc_nvm_update);
+	return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
+		(desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
 }
 
 /**
@@ -889,14 +889,9 @@
 		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
 	}
 
-	if (i40e_is_nvm_update_op(desc))
-		hw->aq.nvm_busy = true;
-
-	if (le16_to_cpu(desc->datalen) == buff_size) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: desc and buffer writeback:\n");
-		i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
-	}
+	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+		   "AQTX: desc and buffer writeback:\n");
+	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
 
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
@@ -907,6 +902,9 @@
 		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
 	}
 
+	if (!status && i40e_is_nvm_update_op(desc))
+		hw->aq.nvm_busy = true;
+
 asq_send_command_error:
 	mutex_unlock(&hw->aq.asq_mutex);
 asq_send_command_exit:
@@ -979,17 +977,14 @@
 			   I40E_DEBUG_AQ_MESSAGE,
 			   "AQRX: Event received with error 0x%X.\n",
 			   hw->aq.arq_last_status);
-	} else {
-		e->desc = *desc;
-		datalen = le16_to_cpu(desc->datalen);
-		e->msg_size = min(datalen, e->msg_size);
-		if (e->msg_buf != NULL && (e->msg_size != 0))
-			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
-			       e->msg_size);
 	}
 
-	if (i40e_is_nvm_update_op(&e->desc))
-		hw->aq.nvm_busy = false;
+	e->desc = *desc;
+	datalen = le16_to_cpu(desc->datalen);
+	e->msg_size = min(datalen, e->msg_size);
+	if (e->msg_buf != NULL && (e->msg_size != 0))
+		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+		       e->msg_size);
 
 	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
 	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
@@ -1023,6 +1018,14 @@
 		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
 	mutex_unlock(&hw->aq.arq_mutex);
 
+	if (i40e_is_nvm_update_op(&e->desc)) {
+		hw->aq.nvm_busy = false;
+		if (hw->aq.nvm_release_on_done) {
+			i40e_release_nvm(hw);
+			hw->aq.nvm_release_on_done = false;
+		}
+	}
+
 	return ret_code;
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index bb76be1..ba38a89 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -94,6 +94,7 @@
 	u16 api_maj_ver;                /* api major version */
 	u16 api_min_ver;                /* api minor version */
 	bool nvm_busy;
+	bool nvm_release_on_done;
 
 	struct mutex asq_mutex; /* Send queue lock */
 	struct mutex arq_mutex; /* Receive queue lock */
@@ -103,6 +104,41 @@
 	enum i40e_admin_queue_err arq_last_status;
 };
 
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * @aq_rc: AdminQ error code to convert
+ **/
+static inline int i40e_aq_rc_to_posix(u16 aq_rc)
+{
+	int aq_to_posix[] = {
+		0,           /* I40E_AQ_RC_OK */
+		-EPERM,      /* I40E_AQ_RC_EPERM */
+		-ENOENT,     /* I40E_AQ_RC_ENOENT */
+		-ESRCH,      /* I40E_AQ_RC_ESRCH */
+		-EINTR,      /* I40E_AQ_RC_EINTR */
+		-EIO,        /* I40E_AQ_RC_EIO */
+		-ENXIO,      /* I40E_AQ_RC_ENXIO */
+		-E2BIG,      /* I40E_AQ_RC_E2BIG */
+		-EAGAIN,     /* I40E_AQ_RC_EAGAIN */
+		-ENOMEM,     /* I40E_AQ_RC_ENOMEM */
+		-EACCES,     /* I40E_AQ_RC_EACCES */
+		-EFAULT,     /* I40E_AQ_RC_EFAULT */
+		-EBUSY,      /* I40E_AQ_RC_EBUSY */
+		-EEXIST,     /* I40E_AQ_RC_EEXIST */
+		-EINVAL,     /* I40E_AQ_RC_EINVAL */
+		-ENOTTY,     /* I40E_AQ_RC_ENOTTY */
+		-ENOSPC,     /* I40E_AQ_RC_ENOSPC */
+		-ENOSYS,     /* I40E_AQ_RC_ENOSYS */
+		-ERANGE,     /* I40E_AQ_RC_ERANGE */
+		-EPIPE,      /* I40E_AQ_RC_EFLUSHED */
+		-ESPIPE,     /* I40E_AQ_RC_BAD_ADDR */
+		-EROFS,      /* I40E_AQ_RC_EMODE */
+		-EFBIG,      /* I40E_AQ_RC_EFBIG */
+	};
+
+	return aq_to_posix[aq_rc];
+}
+
 /* general information */
 #define I40E_AQ_LARGE_BUF	512
 #define I40E_ASQ_CMD_TIMEOUT	100000  /* usecs */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index c65f4e8..f4e502a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -2121,6 +2121,47 @@
 	return status;
 }
 
+/**
+ * i40e_aq_erase_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in the module (expressed in 4 KB from module's beginning)
+ * @length: length of the section to be erased (expressed in 4 KB)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
+ **/
+i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+			      u32 offset, u16 length, bool last_command,
+			      struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_nvm_update *cmd =
+		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+	i40e_status status;
+
+	/* The highest byte of the offset must be zero. */
+	if (offset & 0xFF000000) {
+		status = I40E_ERR_PARAM;
+		goto i40e_aq_erase_nvm_exit;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+
+	/* If this is the last command in a series, set the proper flag. */
+	if (last_command)
+		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+	cmd->module_pointer = module_pointer;
+	cmd->offset = cpu_to_le32(offset);
+	cmd->length = cpu_to_le16(length);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_erase_nvm_exit:
+	return status;
+}
+
 #define I40E_DEV_FUNC_CAP_SWITCH_MODE	0x01
 #define I40E_DEV_FUNC_CAP_MGMT_MODE	0x02
 #define I40E_DEV_FUNC_CAP_NPAR		0x03
@@ -2351,6 +2392,53 @@
 }
 
 /**
+ * i40e_aq_update_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands
+ **/
+i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+			       u32 offset, u16 length, void *data,
+			       bool last_command,
+			       struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_nvm_update *cmd =
+		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+	i40e_status status;
+
+	/* The highest byte of the offset must be zero. */
+	if (offset & 0xFF000000) {
+		status = I40E_ERR_PARAM;
+		goto i40e_aq_update_nvm_exit;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+	/* If this is the last command in a series, set the proper flag. */
+	if (last_command)
+		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+	cmd->module_pointer = module_pointer;
+	cmd->offset = cpu_to_le32(offset);
+	cmd->length = cpu_to_le16(length);
+
+	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (length > I40E_AQ_LARGE_BUF)
+		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_update_nvm_exit:
+	return status;
+}
+
+/**
  * i40e_aq_get_lldp_mib
  * @hw: pointer to the hw struct
  * @bridge_type: type of bridge requested
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index ec07332..9eaed04 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1238,7 +1238,7 @@
 	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
 		i40e_status ret;
 		u16 vid;
-		int v;
+		unsigned int v;
 
 		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
 		if (cnt != 2) {
@@ -1254,7 +1254,7 @@
 			goto command_write_done;
 		}
 
-		vid = (unsigned)v;
+		vid = v;
 		ret = i40e_vsi_add_pvid(vsi, vid);
 		if (!ret)
 			dev_info(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 3abd3cb..9c93ff2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -630,7 +630,7 @@
 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
 	i40e_status status;
 	u8 aq_failures;
-	int err;
+	int err = 0;
 
 	if (vsi != pf->vsi[pf->lan_vsi])
 		return -EOPNOTSUPP;
@@ -683,8 +683,12 @@
 		err = -EAGAIN;
 	}
 
-	if (!test_bit(__I40E_DOWN, &pf->state))
-		return i40e_nway_reset(netdev);
+	if (!test_bit(__I40E_DOWN, &pf->state)) {
+		/* Give it a little more time to try to come back */
+		msleep(75);
+		if (!test_bit(__I40E_DOWN, &pf->state))
+			return i40e_nway_reset(netdev);
+	}
 
 	return err;
 }
@@ -759,10 +763,33 @@
 	u8 *eeprom_buff;
 	u16 i, sectors;
 	bool last;
+	u32 magic;
+
 #define I40E_NVM_SECTOR_SIZE  4096
 	if (eeprom->len == 0)
 		return -EINVAL;
 
+	/* check for NVMUpdate access method */
+	magic = hw->vendor_id | (hw->device_id << 16);
+	if (eeprom->magic && eeprom->magic != magic) {
+		int errno;
+
+		/* make sure it is the right magic for NVMUpdate */
+		if ((eeprom->magic >> 16) != hw->device_id)
+			return -EINVAL;
+
+		ret_val = i40e_nvmupd_command(hw,
+					      (struct i40e_nvm_access *)eeprom,
+					      bytes, &errno);
+		if (ret_val)
+			dev_info(&pf->pdev->dev,
+				 "NVMUpdate read failed err=%d status=0x%x\n",
+				 ret_val, hw->aq.asq_last_status);
+
+		return errno;
+	}
+
+	/* normal ethtool get_eeprom support */
 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
 
 	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
@@ -789,7 +816,7 @@
 		ret_val = i40e_aq_read_nvm(hw, 0x0,
 				eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
 				len,
-				eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
 				last, NULL);
 		if (ret_val) {
 			dev_info(&pf->pdev->dev,
@@ -801,7 +828,7 @@
 
 release_nvm:
 	i40e_release_nvm(hw);
-	memcpy(bytes, eeprom_buff, eeprom->len);
+	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
 free_buff:
 	kfree(eeprom_buff);
 	return ret_val;
@@ -821,6 +848,39 @@
 	return val;
 }
 
+static int i40e_set_eeprom(struct net_device *netdev,
+			   struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_hw *hw = &np->vsi->back->hw;
+	struct i40e_pf *pf = np->vsi->back;
+	int ret_val = 0;
+	int errno;
+	u32 magic;
+
+	/* normal ethtool set_eeprom is not supported */
+	magic = hw->vendor_id | (hw->device_id << 16);
+	if (eeprom->magic == magic)
+		return -EOPNOTSUPP;
+
+	/* check for NVMUpdate access method */
+	if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
+		return -EINVAL;
+
+	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+		return -EBUSY;
+
+	ret_val = i40e_nvmupd_command(hw, (struct i40e_nvm_access *)eeprom,
+				      bytes, &errno);
+	if (ret_val)
+		dev_info(&pf->pdev->dev,
+			 "NVMUpdate write failed err=%d status=0x%x\n",
+			 ret_val, hw->aq.asq_last_status);
+
+	return errno;
+}
+
 static void i40e_get_drvinfo(struct net_device *netdev,
 			     struct ethtool_drvinfo *drvinfo)
 {
@@ -2094,6 +2154,7 @@
 	.get_link		= ethtool_op_get_link,
 	.get_wol		= i40e_get_wol,
 	.set_wol		= i40e_set_wol,
+	.set_eeprom		= i40e_set_eeprom,
 	.get_eeprom_len		= i40e_get_eeprom_len,
 	.get_eeprom		= i40e_get_eeprom,
 	.get_ringparam		= i40e_get_ringparam,
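
The magic field is what steers an ethtool EEPROM request either to the normal
Shadow RAM path or to the NVMUpdate passthrough added above. A minimal
restatement of the dispatch implemented in i40e_get_eeprom()/i40e_set_eeprom()
(hw and eeprom as in those functions):

	u32 normal_magic = hw->vendor_id | (hw->device_id << 16);

	if (!eeprom->magic || eeprom->magic == normal_magic) {
		/* classic ethtool access: reads are served from the
		 * Shadow RAM, writes are refused
		 */
	} else if ((eeprom->magic >> 16) == hw->device_id) {
		/* NVMUpdate tool request: handed to i40e_nvmupd_command() */
	} else {
		/* anything else is rejected with -EINVAL */
	}
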
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c34e390..821fcc1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8642,24 +8642,18 @@
 	dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
 	if (err) {
 		dev_info(&pdev->dev,
-			 "init_adminq failed: %d expecting API %02x.%02x\n",
-			 err,
-			 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
 		goto err_pf_reset;
 	}
 
-	if (hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
 		dev_info(&pdev->dev,
-			 "Note: FW API version %02x.%02x newer than expected %02x.%02x, recommend driver update.\n",
-			 hw->aq.api_maj_ver, hw->aq.api_min_ver,
-			 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
-
-	if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
-	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR-1))
+			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
+		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
 		dev_info(&pdev->dev,
-			 "Note: FW API version %02x.%02x older than expected %02x.%02x, recommend nvm update.\n",
-			 hw->aq.api_maj_ver, hw->aq.api_min_ver,
-			 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
 
 
 	i40e_verify_eeprom(pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 66bcb15..97bda3d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -241,6 +241,46 @@
 }
 
 /**
+ * i40e_write_nvm_aq - Writes Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a 16-bit word buffer to the Shadow RAM using the admin queue commands.
+ **/
+i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+			      u32 offset, u16 words, void *data,
+			      bool last_command)
+{
+	i40e_status ret_code = I40E_ERR_NVM;
+
+	/* Here we are checking the SR limit only for the flat memory model.
+	 * We cannot do it for the module-based model, as we did not acquire
+	 * the NVM resource yet (we cannot get the module pointer value).
+	 * Firmware will check the module-based model.
+	 */
+	if ((offset + words) > hw->nvm.sr_size)
+		hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n");
+	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+		/* We can write only up to 4KB (one sector), in one AQ write */
+		hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n");
+	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+		/* A single write cannot spread over two sectors */
+		hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n");
+	else
+		ret_code = i40e_aq_update_nvm(hw, module_pointer,
+					      2 * offset,  /*bytes*/
+					      2 * words,   /*bytes*/
+					      data, last_command, NULL);
+
+	return ret_code;
+}
+
+/**
  * i40e_calc_nvm_checksum - Calculates and returns the checksum
  * @hw: pointer to hardware structure
  * @checksum: pointer to the checksum
@@ -310,6 +350,27 @@
 }
 
 /**
+ * i40e_update_nvm_checksum - Updates the NVM checksum
+ * @hw: pointer to hardware structure
+ *
+ * The caller must acquire NVM ownership before calling this function and
+ * release it when the ARQ completion event is received.
+ * This function commits the Shadow RAM to NVM.
+ **/
+i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
+{
+	i40e_status ret_code = 0;
+	u16 checksum;
+
+	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+	if (!ret_code)
+		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
+					     1, &checksum, true);
+
+	return ret_code;
+}
+
+/**
  * i40e_validate_nvm_checksum - Validate EEPROM checksum
  * @hw: pointer to hardware structure
  * @checksum: calculated checksum
@@ -346,3 +407,453 @@
 i40e_validate_nvm_checksum_exit:
 	return ret_code;
 }
+
+static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+					  struct i40e_nvm_access *cmd,
+					  u8 *bytes, int *errno);
+static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+					     struct i40e_nvm_access *cmd,
+					     u8 *bytes, int *errno);
+static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
+					     struct i40e_nvm_access *cmd,
+					     u8 *bytes, int *errno);
+static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+						struct i40e_nvm_access *cmd,
+						int *errno);
+static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+					 struct i40e_nvm_access *cmd,
+					 int *errno);
+static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+					 struct i40e_nvm_access *cmd,
+					 u8 *bytes, int *errno);
+static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+					struct i40e_nvm_access *cmd,
+					u8 *bytes, int *errno);
+static inline u8 i40e_nvmupd_get_module(u32 val)
+{
+	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
+}
+static inline u8 i40e_nvmupd_get_transaction(u32 val)
+{
+	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
+}
+
+/**
+ * i40e_nvmupd_command - Process an NVM update command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * Dispatches command depending on what update state is current
+ **/
+i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+				struct i40e_nvm_access *cmd,
+				u8 *bytes, int *errno)
+{
+	i40e_status status;
+
+	/* assume success */
+	*errno = 0;
+
+	switch (hw->nvmupd_state) {
+	case I40E_NVMUPD_STATE_INIT:
+		status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
+		break;
+
+	case I40E_NVMUPD_STATE_READING:
+		status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
+		break;
+
+	case I40E_NVMUPD_STATE_WRITING:
+		status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
+		break;
+
+	default:
+		/* invalid state, should never happen */
+		status = I40E_NOT_SUPPORTED;
+		*errno = -ESRCH;
+		break;
+	}
+	return status;
+}
+
+/**
+ * i40e_nvmupd_state_init - Handle NVM update state Init
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * Process legitimate commands of the Init state and conditionally set next
+ * state. Reject all other commands.
+ **/
+static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+					  struct i40e_nvm_access *cmd,
+					  u8 *bytes, int *errno)
+{
+	i40e_status status = 0;
+	enum i40e_nvmupd_cmd upd_cmd;
+
+	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+
+	switch (upd_cmd) {
+	case I40E_NVMUPD_READ_SA:
+		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+		if (status) {
+			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+		} else {
+			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+			i40e_release_nvm(hw);
+		}
+		break;
+
+	case I40E_NVMUPD_READ_SNT:
+		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+		if (status) {
+			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+		} else {
+			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+			hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
+		}
+		break;
+
+	case I40E_NVMUPD_WRITE_ERA:
+		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+		if (status) {
+			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+		} else {
+			status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
+			if (status)
+				i40e_release_nvm(hw);
+			else
+				hw->aq.nvm_release_on_done = true;
+		}
+		break;
+
+	case I40E_NVMUPD_WRITE_SA:
+		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+		if (status) {
+			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+		} else {
+			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+			if (status)
+				i40e_release_nvm(hw);
+			else
+				hw->aq.nvm_release_on_done = true;
+		}
+		break;
+
+	case I40E_NVMUPD_WRITE_SNT:
+		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+		if (status) {
+			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+		} else {
+			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+		}
+		break;
+
+	case I40E_NVMUPD_CSUM_SA:
+		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+		if (status) {
+			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+		} else {
+			status = i40e_update_nvm_checksum(hw);
+			if (status) {
+				*errno = hw->aq.asq_last_status ?
+				   i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+				   -EIO;
+				i40e_release_nvm(hw);
+			} else {
+				hw->aq.nvm_release_on_done = true;
+			}
+		}
+		break;
+
+	default:
+		status = I40E_ERR_NVM;
+		*errno = -ESRCH;
+		break;
+	}
+	return status;
+}
+
+/**
+ * i40e_nvmupd_state_reading - Handle NVM update state Reading
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * NVM ownership is already held.  Process legitimate commands and set any
+ * change in state; reject all other commands.
+ **/
+static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+					     struct i40e_nvm_access *cmd,
+					     u8 *bytes, int *errno)
+{
+	i40e_status status;
+	enum i40e_nvmupd_cmd upd_cmd;
+
+	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+
+	switch (upd_cmd) {
+	case I40E_NVMUPD_READ_SA:
+	case I40E_NVMUPD_READ_CON:
+		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+		break;
+
+	case I40E_NVMUPD_READ_LCB:
+		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+		i40e_release_nvm(hw);
+		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+		break;
+
+	default:
+		status = I40E_NOT_SUPPORTED;
+		*errno = -ESRCH;
+		break;
+	}
+	return status;
+}
+
+/**
+ * i40e_nvmupd_state_writing - Handle NVM update state Writing
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * NVM ownership is already held.  Process legitimate commands and set any
+ * change in state; reject all other commands
+ **/
+static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
+					     struct i40e_nvm_access *cmd,
+					     u8 *bytes, int *errno)
+{
+	i40e_status status;
+	enum i40e_nvmupd_cmd upd_cmd;
+
+	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+
+	switch (upd_cmd) {
+	case I40E_NVMUPD_WRITE_CON:
+		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+		break;
+
+	case I40E_NVMUPD_WRITE_LCB:
+		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+		if (!status) {
+			hw->aq.nvm_release_on_done = true;
+			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+		}
+		break;
+
+	case I40E_NVMUPD_CSUM_CON:
+		status = i40e_update_nvm_checksum(hw);
+		if (status)
+			*errno = hw->aq.asq_last_status ?
+				   i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+				   -EIO;
+		break;
+
+	case I40E_NVMUPD_CSUM_LCB:
+		status = i40e_update_nvm_checksum(hw);
+		if (status) {
+			*errno = hw->aq.asq_last_status ?
+				   i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+				   -EIO;
+		} else {
+			hw->aq.nvm_release_on_done = true;
+			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+		}
+		break;
+
+	default:
+		status = I40E_NOT_SUPPORTED;
+		*errno = -ESRCH;
+		break;
+	}
+	return status;
+}
+
+/**
+ * i40e_nvmupd_validate_command - Validate given command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @errno: pointer to return error code
+ *
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
+ **/
+static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+						 struct i40e_nvm_access *cmd,
+						 int *errno)
+{
+	enum i40e_nvmupd_cmd upd_cmd;
+	u8 transaction, module;
+
+	/* anything that doesn't match a recognized case is an error */
+	upd_cmd = I40E_NVMUPD_INVALID;
+
+	transaction = i40e_nvmupd_get_transaction(cmd->config);
+	module = i40e_nvmupd_get_module(cmd->config);
+
+	/* limits on data size */
+	if ((cmd->data_size < 1) ||
+	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
+		hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n",
+		       cmd->data_size);
+		*errno = -EFAULT;
+		return I40E_NVMUPD_INVALID;
+	}
+
+	switch (cmd->command) {
+	case I40E_NVM_READ:
+		switch (transaction) {
+		case I40E_NVM_CON:
+			upd_cmd = I40E_NVMUPD_READ_CON;
+			break;
+		case I40E_NVM_SNT:
+			upd_cmd = I40E_NVMUPD_READ_SNT;
+			break;
+		case I40E_NVM_LCB:
+			upd_cmd = I40E_NVMUPD_READ_LCB;
+			break;
+		case I40E_NVM_SA:
+			upd_cmd = I40E_NVMUPD_READ_SA;
+			break;
+		}
+		break;
+
+	case I40E_NVM_WRITE:
+		switch (transaction) {
+		case I40E_NVM_CON:
+			upd_cmd = I40E_NVMUPD_WRITE_CON;
+			break;
+		case I40E_NVM_SNT:
+			upd_cmd = I40E_NVMUPD_WRITE_SNT;
+			break;
+		case I40E_NVM_LCB:
+			upd_cmd = I40E_NVMUPD_WRITE_LCB;
+			break;
+		case I40E_NVM_SA:
+			upd_cmd = I40E_NVMUPD_WRITE_SA;
+			break;
+		case I40E_NVM_ERA:
+			upd_cmd = I40E_NVMUPD_WRITE_ERA;
+			break;
+		case I40E_NVM_CSUM:
+			upd_cmd = I40E_NVMUPD_CSUM_CON;
+			break;
+		case (I40E_NVM_CSUM|I40E_NVM_SA):
+			upd_cmd = I40E_NVMUPD_CSUM_SA;
+			break;
+		case (I40E_NVM_CSUM|I40E_NVM_LCB):
+			upd_cmd = I40E_NVMUPD_CSUM_LCB;
+			break;
+		}
+		break;
+	}
+
+	if (upd_cmd == I40E_NVMUPD_INVALID) {
+		*errno = -EFAULT;
+		hw_dbg(hw,
+		       "i40e_nvmupd_validate_command returns %d  errno: %d\n",
+		       upd_cmd, *errno);
+	}
+	return upd_cmd;
+}
+
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+					struct i40e_nvm_access *cmd,
+					u8 *bytes, int *errno)
+{
+	i40e_status status;
+	u8 module, transaction;
+	bool last;
+
+	transaction = i40e_nvmupd_get_transaction(cmd->config);
+	module = i40e_nvmupd_get_module(cmd->config);
+	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
+	hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
+	       module, cmd->offset, cmd->data_size);
+
+	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+				  bytes, last, NULL);
+	hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status);
+	if (status)
+		*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+
+	return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @errno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+					 struct i40e_nvm_access *cmd,
+					 int *errno)
+{
+	i40e_status status = 0;
+	u8 module, transaction;
+	bool last;
+
+	transaction = i40e_nvmupd_get_transaction(cmd->config);
+	module = i40e_nvmupd_get_module(cmd->config);
+	last = (transaction & I40E_NVM_LCB);
+	hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x  len 0x%x\n",
+	       module, cmd->offset, cmd->data_size);
+	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+				   last, NULL);
+	hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status);
+	if (status)
+		*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+
+	return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+					 struct i40e_nvm_access *cmd,
+					 u8 *bytes, int *errno)
+{
+	i40e_status status = 0;
+	u8 module, transaction;
+	bool last;
+
+	transaction = i40e_nvmupd_get_transaction(cmd->config);
+	module = i40e_nvmupd_get_module(cmd->config);
+	last = (transaction & I40E_NVM_LCB);
+	hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+	       module, cmd->offset, cmd->data_size);
+	status = i40e_aq_update_nvm(hw, module, cmd->offset,
+				    (u16)cmd->data_size, bytes, last, NULL);
+	hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status);
+	if (status)
+		*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+
+	return status;
+}
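
i40e_nvmupd_command() is a small state machine: an SA transaction acquires the
NVM, performs the whole operation and releases it in a single call, while the
SNT/CON/LCB transaction types span several calls and move hw->nvmupd_state
between INIT, READING and WRITING. A minimal sketch of a multi-call sectioned
read, assuming hypothetical module, off, len and buf values (offset/buffer
advancement and status checking between calls are elided for brevity):

	struct i40e_nvm_access cmd = {
		.command   = I40E_NVM_READ,
		.config    = (I40E_NVM_SNT << I40E_NVM_TRANS_SHIFT) | module,
		.offset    = off,
		.data_size = len,
	};
	int err;

	i40e_nvmupd_command(hw, &cmd, buf, &err);  /* SNT: acquire NVM, first read */

	cmd.config = (I40E_NVM_CON << I40E_NVM_TRANS_SHIFT) | module;
	i40e_nvmupd_command(hw, &cmd, buf, &err);  /* CON: read while READING */

	cmd.config = (I40E_NVM_LCB << I40E_NVM_TRANS_SHIFT) | module;
	i40e_nvmupd_command(hw, &cmd, buf, &err);  /* LCB: last read, release NVM */
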
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9383f08..a91d7e1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -150,6 +150,9 @@
 				u32 offset, u16 length, void *data,
 				bool last_command,
 				struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+			      u32 offset, u16 length, bool last_command,
+			      struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
 				void *buff, u16 buff_size, u16 *data_size,
 				enum i40e_admin_queue_opc list_type_opc,
@@ -245,8 +248,12 @@
 					 u16 *data);
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 					   u16 *words, u16 *data);
+i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 						 u16 *checksum);
+i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+				struct i40e_nvm_access *cmd,
+				u8 *bytes, int *);
 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
 
 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 989866a..d26d683 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1237,8 +1237,6 @@
 
 	/* likely incorrect csum if alternate IP extension headers found */
 	if (ipv6 &&
-	    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
-	    rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
 	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 		/* don't increment checksum err here, non-fatal err */
 		return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 1fcf220..8bb90491 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -269,6 +269,61 @@
 	u32 eetrack;              /* NVM data version */
 };
 
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+	I40E_NVMUPD_INVALID,
+	I40E_NVMUPD_READ_CON,
+	I40E_NVMUPD_READ_SNT,
+	I40E_NVMUPD_READ_LCB,
+	I40E_NVMUPD_READ_SA,
+	I40E_NVMUPD_WRITE_ERA,
+	I40E_NVMUPD_WRITE_CON,
+	I40E_NVMUPD_WRITE_SNT,
+	I40E_NVMUPD_WRITE_LCB,
+	I40E_NVMUPD_WRITE_SA,
+	I40E_NVMUPD_CSUM_CON,
+	I40E_NVMUPD_CSUM_SA,
+	I40E_NVMUPD_CSUM_LCB,
+};
+
+enum i40e_nvmupd_state {
+	I40E_NVMUPD_STATE_INIT,
+	I40E_NVMUPD_STATE_READING,
+	I40E_NVMUPD_STATE_WRITING
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code.  Where is the right file?
+ */
+#define I40E_NVM_READ	0xB
+#define I40E_NVM_WRITE	0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT	8
+#define I40E_NVM_TRANS_MASK	(0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_CON		0x0
+#define I40E_NVM_SNT		0x1
+#define I40E_NVM_LCB		0x2
+#define I40E_NVM_SA		(I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA		0x4
+#define I40E_NVM_CSUM		0x8
+
+#define I40E_NVM_ADAPT_SHIFT	16
+#define I40E_NVM_ADAPT_MASK	(0xffff << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA	4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct i40e_nvm_access {
+	u32 command;
+	u32 config;
+	u32 offset;	/* in bytes */
+	u32 data_size;	/* in bytes */
+	u8 data[1];
+};
+
 /* PCI bus types */
 enum i40e_bus_type {
 	i40e_bus_type_unknown = 0,
@@ -404,6 +459,9 @@
 	/* Admin Queue info */
 	struct i40e_adminq_info aq;
 
+	/* state of nvm update process */
+	enum i40e_nvmupd_state nvmupd_state;
+
 	/* HMC info */
 	struct i40e_hmc_info hmc; /* HMC info struct */
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 8330744b..0030060 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -708,12 +708,6 @@
 		goto asq_send_command_exit;
 	}
 
-	if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
-		status = I40E_ERR_NVM;
-		goto asq_send_command_exit;
-	}
-
 	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
 	if (cmd_details) {
 		*details = *cmd_details;
@@ -846,11 +840,9 @@
 	if (i40e_is_nvm_update_op(desc))
 		hw->aq.nvm_busy = true;
 
-	if (le16_to_cpu(desc->datalen) == buff_size) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: desc and buffer writeback:\n");
-		i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
-	}
+	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+		   "AQTX: desc and buffer writeback:\n");
+	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
 
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
@@ -933,15 +925,15 @@
 			   I40E_DEBUG_AQ_MESSAGE,
 			   "AQRX: Event received with error 0x%X.\n",
 			   hw->aq.arq_last_status);
-	} else {
-		e->desc = *desc;
-		datalen = le16_to_cpu(desc->datalen);
-		e->msg_size = min(datalen, e->msg_size);
-		if (e->msg_buf != NULL && (e->msg_size != 0))
-			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
-			       e->msg_size);
 	}
 
+	e->desc = *desc;
+	datalen = le16_to_cpu(desc->datalen);
+	e->msg_size = min(datalen, e->msg_size);
+	if (e->msg_buf != NULL && (e->msg_size != 0))
+		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+		       e->msg_size);
+
 	if (i40e_is_nvm_update_op(&e->desc))
 		hw->aq.nvm_busy = false;
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 1628455..91a5c5b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -94,6 +94,7 @@
 	u16 api_maj_ver;                /* api major version */
 	u16 api_min_ver;                /* api minor version */
 	bool nvm_busy;
+	bool nvm_release_on_done;
 
 	struct mutex asq_mutex; /* Send queue lock */
 	struct mutex arq_mutex; /* Receive queue lock */
@@ -103,6 +104,41 @@
 	enum i40e_admin_queue_err arq_last_status;
 };
 
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * @aq_rc: AdminQ error code to convert
+ **/
+static inline int i40e_aq_rc_to_posix(u16 aq_rc)
+{
+	int aq_to_posix[] = {
+		0,           /* I40E_AQ_RC_OK */
+		-EPERM,      /* I40E_AQ_RC_EPERM */
+		-ENOENT,     /* I40E_AQ_RC_ENOENT */
+		-ESRCH,      /* I40E_AQ_RC_ESRCH */
+		-EINTR,      /* I40E_AQ_RC_EINTR */
+		-EIO,        /* I40E_AQ_RC_EIO */
+		-ENXIO,      /* I40E_AQ_RC_ENXIO */
+		-E2BIG,      /* I40E_AQ_RC_E2BIG */
+		-EAGAIN,     /* I40E_AQ_RC_EAGAIN */
+		-ENOMEM,     /* I40E_AQ_RC_ENOMEM */
+		-EACCES,     /* I40E_AQ_RC_EACCES */
+		-EFAULT,     /* I40E_AQ_RC_EFAULT */
+		-EBUSY,      /* I40E_AQ_RC_EBUSY */
+		-EEXIST,     /* I40E_AQ_RC_EEXIST */
+		-EINVAL,     /* I40E_AQ_RC_EINVAL */
+		-ENOTTY,     /* I40E_AQ_RC_ENOTTY */
+		-ENOSPC,     /* I40E_AQ_RC_ENOSPC */
+		-ENOSYS,     /* I40E_AQ_RC_ENOSYS */
+		-ERANGE,     /* I40E_AQ_RC_ERANGE */
+		-EPIPE,      /* I40E_AQ_RC_EFLUSHED */
+		-ESPIPE,     /* I40E_AQ_RC_BAD_ADDR */
+		-EROFS,      /* I40E_AQ_RC_EMODE */
+		-EFBIG,      /* I40E_AQ_RC_EFBIG */
+	};
+
+	return aq_to_posix[aq_rc];
+}
+
 /* general information */
 #define I40E_AQ_LARGE_BUF	512
 #define I40E_ASQ_CMD_TIMEOUT	100000  /* usecs */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b342f21..79bf96c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -773,8 +773,6 @@
 
 	/* likely incorrect csum if alternate IP extension headers found */
 	if (ipv6 &&
-	    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
-	    rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
 	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 		/* don't increment checksum err here, non-fatal err */
 		return;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 6dd72ad..1537643 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -268,6 +268,61 @@
 	u32 eetrack;              /* NVM data version */
 };
 
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+	I40E_NVMUPD_INVALID,
+	I40E_NVMUPD_READ_CON,
+	I40E_NVMUPD_READ_SNT,
+	I40E_NVMUPD_READ_LCB,
+	I40E_NVMUPD_READ_SA,
+	I40E_NVMUPD_WRITE_ERA,
+	I40E_NVMUPD_WRITE_CON,
+	I40E_NVMUPD_WRITE_SNT,
+	I40E_NVMUPD_WRITE_LCB,
+	I40E_NVMUPD_WRITE_SA,
+	I40E_NVMUPD_CSUM_CON,
+	I40E_NVMUPD_CSUM_SA,
+	I40E_NVMUPD_CSUM_LCB,
+};
+
+enum i40e_nvmupd_state {
+	I40E_NVMUPD_STATE_INIT,
+	I40E_NVMUPD_STATE_READING,
+	I40E_NVMUPD_STATE_WRITING
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code.  Where is the right file?
+ */
+#define I40E_NVM_READ	0xB
+#define I40E_NVM_WRITE	0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT	8
+#define I40E_NVM_TRANS_MASK	(0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_CON		0x0
+#define I40E_NVM_SNT		0x1
+#define I40E_NVM_LCB		0x2
+#define I40E_NVM_SA		(I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA		0x4
+#define I40E_NVM_CSUM		0x8
+
+#define I40E_NVM_ADAPT_SHIFT	16
+#define I40E_NVM_ADAPT_MASK	(0xffff << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA	4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct i40e_nvm_access {
+	u32 command;
+	u32 config;
+	u32 offset;	/* in bytes */
+	u32 data_size;	/* in bytes */
+	u8 data[1];
+};
+
 /* PCI bus types */
 enum i40e_bus_type {
 	i40e_bus_type_unknown = 0,
@@ -403,6 +458,9 @@
 	/* Admin Queue info */
 	struct i40e_adminq_info aq;
 
+	/* state of nvm update process */
+	enum i40e_nvmupd_state nvmupd_state;
+
 	/* HMC info */
 	struct i40e_hmc_info hmc; /* HMC info struct */
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index ed1eb12..a53e81b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -768,7 +768,7 @@
 
 	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
 				&adapter->crit_section))
-		mdelay(1);
+		udelay(1);
 
 	f = i40evf_find_filter(adapter, macaddr);
 	if (NULL == f) {
@@ -840,7 +840,7 @@
 
 	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
 				&adapter->crit_section))
-		mdelay(1);
+		udelay(1);
 	/* remove filter if not in netdev list */
 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
 		bool found = false;
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index f5ba4e4..6f0490d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -355,6 +355,7 @@
 #define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
 #define E1000_IOVTCL    0x05BBC /* IOV Control Register */
 #define E1000_TXSWC     0x05ACC /* Tx Switch Control */
+#define E1000_LVMMC	0x03548 /* Last VM Misbehavior cause */
 /* These act per VF so an array friendly macro is used */
 #define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
 #define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 4d2dc17..cb14bbd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -57,8 +57,8 @@
 #include "igb.h"
 
 #define MAJ 5
-#define MIN 0
-#define BUILD 5
+#define MIN 2
+#define BUILD 13
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -4167,6 +4167,26 @@
 }
 
 /**
+ *  igb_check_lvmmc - check for malformed packets received
+ *  and indicated in the LVMMC register
+ *  @adapter: pointer to adapter
+ **/
+static void igb_check_lvmmc(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 lvmmc;
+
+	lvmmc = rd32(E1000_LVMMC);
+	if (lvmmc) {
+		if (unlikely(net_ratelimit())) {
+			netdev_warn(adapter->netdev,
+				    "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
+				    lvmmc);
+		}
+	}
+}
+
+/**
  *  igb_watchdog - Timer Call-back
  *  @data: pointer to adapter cast into an unsigned long
  **/
@@ -4361,6 +4381,11 @@
 	igb_spoof_check(adapter);
 	igb_ptp_rx_hang(adapter);
 
+	/* Check LVMMC register on i350/i354 only */
+	if ((adapter->hw.mac.type == e1000_i350) ||
+	    (adapter->hw.mac.type == e1000_i354))
+		igb_check_lvmmc(adapter);
+
 	/* Reset the timer */
 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
 		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 206171f..c5c97b4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -122,7 +122,7 @@
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
 	struct ixgbe_phy_info *phy = &hw->phy;
-	s32 ret_val = 0;
+	s32 ret_val;
 	u16 list_offset, data_offset;
 
 	/* Identify the PHY */
@@ -147,28 +147,23 @@
 
 		/* Call SFP+ identify routine to get the SFP+ module type */
 		ret_val = phy->ops.identify_sfp(hw);
-		if (ret_val != 0)
-			goto out;
-		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
-			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
-			goto out;
-		}
+		if (ret_val)
+			return ret_val;
+		if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+			return IXGBE_ERR_SFP_NOT_SUPPORTED;
 
 		/* Check to see if SFP+ module is supported */
 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
 							    &list_offset,
 							    &data_offset);
-		if (ret_val != 0) {
-			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
-			goto out;
-		}
+		if (ret_val)
+			return IXGBE_ERR_SFP_NOT_SUPPORTED;
 		break;
 	default:
 		break;
 	}
 
-out:
-	return ret_val;
+	return 0;
 }
 
 /**
@@ -183,7 +178,7 @@
 {
 	u32 regval;
 	u32 i;
-	s32 ret_val = 0;
+	s32 ret_val;
 
 	ret_val = ixgbe_start_hw_generic(hw);
 
@@ -203,11 +198,13 @@
 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
 	}
 
-	/* set the completion timeout for interface */
-	if (ret_val == 0)
-		ixgbe_set_pcie_completion_timeout(hw);
+	if (ret_val)
+		return ret_val;
 
-	return ret_val;
+	/* set the completion timeout for interface */
+	ixgbe_set_pcie_completion_timeout(hw);
+
+	return 0;
 }
 
 /**
@@ -222,7 +219,6 @@
 					     ixgbe_link_speed *speed,
 					     bool *autoneg)
 {
-	s32 status = 0;
 	u32 autoc = 0;
 
 	/*
@@ -262,11 +258,10 @@
 		break;
 
 	default:
-		status = IXGBE_ERR_LINK_SETUP;
-		break;
+		return IXGBE_ERR_LINK_SETUP;
 	}
 
-	return status;
+	return 0;
 }
 
 /**
@@ -277,14 +272,12 @@
  **/
 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 {
-	enum ixgbe_media_type media_type;
-
 	/* Detect if there is a copper PHY attached. */
 	switch (hw->phy.type) {
 	case ixgbe_phy_cu_unknown:
 	case ixgbe_phy_tn:
-		media_type = ixgbe_media_type_copper;
-		goto out;
+		return ixgbe_media_type_copper;
+
 	default:
 		break;
 	}
@@ -294,30 +287,27 @@
 	case IXGBE_DEV_ID_82598:
 	case IXGBE_DEV_ID_82598_BX:
 		/* Default device ID is mezzanine card KX/KX4 */
-		media_type = ixgbe_media_type_backplane;
-		break;
+		return ixgbe_media_type_backplane;
+
 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
 	case IXGBE_DEV_ID_82598EB_XF_LR:
 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
-		media_type = ixgbe_media_type_fiber;
-		break;
+		return ixgbe_media_type_fiber;
+
 	case IXGBE_DEV_ID_82598EB_CX4:
 	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
-		media_type = ixgbe_media_type_cx4;
-		break;
+		return ixgbe_media_type_cx4;
+
 	case IXGBE_DEV_ID_82598AT:
 	case IXGBE_DEV_ID_82598AT2:
-		media_type = ixgbe_media_type_copper;
-		break;
+		return ixgbe_media_type_copper;
+
 	default:
-		media_type = ixgbe_media_type_unknown;
-		break;
+		return ixgbe_media_type_unknown;
 	}
-out:
-	return media_type;
 }
 
 /**
@@ -328,7 +318,6 @@
  **/
 static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
 {
-	s32 ret_val = 0;
 	u32 fctrl_reg;
 	u32 rmcs_reg;
 	u32 reg;
@@ -338,10 +327,8 @@
 	bool link_up;
 
 	/* Validate the water mark configuration */
-	if (!hw->fc.pause_time) {
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
+	if (!hw->fc.pause_time)
+		return IXGBE_ERR_INVALID_LINK_SETTINGS;
 
 	/* Low water mark of zero causes XOFF floods */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -350,8 +337,7 @@
 			if (!hw->fc.low_water[i] ||
 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
 				hw_dbg(hw, "Invalid water mark configuration\n");
-				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-				goto out;
+				return IXGBE_ERR_INVALID_LINK_SETTINGS;
 			}
 		}
 	}
@@ -428,8 +414,7 @@
 		break;
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = IXGBE_ERR_CONFIG;
-		goto out;
+		return IXGBE_ERR_CONFIG;
 	}
 
 	/* Set 802.3x based flow control settings. */
@@ -460,8 +445,7 @@
 	/* Configure flow control refresh threshold value */
 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
-out:
-	return ret_val;
+	return 0;
 }
 
 /**
@@ -597,7 +581,7 @@
 		}
 
 		if (!*link_up)
-			goto out;
+			return 0;
 	}
 
 	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
@@ -628,7 +612,6 @@
 	    (ixgbe_validate_link_ready(hw) != 0))
 		*link_up = false;
 
-out:
 	return 0;
 }
 
@@ -645,7 +628,6 @@
 				      bool autoneg_wait_to_complete)
 {
 	bool		 autoneg	   = false;
-	s32              status            = 0;
 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
 	u32              curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	u32              autoc             = curr_autoc;
@@ -656,7 +638,7 @@
 	speed &= link_capabilities;
 
 	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
-		status = IXGBE_ERR_LINK_SETUP;
+		return IXGBE_ERR_LINK_SETUP;
 
 	/* Set KX4/KX support according to speed requested */
 	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
@@ -670,17 +652,11 @@
 			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
 	}
 
-	if (status == 0) {
-		/*
-		 * Setup and restart the link based on the new values in
-		 * ixgbe_hw This will write the AUTOC register based on the new
-		 * stored values
-		 */
-		status = ixgbe_start_mac_link_82598(hw,
-						    autoneg_wait_to_complete);
-	}
-
-	return status;
+	/* Setup and restart the link based on the new values in
+	 * ixgbe_hw. This will write the AUTOC register based on the new
+	 * stored values
+	 */
+	return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 }
 
 
@@ -717,7 +693,7 @@
  **/
 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
+	s32 status;
 	s32 phy_status = 0;
 	u32 ctrl;
 	u32 gheccr;
@@ -727,8 +703,8 @@
 
 	/* Call adapter stop to disable tx/rx and clear interrupts */
 	status = hw->mac.ops.stop_adapter(hw);
-	if (status != 0)
-		goto reset_hw_out;
+	if (status)
+		return status;
 
 	/*
 	 * Power up the Atlas Tx lanes if they are currently powered down.
@@ -770,7 +746,7 @@
 		/* Init PHY and function pointers, perform SFP setup */
 		phy_status = hw->phy.ops.init(hw);
 		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-			goto reset_hw_out;
+			return phy_status;
 		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
 			goto mac_reset_top;
 
@@ -836,7 +812,6 @@
 	 */
 	hw->mac.ops.init_rx_addrs(hw);
 
-reset_hw_out:
 	if (phy_status)
 		status = phy_status;
 
@@ -1109,106 +1084,6 @@
 }
 
 /**
- *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
-{
-	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
-	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
-	u16 ext_ability = 0;
-
-	hw->phy.ops.identify(hw);
-
-	/* Copper PHY must be checked before AUTOC LMS to determine correct
-	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
-	switch (hw->phy.type) {
-	case ixgbe_phy_tn:
-	case ixgbe_phy_cu_unknown:
-		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
-		MDIO_MMD_PMAPMD, &ext_ability);
-		if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-		if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-		if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-		goto out;
-	default:
-		break;
-	}
-
-	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-	case IXGBE_AUTOC_LMS_1G_AN:
-	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
-			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-		else
-			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
-		break;
-	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
-		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-		else /* XAUI */
-			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-		break;
-	case IXGBE_AUTOC_LMS_KX4_AN:
-	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
-		if (autoc & IXGBE_AUTOC_KX_SUPP)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-		if (autoc & IXGBE_AUTOC_KX4_SUPP)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-		break;
-	default:
-		break;
-	}
-
-	if (hw->phy.type == ixgbe_phy_nl) {
-		hw->phy.ops.identify_sfp(hw);
-
-		switch (hw->phy.sfp_type) {
-		case ixgbe_sfp_type_da_cu:
-			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-			break;
-		case ixgbe_sfp_type_sr:
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-			break;
-		case ixgbe_sfp_type_lr:
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-			break;
-		default:
-			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-			break;
-		}
-	}
-
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
-		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-		break;
-	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
-	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
-		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-		break;
-	case IXGBE_DEV_ID_82598EB_XF_LR:
-		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-		break;
-	default:
-		break;
-	}
-
-out:
-	return physical_layer;
-}
-
-/**
  *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
  *  port devices.
  *  @hw: pointer to the HW structure
@@ -1285,7 +1160,6 @@
 	.start_hw		= &ixgbe_start_hw_82598,
 	.clear_hw_cntrs		= &ixgbe_clear_hw_cntrs_generic,
 	.get_media_type		= &ixgbe_get_media_type_82598,
-	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
 	.enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
 	.get_mac_addr		= &ixgbe_get_mac_addr_generic,
 	.stop_adapter		= &ixgbe_stop_adapter_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 0373a5b..cf55a0d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -123,7 +123,7 @@
 
 static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
-	s32 ret_val = 0;
+	s32 ret_val;
 	u16 list_offset, data_offset, data_value;
 
 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -133,16 +133,14 @@
 
 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
 							      &data_offset);
-		if (ret_val != 0)
-			goto setup_sfp_out;
+		if (ret_val)
+			return ret_val;
 
 		/* PHY config will finish before releasing the semaphore */
 		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
 							IXGBE_GSSR_MAC_CSR_SM);
-		if (ret_val != 0) {
-			ret_val = IXGBE_ERR_SWFW_SYNC;
-			goto setup_sfp_out;
-		}
+		if (ret_val)
+			return IXGBE_ERR_SWFW_SYNC;
 
 		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
 			goto setup_sfp_err;
@@ -169,13 +167,11 @@
 
 		if (ret_val) {
 			hw_dbg(hw, " sfp module setup not complete\n");
-			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
-			goto setup_sfp_out;
+			return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
 		}
 	}
 
-setup_sfp_out:
-	return ret_val;
+	return 0;
 
 setup_sfp_err:
 	/* Release the semaphore */
@@ -294,7 +290,7 @@
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
 	struct ixgbe_phy_info *phy = &hw->phy;
-	s32 ret_val = 0;
+	s32 ret_val;
 	u32 esdp;
 
 	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
@@ -355,7 +351,6 @@
 					     ixgbe_link_speed *speed,
 					     bool *autoneg)
 {
-	s32 status = 0;
 	u32 autoc = 0;
 
 	/* Determine 1G link capabilities off of SFP+ type */
@@ -367,7 +362,7 @@
 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 		*autoneg = true;
-		goto out;
+		return 0;
 	}
 
 	/*
@@ -430,8 +425,7 @@
 		break;
 
 	default:
-		status = IXGBE_ERR_LINK_SETUP;
-		goto out;
+		return IXGBE_ERR_LINK_SETUP;
 	}
 
 	if (hw->phy.multispeed_fiber) {
@@ -445,8 +439,7 @@
 			*autoneg = true;
 	}
 
-out:
-	return status;
+	return 0;
 }
 
 /**
@@ -457,14 +450,12 @@
  **/
 static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 {
-	enum ixgbe_media_type media_type;
-
 	/* Detect if there is a copper PHY attached. */
 	switch (hw->phy.type) {
 	case ixgbe_phy_cu_unknown:
 	case ixgbe_phy_tn:
-		media_type = ixgbe_media_type_copper;
-		goto out;
+		return ixgbe_media_type_copper;
+
 	default:
 		break;
 	}
@@ -477,34 +468,31 @@
 	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
 	case IXGBE_DEV_ID_82599_XAUI_LOM:
 		/* Default device ID is mezzanine card KX/KX4 */
-		media_type = ixgbe_media_type_backplane;
-		break;
+		return ixgbe_media_type_backplane;
+
 	case IXGBE_DEV_ID_82599_SFP:
 	case IXGBE_DEV_ID_82599_SFP_FCOE:
 	case IXGBE_DEV_ID_82599_SFP_EM:
 	case IXGBE_DEV_ID_82599_SFP_SF2:
 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
 	case IXGBE_DEV_ID_82599EN_SFP:
-		media_type = ixgbe_media_type_fiber;
-		break;
+		return ixgbe_media_type_fiber;
+
 	case IXGBE_DEV_ID_82599_CX4:
-		media_type = ixgbe_media_type_cx4;
-		break;
+		return ixgbe_media_type_cx4;
+
 	case IXGBE_DEV_ID_82599_T3_LOM:
-		media_type = ixgbe_media_type_copper;
-		break;
+		return ixgbe_media_type_copper;
+
 	case IXGBE_DEV_ID_82599_LS:
-		media_type = ixgbe_media_type_fiber_lco;
-		break;
+		return ixgbe_media_type_fiber_lco;
+
 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
-		media_type = ixgbe_media_type_fiber_qsfp;
-		break;
+		return ixgbe_media_type_fiber_qsfp;
+
 	default:
-		media_type = ixgbe_media_type_unknown;
-		break;
+		return ixgbe_media_type_unknown;
 	}
-out:
-	return media_type;
 }
 
 /**
@@ -554,7 +542,7 @@
 		status = hw->mac.ops.acquire_swfw_sync(hw,
 						IXGBE_GSSR_MAC_CSR_SM);
 		if (status)
-			goto out;
+			return status;
 
 		got_lock = true;
 	}
@@ -591,7 +579,6 @@
 	/* Add delay to filter out noises during initial link setup */
 	msleep(50);
 
-out:
 	return status;
 }
 
@@ -958,7 +945,7 @@
 				      bool autoneg_wait_to_complete)
 {
 	bool autoneg = false;
-	s32 status = 0;
+	s32 status;
 	u32 pma_pmd_1g, link_mode, links_reg, i;
 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
@@ -974,15 +961,13 @@
 	/* Check to see if speed passed in is supported. */
 	status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
 						   &autoneg);
-	if (status != 0)
-		goto out;
+	if (status)
+		return status;
 
 	speed &= link_capabilities;
 
-	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
-		status = IXGBE_ERR_LINK_SETUP;
-		goto out;
-	}
+	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+		return IXGBE_ERR_LINK_SETUP;
 
 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
 	if (hw->mac.orig_link_settings_stored)
@@ -1033,7 +1018,7 @@
 		/* Restart link */
 		status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
 		if (status)
-			goto out;
+			return status;
 
 		/* Only poll for autoneg to complete if specified to do so */
 		if (autoneg_wait_to_complete) {
@@ -1060,7 +1045,6 @@
 		msleep(50);
 	}
 
-out:
 	return status;
 }
 
@@ -1105,8 +1089,8 @@
 
 	/* Call adapter stop to disable tx/rx and clear interrupts */
 	status = hw->mac.ops.stop_adapter(hw);
-	if (status != 0)
-		goto reset_hw_out;
+	if (status)
+		return status;
 
 	/* flush pending Tx transactions */
 	ixgbe_clear_tx_pending(hw);
@@ -1117,7 +1101,7 @@
 	status = hw->phy.ops.init(hw);
 
 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		goto reset_hw_out;
+		return status;
 
 	/* Setup SFP module if there is one present. */
 	if (hw->phy.sfp_setup_needed) {
@@ -1126,7 +1110,7 @@
 	}
 
 	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		goto reset_hw_out;
+		return status;
 
 	/* Reset PHY */
 	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
@@ -1216,7 +1200,7 @@
 							hw->mac.orig_autoc,
 							false);
 			if (status)
-				goto reset_hw_out;
+				return status;
 		}
 
 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
@@ -1258,7 +1242,6 @@
 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
 				       &hw->mac.wwpn_prefix);
 
-reset_hw_out:
 	return status;
 }
 
@@ -1927,20 +1910,20 @@
 	s32 ret_val = 0;
 
 	ret_val = ixgbe_start_hw_generic(hw);
-	if (ret_val != 0)
-		goto out;
+	if (ret_val)
+		return ret_val;
 
 	ret_val = ixgbe_start_hw_gen2(hw);
-	if (ret_val != 0)
-		goto out;
+	if (ret_val)
+		return ret_val;
 
 	/* We need to run link autotry after the driver loads */
 	hw->mac.autotry_restart = true;
 
-	if (ret_val == 0)
-		ret_val = ixgbe_verify_fw_version_82599(hw);
-out:
-	return ret_val;
+	if (ret_val)
+		return ret_val;
+
+	return ixgbe_verify_fw_version_82599(hw);
 }
 
 /**
@@ -1953,16 +1936,15 @@
  **/
 static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
 {
-	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+	s32 status;
 
 	/* Detect PHY if not unknown - returns success if already detected. */
 	status = ixgbe_identify_phy_generic(hw);
-	if (status != 0) {
+	if (status) {
 		/* 82599 10GBASE-T requires an external PHY */
 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
-			goto out;
-		else
-			status = ixgbe_identify_module_generic(hw);
+			return status;
+		status = ixgbe_identify_module_generic(hw);
 	}
 
 	/* Set PHY type none if no PHY detected */
@@ -1973,139 +1955,12 @@
 
 	/* Return error if SFP module has been detected but is not supported */
 	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
-		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
 
-out:
 	return status;
 }
 
 /**
- *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
-{
-	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
-	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
-	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
-	u16 ext_ability = 0;
-	u8 comp_codes_10g = 0;
-	u8 comp_codes_1g = 0;
-
-	hw->phy.ops.identify(hw);
-
-	switch (hw->phy.type) {
-	case ixgbe_phy_tn:
-	case ixgbe_phy_cu_unknown:
-		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-							 &ext_ability);
-		if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-		if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-		if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-		goto out;
-	default:
-		break;
-	}
-
-	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-	case IXGBE_AUTOC_LMS_1G_AN:
-	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
-			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
-			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
-			goto out;
-		} else
-			/* SFI mode so read SFP module */
-			goto sfp_check;
-		break;
-	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
-		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
-		goto out;
-	case IXGBE_AUTOC_LMS_10G_SERIAL:
-		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
-			goto out;
-		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
-			goto sfp_check;
-		break;
-	case IXGBE_AUTOC_LMS_KX4_KX_KR:
-	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
-		if (autoc & IXGBE_AUTOC_KX_SUPP)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-		if (autoc & IXGBE_AUTOC_KX4_SUPP)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-		if (autoc & IXGBE_AUTOC_KR_SUPP)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
-		goto out;
-	default:
-		goto out;
-	}
-
-sfp_check:
-	/* SFP check must be done last since DA modules are sometimes used to
-	 * test KR mode -  we need to id KR mode correctly before SFP module.
-	 * Call identify_sfp because the pluggable module may have changed */
-	hw->phy.ops.identify_sfp(hw);
-	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
-		goto out;
-
-	switch (hw->phy.type) {
-	case ixgbe_phy_sfp_passive_tyco:
-	case ixgbe_phy_sfp_passive_unknown:
-	case ixgbe_phy_qsfp_passive_unknown:
-		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-		break;
-	case ixgbe_phy_sfp_ftl_active:
-	case ixgbe_phy_sfp_active_unknown:
-	case ixgbe_phy_qsfp_active_unknown:
-		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
-		break;
-	case ixgbe_phy_sfp_avago:
-	case ixgbe_phy_sfp_ftl:
-	case ixgbe_phy_sfp_intel:
-	case ixgbe_phy_sfp_unknown:
-		hw->phy.ops.read_i2c_eeprom(hw,
-		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
-		hw->phy.ops.read_i2c_eeprom(hw,
-		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
-		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
-		break;
-	case ixgbe_phy_qsfp_intel:
-	case ixgbe_phy_qsfp_unknown:
-		hw->phy.ops.read_i2c_eeprom(hw,
-			IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
-		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-		break;
-	default:
-		break;
-	}
-
-out:
-	return physical_layer;
-}
-
-/**
  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
  *  @hw: pointer to hardware structure
  *  @regval: register value to write to RXCTRL
@@ -2147,26 +2002,24 @@
 	u16 fw_version = 0;
 
 	/* firmware check is only necessary for SFI devices */
-	if (hw->phy.media_type != ixgbe_media_type_fiber) {
-		status = 0;
-		goto fw_version_out;
-	}
+	if (hw->phy.media_type != ixgbe_media_type_fiber)
+		return 0;
 
 	/* get the offset to the Firmware Module block */
 	offset = IXGBE_FW_PTR;
 	if (hw->eeprom.ops.read(hw, offset, &fw_offset))
 		goto fw_version_err;
 
-	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
-		goto fw_version_out;
+	if (fw_offset == 0 || fw_offset == 0xFFFF)
+		return IXGBE_ERR_EEPROM_VERSION;
 
 	/* get the offset to the Pass Through Patch Configuration block */
 	offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
 	if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
 		goto fw_version_err;
 
-	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
-		goto fw_version_out;
+	if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
+		return IXGBE_ERR_EEPROM_VERSION;
 
 	/* get the firmware version */
 	offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
@@ -2176,7 +2029,6 @@
 	if (fw_version > 0x5)
 		status = 0;
 
-fw_version_out:
 	return status;
 
 fw_version_err:
@@ -2193,37 +2045,33 @@
  **/
 static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
 {
-	bool lesm_enabled = false;
 	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
 	s32 status;
 
 	/* get the offset to the Firmware Module block */
 	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
 
-	if ((status != 0) ||
-	    (fw_offset == 0) || (fw_offset == 0xFFFF))
-		goto out;
+	if (status || fw_offset == 0 || fw_offset == 0xFFFF)
+		return false;
 
 	/* get the offset to the LESM Parameters block */
 	status = hw->eeprom.ops.read(hw, (fw_offset +
 				     IXGBE_FW_LESM_PARAMETERS_PTR),
 				     &fw_lesm_param_offset);
 
-	if ((status != 0) ||
-	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
-		goto out;
+	if (status ||
+	    fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF)
+		return false;
 
 	/* get the lesm state word */
 	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
 				     IXGBE_FW_LESM_STATE_1),
 				     &fw_lesm_state);
 
-	if ((status == 0) &&
-	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
-		lesm_enabled = true;
+	if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+		return true;
 
-out:
-	return lesm_enabled;
+	return false;
 }
 
 /**
@@ -2241,22 +2089,16 @@
 					  u16 words, u16 *data)
 {
 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	s32 ret_val = IXGBE_ERR_CONFIG;
 
-	/*
-	 * If EEPROM is detected and can be addressed using 14 bits,
+	/* If EEPROM is detected and can be addressed using 14 bits,
 	 * use EERD otherwise use bit bang
 	 */
-	if ((eeprom->type == ixgbe_eeprom_spi) &&
-	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
-		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
-							 data);
-	else
-		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
-								    words,
-								    data);
+	if (eeprom->type == ixgbe_eeprom_spi &&
+	    offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)
+		return ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
 
-	return ret_val;
+	return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words,
+							 data);
 }
 
 /**
@@ -2273,19 +2115,15 @@
 				   u16 offset, u16 *data)
 {
 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	s32 ret_val = IXGBE_ERR_CONFIG;
 
 	/*
 	 * If EEPROM is detected and can be addressed using 14 bits,
 	 * use EERD otherwise use bit bang
 	 */
-	if ((eeprom->type == ixgbe_eeprom_spi) &&
-	    (offset <= IXGBE_EERD_MAX_ADDR))
-		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
-	else
-		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+	if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR)
+		return ixgbe_read_eerd_generic(hw, offset, data);
 
-	return ret_val;
+	return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
 }
 
 /**
@@ -2454,7 +2292,6 @@
 	.start_hw               = &ixgbe_start_hw_82599,
 	.clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
 	.get_media_type         = &ixgbe_get_media_type_82599,
-	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
 	.enable_rx_dma          = &ixgbe_enable_rx_dma_82599,
 	.disable_rx_buff	= &ixgbe_disable_rx_buff_generic,
 	.enable_rx_buff		= &ixgbe_enable_rx_buff_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3f318c5..b5f484b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -122,8 +122,7 @@
 	 */
 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
 		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
+		return IXGBE_ERR_INVALID_LINK_SETTINGS;
 	}
 
 	/*
@@ -143,7 +142,7 @@
 		/* some MAC's need RMW protection on AUTOC */
 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
 		if (ret_val)
-			goto out;
+			return ret_val;
 
 		/* only backplane uses autoc so fall though */
 	case ixgbe_media_type_fiber:
@@ -214,8 +213,7 @@
 		break;
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = IXGBE_ERR_CONFIG;
-		goto out;
+		return IXGBE_ERR_CONFIG;
 	}
 
 	if (hw->mac.type != ixgbe_mac_X540) {
@@ -246,7 +244,7 @@
 		 */
 		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
 		if (ret_val)
-			goto out;
+			return ret_val;
 
 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
 		   ixgbe_device_supports_autoneg_fc(hw)) {
@@ -255,7 +253,6 @@
 	}
 
 	hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
-out:
 	return ret_val;
 }
 
@@ -294,12 +291,11 @@
 	/* Setup flow control */
 	ret_val = ixgbe_setup_fc(hw);
 	if (!ret_val)
-		goto out;
+		return 0;
 
 	/* Clear adapter stopped flag */
 	hw->adapter_stopped = false;
 
-out:
 	return ret_val;
 }
 
@@ -836,20 +832,16 @@
 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 					       u16 words, u16 *data)
 {
-	s32 status = 0;
+	s32 status;
 	u16 i, count;
 
 	hw->eeprom.ops.init_params(hw);
 
-	if (words == 0) {
-		status = IXGBE_ERR_INVALID_ARGUMENT;
-		goto out;
-	}
+	if (words == 0)
+		return IXGBE_ERR_INVALID_ARGUMENT;
 
-	if (offset + words > hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		goto out;
-	}
+	if (offset + words > hw->eeprom.word_size)
+		return IXGBE_ERR_EEPROM;
 
 	/*
 	 * The EEPROM page size cannot be queried from the chip. We do lazy
@@ -874,7 +866,6 @@
 			break;
 	}
 
-out:
 	return status;
 }
 
@@ -899,64 +890,61 @@
 
 	/* Prepare the EEPROM for writing  */
 	status = ixgbe_acquire_eeprom(hw);
+	if (status)
+		return status;
 
-	if (status == 0) {
-		if (ixgbe_ready_eeprom(hw) != 0) {
-			ixgbe_release_eeprom(hw);
-			status = IXGBE_ERR_EEPROM;
-		}
-	}
-
-	if (status == 0) {
-		for (i = 0; i < words; i++) {
-			ixgbe_standby_eeprom(hw);
-
-			/*  Send the WRITE ENABLE command (8 bit opcode )  */
-			ixgbe_shift_out_eeprom_bits(hw,
-						  IXGBE_EEPROM_WREN_OPCODE_SPI,
-						  IXGBE_EEPROM_OPCODE_BITS);
-
-			ixgbe_standby_eeprom(hw);
-
-			/*
-			 * Some SPI eeproms use the 8th address bit embedded
-			 * in the opcode
-			 */
-			if ((hw->eeprom.address_bits == 8) &&
-			    ((offset + i) >= 128))
-				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
-
-			/* Send the Write command (8-bit opcode + addr) */
-			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
-						    IXGBE_EEPROM_OPCODE_BITS);
-			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
-						    hw->eeprom.address_bits);
-
-			page_size = hw->eeprom.word_page_size;
-
-			/* Send the data in burst via SPI*/
-			do {
-				word = data[i];
-				word = (word >> 8) | (word << 8);
-				ixgbe_shift_out_eeprom_bits(hw, word, 16);
-
-				if (page_size == 0)
-					break;
-
-				/* do not wrap around page */
-				if (((offset + i) & (page_size - 1)) ==
-				    (page_size - 1))
-					break;
-			} while (++i < words);
-
-			ixgbe_standby_eeprom(hw);
-			usleep_range(10000, 20000);
-		}
-		/* Done with writing - release the EEPROM */
+	if (ixgbe_ready_eeprom(hw) != 0) {
 		ixgbe_release_eeprom(hw);
+		return IXGBE_ERR_EEPROM;
 	}
 
-	return status;
+	for (i = 0; i < words; i++) {
+		ixgbe_standby_eeprom(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+		ixgbe_shift_out_eeprom_bits(hw,
+					    IXGBE_EEPROM_WREN_OPCODE_SPI,
+					    IXGBE_EEPROM_OPCODE_BITS);
+
+		ixgbe_standby_eeprom(hw);
+
+		/* Some SPI eeproms use the 8th address bit embedded
+		 * in the opcode
+		 */
+		if ((hw->eeprom.address_bits == 8) &&
+		    ((offset + i) >= 128))
+			write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+					    IXGBE_EEPROM_OPCODE_BITS);
+		ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+					    hw->eeprom.address_bits);
+
+		page_size = hw->eeprom.word_page_size;
+
+		/* Send the data in burst via SPI */
+		do {
+			word = data[i];
+			word = (word >> 8) | (word << 8);
+			ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+			if (page_size == 0)
+				break;
+
+			/* do not wrap around page */
+			if (((offset + i) & (page_size - 1)) ==
+			    (page_size - 1))
+				break;
+		} while (++i < words);
+
+		ixgbe_standby_eeprom(hw);
+		usleep_range(10000, 20000);
+	}
+	/* Done with writing - release the EEPROM */
+	ixgbe_release_eeprom(hw);
+
+	return 0;
 }
 
 /**
@@ -970,19 +958,12 @@
  **/
 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
 {
-	s32 status;
-
 	hw->eeprom.ops.init_params(hw);
 
-	if (offset >= hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		goto out;
-	}
+	if (offset >= hw->eeprom.word_size)
+		return IXGBE_ERR_EEPROM;
 
-	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
-
-out:
-	return status;
+	return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
 }
 
 /**
@@ -997,20 +978,16 @@
 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 					      u16 words, u16 *data)
 {
-	s32 status = 0;
+	s32 status;
 	u16 i, count;
 
 	hw->eeprom.ops.init_params(hw);
 
-	if (words == 0) {
-		status = IXGBE_ERR_INVALID_ARGUMENT;
-		goto out;
-	}
+	if (words == 0)
+		return IXGBE_ERR_INVALID_ARGUMENT;
 
-	if (offset + words > hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		goto out;
-	}
+	if (offset + words > hw->eeprom.word_size)
+		return IXGBE_ERR_EEPROM;
 
 	/*
 	 * We cannot hold synchronization semaphores for too long
@@ -1024,12 +1001,11 @@
 		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
 							   count, &data[i]);
 
-		if (status != 0)
-			break;
+		if (status)
+			return status;
 	}
 
-out:
-	return status;
+	return 0;
 }
 
 /**
@@ -1051,41 +1027,38 @@
 
 	/* Prepare the EEPROM for reading  */
 	status = ixgbe_acquire_eeprom(hw);
+	if (status)
+		return status;
 
-	if (status == 0) {
-		if (ixgbe_ready_eeprom(hw) != 0) {
-			ixgbe_release_eeprom(hw);
-			status = IXGBE_ERR_EEPROM;
-		}
-	}
-
-	if (status == 0) {
-		for (i = 0; i < words; i++) {
-			ixgbe_standby_eeprom(hw);
-			/*
-			 * Some SPI eeproms use the 8th address bit embedded
-			 * in the opcode
-			 */
-			if ((hw->eeprom.address_bits == 8) &&
-			    ((offset + i) >= 128))
-				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
-
-			/* Send the READ command (opcode + addr) */
-			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
-						    IXGBE_EEPROM_OPCODE_BITS);
-			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
-						    hw->eeprom.address_bits);
-
-			/* Read the data. */
-			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
-			data[i] = (word_in >> 8) | (word_in << 8);
-		}
-
-		/* End this read operation */
+	if (ixgbe_ready_eeprom(hw) != 0) {
 		ixgbe_release_eeprom(hw);
+		return IXGBE_ERR_EEPROM;
 	}
 
-	return status;
+	for (i = 0; i < words; i++) {
+		ixgbe_standby_eeprom(hw);
+		/* Some SPI eeproms use the 8th address bit embedded
+		 * in the opcode
+		 */
+		if ((hw->eeprom.address_bits == 8) &&
+		    ((offset + i) >= 128))
+			read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+		/* Send the READ command (opcode + addr) */
+		ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+					    IXGBE_EEPROM_OPCODE_BITS);
+		ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+					    hw->eeprom.address_bits);
+
+		/* Read the data. */
+		word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+		data[i] = (word_in >> 8) | (word_in << 8);
+	}
+
+	/* End this read operation */
+	ixgbe_release_eeprom(hw);
+
+	return 0;
 }
 
 /**
@@ -1099,19 +1072,12 @@
 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 				       u16 *data)
 {
-	s32 status;
-
 	hw->eeprom.ops.init_params(hw);
 
-	if (offset >= hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		goto out;
-	}
+	if (offset >= hw->eeprom.word_size)
+		return IXGBE_ERR_EEPROM;
 
-	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
-
-out:
-	return status;
+	return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
 }
 
 /**
@@ -1127,20 +1093,16 @@
 				   u16 words, u16 *data)
 {
 	u32 eerd;
-	s32 status = 0;
+	s32 status;
 	u32 i;
 
 	hw->eeprom.ops.init_params(hw);
 
-	if (words == 0) {
-		status = IXGBE_ERR_INVALID_ARGUMENT;
-		goto out;
-	}
+	if (words == 0)
+		return IXGBE_ERR_INVALID_ARGUMENT;
 
-	if (offset >= hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		goto out;
-	}
+	if (offset >= hw->eeprom.word_size)
+		return IXGBE_ERR_EEPROM;
 
 	for (i = 0; i < words; i++) {
 		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1154,11 +1116,11 @@
 				   IXGBE_EEPROM_RW_REG_DATA);
 		} else {
 			hw_dbg(hw, "Eeprom read timed out\n");
-			goto out;
+			return status;
 		}
 	}
-out:
-	return status;
+
+	return 0;
 }
 
 /**
@@ -1174,7 +1136,7 @@
 						 u16 offset)
 {
 	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
-	s32 status = 0;
+	s32 status;
 	u16 i;
 
 	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
@@ -1184,12 +1146,12 @@
 	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
 					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
 	hw->eeprom.word_page_size = 0;
-	if (status != 0)
-		goto out;
+	if (status)
+		return status;
 
 	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
-	if (status != 0)
-		goto out;
+	if (status)
+		return status;
 
 	/*
 	 * When writing in burst more than the actual page size
@@ -1199,8 +1161,7 @@
 
 	hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
 	       hw->eeprom.word_page_size);
-out:
-	return status;
+	return 0;
 }
 
 /**
@@ -1229,20 +1190,16 @@
 				    u16 words, u16 *data)
 {
 	u32 eewr;
-	s32 status = 0;
+	s32 status;
 	u16 i;
 
 	hw->eeprom.ops.init_params(hw);
 
-	if (words == 0) {
-		status = IXGBE_ERR_INVALID_ARGUMENT;
-		goto out;
-	}
+	if (words == 0)
+		return IXGBE_ERR_INVALID_ARGUMENT;
 
-	if (offset >= hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		goto out;
-	}
+	if (offset >= hw->eeprom.word_size)
+		return IXGBE_ERR_EEPROM;
 
 	for (i = 0; i < words; i++) {
 		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1250,22 +1207,21 @@
 		       IXGBE_EEPROM_RW_REG_START;
 
 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
-		if (status != 0) {
+		if (status) {
 			hw_dbg(hw, "Eeprom write EEWR timed out\n");
-			goto out;
+			return status;
 		}
 
 		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
 
 		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
-		if (status != 0) {
+		if (status) {
 			hw_dbg(hw, "Eeprom write EEWR timed out\n");
-			goto out;
+			return status;
 		}
 	}
 
-out:
-	return status;
+	return 0;
 }
 
 /**
@@ -1293,7 +1249,6 @@
 {
 	u32 i;
 	u32 reg;
-	s32 status = IXGBE_ERR_EEPROM;
 
 	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
 		if (ee_reg == IXGBE_NVM_POLL_READ)
@@ -1302,12 +1257,11 @@
 			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
 
 		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
-			status = 0;
-			break;
+			return 0;
 		}
 		udelay(5);
 	}
-	return status;
+	return IXGBE_ERR_EEPROM;
 }
 
 /**
@@ -1319,47 +1273,42 @@
  **/
 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
 	u32 eec;
 	u32 i;
 
 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
-		status = IXGBE_ERR_SWFW_SYNC;
+		return IXGBE_ERR_SWFW_SYNC;
 
-	if (status == 0) {
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	/* Request EEPROM Access */
+	eec |= IXGBE_EEC_REQ;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+	for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
-		/* Request EEPROM Access */
-		eec |= IXGBE_EEC_REQ;
-		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-
-		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
-			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-			if (eec & IXGBE_EEC_GNT)
-				break;
-			udelay(5);
-		}
-
-		/* Release if grant not acquired */
-		if (!(eec & IXGBE_EEC_GNT)) {
-			eec &= ~IXGBE_EEC_REQ;
-			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-			hw_dbg(hw, "Could not acquire EEPROM grant\n");
-
-			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-			status = IXGBE_ERR_EEPROM;
-		}
-
-		/* Setup EEPROM for Read/Write */
-		if (status == 0) {
-			/* Clear CS and SK */
-			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
-			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-			IXGBE_WRITE_FLUSH(hw);
-			udelay(1);
-		}
+		if (eec & IXGBE_EEC_GNT)
+			break;
+		udelay(5);
 	}
-	return status;
+
+	/* Release if grant not acquired */
+	if (!(eec & IXGBE_EEC_GNT)) {
+		eec &= ~IXGBE_EEC_REQ;
+		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+		hw_dbg(hw, "Could not acquire EEPROM grant\n");
+
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+		return IXGBE_ERR_EEPROM;
+	}
+
+	/* Setup EEPROM for Read/Write */
+	/* Clear CS and SK */
+	eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(1);
+	return 0;
 }
 
 /**
@@ -1370,7 +1319,6 @@
  **/
 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
 {
-	s32 status = IXGBE_ERR_EEPROM;
 	u32 timeout = 2000;
 	u32 i;
 	u32 swsm;
@@ -1382,68 +1330,60 @@
 		 * set and we have the semaphore
 		 */
 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-		if (!(swsm & IXGBE_SWSM_SMBI)) {
-			status = 0;
+		if (!(swsm & IXGBE_SWSM_SMBI))
 			break;
-		}
-		udelay(50);
+		usleep_range(50, 100);
 	}
 
 	if (i == timeout) {
 		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
-		/*
-		 * this release is particularly important because our attempts
+		/* this release is particularly important because our attempts
 		 * above to get the semaphore may have succeeded, and if there
 		 * was a timeout, we should unconditionally clear the semaphore
 		 * bits to free the driver to make progress
 		 */
 		ixgbe_release_eeprom_semaphore(hw);
 
-		udelay(50);
-		/*
-		 * one last try
+		usleep_range(50, 100);
+		/* one last try
 		 * If the SMBI bit is 0 when we read it, then the bit will be
 		 * set and we have the semaphore
 		 */
 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-		if (!(swsm & IXGBE_SWSM_SMBI))
-			status = 0;
+		if (swsm & IXGBE_SWSM_SMBI) {
+			hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
+			return IXGBE_ERR_EEPROM;
+		}
 	}
 
 	/* Now get the semaphore between SW/FW through the SWESMBI bit */
-	if (status == 0) {
-		for (i = 0; i < timeout; i++) {
-			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+	for (i = 0; i < timeout; i++) {
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
 
-			/* Set the SW EEPROM semaphore bit to request access */
-			swsm |= IXGBE_SWSM_SWESMBI;
-			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+		/* Set the SW EEPROM semaphore bit to request access */
+		swsm |= IXGBE_SWSM_SWESMBI;
+		IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
 
-			/*
-			 * If we set the bit successfully then we got the
-			 * semaphore.
-			 */
-			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-			if (swsm & IXGBE_SWSM_SWESMBI)
-				break;
-
-			udelay(50);
-		}
-
-		/*
-		 * Release semaphores and return error if SW EEPROM semaphore
-		 * was not granted because we don't have access to the EEPROM
+		/* If we set the bit successfully then we got the
+		 * semaphore.
 		 */
-		if (i >= timeout) {
-			hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
-			ixgbe_release_eeprom_semaphore(hw);
-			status = IXGBE_ERR_EEPROM;
-		}
-	} else {
-		hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		if (swsm & IXGBE_SWSM_SWESMBI)
+			break;
+
+		usleep_range(50, 100);
 	}
 
-	return status;
+	/* Release semaphores and return error if SW EEPROM semaphore
+	 * was not granted because we don't have access to the EEPROM
+	 */
+	if (i >= timeout) {
+		hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
+		ixgbe_release_eeprom_semaphore(hw);
+		return IXGBE_ERR_EEPROM;
+	}
+
+	return 0;
 }
 
 /**
@@ -1470,7 +1410,6 @@
  **/
 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
 	u16 i;
 	u8 spi_stat_reg;
 
@@ -1497,10 +1436,10 @@
 	 */
 	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
 		hw_dbg(hw, "SPI EEPROM Status error\n");
-		status = IXGBE_ERR_EEPROM;
+		return IXGBE_ERR_EEPROM;
 	}
 
-	return status;
+	return 0;
 }
 
 /**
@@ -2099,17 +2038,14 @@
  **/
 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
 {
-	s32 ret_val = 0;
 	u32 mflcn_reg, fccfg_reg;
 	u32 reg;
 	u32 fcrtl, fcrth;
 	int i;
 
 	/* Validate the water mark configuration. */
-	if (!hw->fc.pause_time) {
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
+	if (!hw->fc.pause_time)
+		return IXGBE_ERR_INVALID_LINK_SETTINGS;
 
 	/* Low water mark of zero causes XOFF floods */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -2118,8 +2054,7 @@
 			if (!hw->fc.low_water[i] ||
 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
 				hw_dbg(hw, "Invalid water mark configuration\n");
-				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-				goto out;
+				return IXGBE_ERR_INVALID_LINK_SETTINGS;
 			}
 		}
 	}
@@ -2176,8 +2111,7 @@
 		break;
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = IXGBE_ERR_CONFIG;
-		goto out;
+		return IXGBE_ERR_CONFIG;
 	}
 
 	/* Set 802.3x based flow control settings. */
@@ -2213,8 +2147,7 @@
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
-out:
-	return ret_val;
+	return 0;
 }
 
 /**
@@ -2275,7 +2208,7 @@
 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
 {
 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
-	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+	s32 ret_val;
 
 	/*
 	 * On multispeed fiber at 1g, bail out if
@@ -2286,7 +2219,7 @@
 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
-		goto out;
+		return IXGBE_ERR_FC_NOT_NEGOTIATED;
 
 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2297,7 +2230,6 @@
 			       IXGBE_PCS1GANA_SYM_PAUSE,
 			       IXGBE_PCS1GANA_ASM_PAUSE);
 
-out:
 	return ret_val;
 }
 
@@ -2310,7 +2242,7 @@
 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
 {
 	u32 links2, anlp1_reg, autoc_reg, links;
-	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+	s32 ret_val;
 
 	/*
 	 * On backplane, bail out if
@@ -2319,12 +2251,12 @@
 	 */
 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
-		goto out;
+		return IXGBE_ERR_FC_NOT_NEGOTIATED;
 
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
-			goto out;
+			return IXGBE_ERR_FC_NOT_NEGOTIATED;
 	}
 	/*
 	 * Read the 10g AN autoc and LP ability registers and resolve
@@ -2337,7 +2269,6 @@
 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
 
-out:
 	return ret_val;
 }
 
@@ -2483,7 +2414,6 @@
  **/
 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
 	u32 i, poll;
 	u16 value;
 
@@ -2493,13 +2423,13 @@
 	/* Exit if master requests are blocked */
 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
 	    ixgbe_removed(hw->hw_addr))
-		goto out;
+		return 0;
 
 	/* Poll for master request bit to clear */
 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
 		udelay(100);
 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
-			goto out;
+			return 0;
 	}
 
 	/*
@@ -2522,16 +2452,13 @@
 		udelay(100);
 		value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
 		if (ixgbe_removed(hw->hw_addr))
-			goto out;
+			return 0;
 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
-			goto out;
+			return 0;
 	}
 
 	hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
-	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-
-out:
-	return status;
+	return IXGBE_ERR_MASTER_REQUESTS_PENDING;
 }
 
 /**
@@ -2706,8 +2633,8 @@
 	bool link_up = false;
 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-	s32 ret_val = 0;
 	bool locked = false;
+	s32 ret_val;
 
 	/*
 	 * Link must be up to auto-blink the LEDs;
@@ -2718,14 +2645,14 @@
 	if (!link_up) {
 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
 		if (ret_val)
-			goto out;
+			return ret_val;
 
 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
 		autoc_reg |= IXGBE_AUTOC_FLU;
 
 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
 		if (ret_val)
-			goto out;
+			return ret_val;
 
 		IXGBE_WRITE_FLUSH(hw);
 
@@ -2737,8 +2664,7 @@
 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
 	IXGBE_WRITE_FLUSH(hw);
 
-out:
-	return ret_val;
+	return 0;
 }
 
 /**
@@ -2750,19 +2676,19 @@
 {
 	u32 autoc_reg = 0;
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-	s32 ret_val = 0;
 	bool locked = false;
+	s32 ret_val;
 
 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
 	if (ret_val)
-		goto out;
+		return ret_val;
 
 	autoc_reg &= ~IXGBE_AUTOC_FLU;
 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
 
 	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
 	if (ret_val)
-		goto out;
+		return ret_val;
 
 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
 	led_reg &= ~IXGBE_LED_BLINK(index);
@@ -2770,8 +2696,7 @@
 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
 	IXGBE_WRITE_FLUSH(hw);
 
-out:
-	return ret_val;
+	return 0;
 }
 
 /**
@@ -2863,7 +2788,7 @@
  **/
 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
 {
-	u16 msix_count = 1;
+	u16 msix_count;
 	u16 max_msix_count;
 	u16 pcie_offset;
 
@@ -2878,7 +2803,7 @@
 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
 		break;
 	default:
-		return msix_count;
+		return 1;
 	}
 
 	msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
@@ -2916,10 +2841,10 @@
 	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
 
 	if (ixgbe_removed(hw->hw_addr))
-		goto done;
+		return 0;
 
 	if (!mpsar_lo && !mpsar_hi)
-		goto done;
+		return 0;
 
 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
 		if (mpsar_lo) {
@@ -2941,7 +2866,6 @@
 	/* was that the last pool using this rar? */
 	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
 		hw->mac.ops.clear_rar(hw, rar);
-done:
 	return 0;
 }
 
@@ -3310,14 +3234,14 @@
 
 	if ((alt_san_mac_blk_offset == 0) ||
 	    (alt_san_mac_blk_offset == 0xFFFF))
-		goto wwn_prefix_out;
+		return 0;
 
 	/* check capability in alternative san mac address block */
 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
 	if (hw->eeprom.ops.read(hw, offset, &caps))
 		goto wwn_prefix_err;
 	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
-		goto wwn_prefix_out;
+		return 0;
 
 	/* get the corresponding prefix for WWNN/WWPN */
 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
@@ -3328,7 +3252,6 @@
 	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
 		goto wwn_prefix_err;
 
-wwn_prefix_out:
 	return 0;
 
 wwn_prefix_err:
@@ -3522,21 +3445,17 @@
 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
 	u8 buf_len, dword_len;
 
-	s32 ret_val = 0;
-
 	if (length == 0 || length & 0x3 ||
 	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
 		hw_dbg(hw, "Buffer length failure.\n");
-		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-		goto out;
+		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
 	}
 
 	/* Check that the host interface is enabled. */
 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
 	if ((hicr & IXGBE_HICR_EN) == 0) {
 		hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
-		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-		goto out;
+		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
 	}
 
 	/* Calculate length in DWORDs */
@@ -3564,8 +3483,7 @@
 	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
 	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
 		hw_dbg(hw, "Command has failed with no status valid.\n");
-		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-		goto out;
+		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
 	}
 
 	/* Calculate length in DWORDs */
@@ -3580,12 +3498,11 @@
 	/* If there is any thing in data position pull it in */
 	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
 	if (buf_len == 0)
-		goto out;
+		return 0;
 
 	if (length < (buf_len + hdr_size)) {
 		hw_dbg(hw, "Buffer not large enough for reply message.\n");
-		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-		goto out;
+		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
 	}
 
 	/* Calculate length in DWORDs, add 3 for odd lengths */
@@ -3597,8 +3514,7 @@
 		le32_to_cpus(&buffer[bi]);
 	}
 
-out:
-	return ret_val;
+	return 0;
 }
 
 /**
@@ -3619,12 +3535,10 @@
 {
 	struct ixgbe_hic_drv_info fw_cmd;
 	int i;
-	s32 ret_val = 0;
+	s32 ret_val;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) {
-		ret_val = IXGBE_ERR_SWFW_SYNC;
-		goto out;
-	}
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM))
+		return IXGBE_ERR_SWFW_SYNC;
 
 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
@@ -3656,7 +3570,6 @@
 	}
 
 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
-out:
 	return ret_val;
 }
 
@@ -3725,28 +3638,23 @@
 static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
 			      u16 *ets_offset)
 {
-	s32 status = 0;
+	s32 status;
 
 	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
 	if (status)
-		goto out;
+		return status;
 
-	if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) {
-		status = IXGBE_NOT_IMPLEMENTED;
-		goto out;
-	}
+	if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
+		return IXGBE_NOT_IMPLEMENTED;
 
 	status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
 	if (status)
-		goto out;
+		return status;
 
-	if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) {
-		status = IXGBE_NOT_IMPLEMENTED;
-		goto out;
-	}
+	if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
+		return IXGBE_NOT_IMPLEMENTED;
 
-out:
-	return status;
+	return 0;
 }
 
 /**
@@ -3757,7 +3665,7 @@
  **/
 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
+	s32 status;
 	u16 ets_offset;
 	u16 ets_cfg;
 	u16 ets_sensor;
@@ -3766,14 +3674,12 @@
 	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
 
 	/* Only support thermal sensors attached to physical port 0 */
-	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
-		status = IXGBE_NOT_IMPLEMENTED;
-		goto out;
-	}
+	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+		return IXGBE_NOT_IMPLEMENTED;
 
 	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
 	if (status)
-		goto out;
+		return status;
 
 	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
 	if (num_sensors > IXGBE_MAX_SENSORS)
@@ -3786,7 +3692,7 @@
 		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
 					     &ets_sensor);
 		if (status)
-			goto out;
+			return status;
 
 		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
 				IXGBE_ETS_DATA_INDEX_SHIFT);
@@ -3799,11 +3705,11 @@
 					IXGBE_I2C_THERMAL_SENSOR_ADDR,
 					&data->sensor[i].temp);
 			if (status)
-				goto out;
+				return status;
 		}
 	}
-out:
-	return status;
+
+	return 0;
 }
 
 /**
@@ -3815,7 +3721,7 @@
  **/
 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
+	s32 status;
 	u16 ets_offset;
 	u16 ets_cfg;
 	u16 ets_sensor;
@@ -3828,14 +3734,12 @@
 	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
 
 	/* Only support thermal sensors attached to physical port 0 */
-	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
-		status = IXGBE_NOT_IMPLEMENTED;
-		goto out;
-	}
+	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+		return IXGBE_NOT_IMPLEMENTED;
 
 	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
 	if (status)
-		goto out;
+		return status;
 
 	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
 			     IXGBE_ETS_LTHRES_DELTA_SHIFT);
@@ -3869,7 +3773,7 @@
 		data->sensor[i].caution_thresh = therm_limit;
 		data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
 	}
-out:
-	return status;
+
+	return 0;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index a689ee0..48f35fc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -87,7 +87,6 @@
 	int min_credit;
 	int min_multiplier;
 	int min_percent = 100;
-	s32 ret_val = 0;
 	/* Initialization values default for Tx settings */
 	u32 credit_refill       = 0;
 	u32 credit_max          = 0;
@@ -95,10 +94,8 @@
 	u8  bw_percent          = 0;
 	u8  i;
 
-	if (dcb_config == NULL) {
-		ret_val = DCB_ERR_CONFIG;
-		goto out;
-	}
+	if (!dcb_config)
+		return DCB_ERR_CONFIG;
 
 	min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
 			DCB_CREDIT_QUANTUM;
@@ -174,8 +171,7 @@
 		p->data_credits_max = (u16)credit_max;
 	}
 
-out:
-	return ret_val;
+	return 0;
 }
 
 void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
@@ -236,7 +232,7 @@
 
 	/* If tc is 0 then DCB is likely not enabled or supported */
 	if (!tc)
-		goto out;
+		return 0;
 
 	/*
 	 * Test from maximum TC to 1 and report the first match we find.  If
@@ -247,7 +243,7 @@
 		if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
 			break;
 	}
-out:
+
 	return tc;
 }
 
@@ -269,7 +265,6 @@
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
 			struct ixgbe_dcb_config *dcb_config)
 {
-	s32 ret = 0;
 	u8 pfc_en;
 	u8 ptype[MAX_TRAFFIC_CLASS];
 	u8 bwgid[MAX_TRAFFIC_CLASS];
@@ -287,37 +282,31 @@
 
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
-						bwgid, ptype);
-		break;
+		return ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
+						 bwgid, ptype);
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
-		ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
-						bwgid, ptype, prio_tc);
-		break;
+		return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
+						 bwgid, ptype, prio_tc);
 	default:
 		break;
 	}
-	return ret;
+	return 0;
 }
 
 /* Helper routines to abstract HW specifics from DCB netlink ops */
 s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 {
-	int ret = -EINVAL;
-
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
-		break;
+		return ixgbe_dcb_config_pfc_82598(hw, pfc_en);
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
-		ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
-		break;
+		return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
 	default:
 		break;
 	}
-	return ret;
+	return -EINVAL;
 }
 
 s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 75bcb2e..58a7f53 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -153,7 +153,6 @@
 static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int err = 0;
 
 	/* Fail command if not in CEE mode */
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
@@ -161,12 +160,10 @@
 
 	/* verify there is something to do, if not then exit */
 	if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-		goto out;
+		return 0;
 
-	err = ixgbe_setup_tc(netdev,
-			     state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
-out:
-	return !!err;
+	return !!ixgbe_setup_tc(netdev,
+				state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
 }
 
 static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
@@ -331,12 +328,12 @@
 
 	/* Fail command if not in CEE mode */
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-		return ret;
+		return DCB_NO_HW_CHG;
 
 	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
 						      MAX_TRAFFIC_CLASS);
 	if (!adapter->dcb_set_bitmap)
-		return ret;
+		return DCB_NO_HW_CHG;
 
 	if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
 		u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
@@ -536,7 +533,7 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
-	int i, err = 0;
+	int i, err;
 	__u8 max_tc = 0;
 	__u8 map_chg = 0;
 
@@ -573,17 +570,15 @@
 	if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
 		return -EINVAL;
 
-	if (max_tc != netdev_get_num_tc(dev))
+	if (max_tc != netdev_get_num_tc(dev)) {
 		err = ixgbe_setup_tc(dev, max_tc);
-	else if (map_chg)
+		if (err)
+			return err;
+	} else if (map_chg) {
 		ixgbe_dcbnl_devreset(dev);
+	}
 
-	if (err)
-		goto err_out;
-
-	err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
-err_out:
-	return err;
+	return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
 }
 
 static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
@@ -647,10 +642,10 @@
 				   struct dcb_app *app)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	int err = -EINVAL;
+	int err;
 
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
-		return err;
+		return -EINVAL;
 
 	err = dcb_ieee_setapp(dev, app);
 	if (err)
@@ -662,7 +657,7 @@
 		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
 
 		if (app_mask & (1 << adapter->fcoe.up))
-			return err;
+			return 0;
 
 		adapter->fcoe.up = app->priority;
 		ixgbe_dcbnl_devreset(dev);
@@ -705,7 +700,7 @@
 		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
 
 		if (app_mask & (1 << adapter->fcoe.up))
-			return err;
+			return 0;
 
 		adapter->fcoe.up = app_mask ?
 				   ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 25a3dfe..2ad91cb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -67,23 +67,23 @@
  */
 int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
 {
-	int len = 0;
+	int len;
 	struct ixgbe_fcoe *fcoe;
 	struct ixgbe_adapter *adapter;
 	struct ixgbe_fcoe_ddp *ddp;
 	u32 fcbuff;
 
 	if (!netdev)
-		goto out_ddp_put;
+		return 0;
 
 	if (xid >= IXGBE_FCOE_DDP_MAX)
-		goto out_ddp_put;
+		return 0;
 
 	adapter = netdev_priv(netdev);
 	fcoe = &adapter->fcoe;
 	ddp = &fcoe->ddp[xid];
 	if (!ddp->udl)
-		goto out_ddp_put;
+		return 0;
 
 	len = ddp->len;
 	/* if there an error, force to invalidate ddp context */
@@ -114,7 +114,6 @@
 
 	ixgbe_fcoe_clear_ddp(ddp);
 
-out_ddp_put:
 	return len;
 }
 
@@ -394,17 +393,17 @@
 		xid =  be16_to_cpu(fh->fh_rx_id);
 
 	if (xid >= IXGBE_FCOE_DDP_MAX)
-		goto ddp_out;
+		return -EINVAL;
 
 	fcoe = &adapter->fcoe;
 	ddp = &fcoe->ddp[xid];
 	if (!ddp->udl)
-		goto ddp_out;
+		return -EINVAL;
 
 	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
 					      IXGBE_RXDADV_ERR_FCERR);
 	if (ddp_err)
-		goto ddp_out;
+		return -EINVAL;
 
 	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
 	/* return 0 to bypass going to ULD for DDPed data */
@@ -447,7 +446,7 @@
 		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
 		crc->fcoe_eof = FC_EOF_T;
 	}
-ddp_out:
+
 	return rc;
 }
 
@@ -878,7 +877,6 @@
  */
 int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
 {
-	int rc = -EINVAL;
 	u16 prefix = 0xffff;
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
@@ -903,9 +901,9 @@
 		       ((u64) mac->san_addr[3] << 16) |
 		       ((u64) mac->san_addr[4] << 8)  |
 		       ((u64) mac->san_addr[5]);
-		rc = 0;
+		return 0;
 	}
-	return rc;
+	return -EINVAL;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f5aa331..5384ed3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -570,7 +570,7 @@
 
 	/* Print TX Ring Summary */
 	if (!netdev || !netif_running(netdev))
-		goto exit;
+		return;
 
 	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
 	pr_info(" %s     %s              %s        %s\n",
@@ -685,7 +685,7 @@
 
 	/* Print RX Rings */
 	if (!netif_msg_rx_status(adapter))
-		goto exit;
+		return;
 
 	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
 
@@ -787,9 +787,6 @@
 
 		}
 	}
-
-exit:
-	return;
 }
 
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
@@ -1011,7 +1008,6 @@
 	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
 	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
 	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
-	bool ret = false;
 
 	clear_check_for_tx_hang(tx_ring);
 
@@ -1027,18 +1023,16 @@
 	 * run the check_tx_hang logic with a transmit completion
 	 * pending but without time to complete it yet.
 	 */
-	if ((tx_done_old == tx_done) && tx_pending) {
+	if (tx_done_old == tx_done && tx_pending)
 		/* make sure it is true for two checks in a row */
-		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
-				       &tx_ring->state);
-	} else {
-		/* update completed stats and continue */
-		tx_ring->tx_stats.tx_done_old = tx_done;
-		/* reset the countdown */
-		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
-	}
+		return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
+					&tx_ring->state);
+	/* update completed stats and continue */
+	tx_ring->tx_stats.tx_done_old = tx_done;
+	/* reset the countdown */
+	clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
 
-	return ret;
+	return false;
 }
 
 /**
@@ -4701,18 +4695,18 @@
 		ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
 
 	if (ret)
-		goto link_cfg_out;
+		return ret;
 
 	speed = hw->phy.autoneg_advertised;
 	if ((!speed) && (hw->mac.ops.get_link_capabilities))
 		ret = hw->mac.ops.get_link_capabilities(hw, &speed,
 							&autoneg);
 	if (ret)
-		goto link_cfg_out;
+		return ret;
 
 	if (hw->mac.ops.setup_link)
 		ret = hw->mac.ops.setup_link(hw, speed, link_up);
-link_cfg_out:
+
 	return ret;
 }
 
@@ -7973,23 +7967,32 @@
  **/
 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
 {
-	struct list_head *entry;
+	struct pci_dev *entry, *pdev = adapter->pdev;
 	int physfns = 0;
 
 	/* Some cards can not use the generic count PCIe functions method,
 	 * because they are behind a parent switch, so we hardcode these with
 	 * the correct number of functions.
 	 */
-	if (ixgbe_pcie_from_parent(&adapter->hw)) {
+	if (ixgbe_pcie_from_parent(&adapter->hw))
 		physfns = 4;
-	} else {
-		list_for_each(entry, &adapter->pdev->bus_list) {
-			struct pci_dev *pdev =
-				list_entry(entry, struct pci_dev, bus_list);
-			/* don't count virtual functions */
-			if (!pdev->is_virtfn)
-				physfns++;
-		}
+
+	list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
+		/* don't count virtual functions */
+		if (entry->is_virtfn)
+			continue;
+
+		/* When the devices on the bus don't all match our device ID,
+		 * we can't reliably determine the correct number of
+		 * functions. This can occur if a function has been direct
+		 * attached to a virtual machine using VT-d, for example. In
+		 * this case, simply return -1 to indicate this.
+		 */
+		if ((entry->vendor != pdev->vendor) ||
+		    (entry->device != pdev->device))
+			return -1;
+
+		physfns++;
 	}
 
 	return physfns;
@@ -8161,7 +8164,7 @@
 	netdev->netdev_ops = &ixgbe_netdev_ops;
 	ixgbe_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo = 5 * HZ;
-	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
 
 	adapter->bd_number = cards_found;
 
@@ -8384,11 +8387,14 @@
 		expected_gts = ixgbe_enumerate_functions(adapter) * 10;
 		break;
 	}
-	ixgbe_check_minimum_link(adapter, expected_gts);
 
-	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+	/* don't check link if we failed to enumerate functions */
+	if (expected_gts > 0)
+		ixgbe_check_minimum_link(adapter, expected_gts);
+
+	err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
 	if (err)
-		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+		strlcpy(part_str, "Unknown", sizeof(part_str));
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
 		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
 			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
@@ -8477,7 +8483,7 @@
 				     pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
-	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+	if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
 		pci_disable_device(pdev);
 	return err;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 1918e0a..cc8f012 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -43,16 +43,15 @@
 s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
 
 	/* limit read to size of mailbox */
 	if (size > mbx->size)
 		size = mbx->size;
 
-	if (mbx->ops.read)
-		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+	if (!mbx->ops.read)
+		return IXGBE_ERR_MBX;
 
-	return ret_val;
+	return mbx->ops.read(hw, msg, size, mbx_id);
 }
 
 /**
@@ -67,15 +66,14 @@
 s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = 0;
 
 	if (size > mbx->size)
-		ret_val = IXGBE_ERR_MBX;
+		return IXGBE_ERR_MBX;
 
-	else if (mbx->ops.write)
-		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+	if (!mbx->ops.write)
+		return IXGBE_ERR_MBX;
 
-	return ret_val;
+	return mbx->ops.write(hw, msg, size, mbx_id);
 }
 
 /**
@@ -88,12 +86,11 @@
 s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
 
-	if (mbx->ops.check_for_msg)
-		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+	if (!mbx->ops.check_for_msg)
+		return IXGBE_ERR_MBX;
 
-	return ret_val;
+	return mbx->ops.check_for_msg(hw, mbx_id);
 }
 
 /**
@@ -106,12 +103,11 @@
 s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
 
-	if (mbx->ops.check_for_ack)
-		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+	if (!mbx->ops.check_for_ack)
+		return IXGBE_ERR_MBX;
 
-	return ret_val;
+	return mbx->ops.check_for_ack(hw, mbx_id);
 }
 
 /**
@@ -124,12 +120,11 @@
 s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
 
-	if (mbx->ops.check_for_rst)
-		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+	if (!mbx->ops.check_for_rst)
+		return IXGBE_ERR_MBX;
 
-	return ret_val;
+	return mbx->ops.check_for_rst(hw, mbx_id);
 }
 
 /**
@@ -145,17 +140,16 @@
 	int countdown = mbx->timeout;
 
 	if (!countdown || !mbx->ops.check_for_msg)
-		goto out;
+		return IXGBE_ERR_MBX;
 
-	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+	while (mbx->ops.check_for_msg(hw, mbx_id)) {
 		countdown--;
 		if (!countdown)
-			break;
+			return IXGBE_ERR_MBX;
 		udelay(mbx->usec_delay);
 	}
 
-out:
-	return countdown ? 0 : IXGBE_ERR_MBX;
+	return 0;
 }
 
 /**
@@ -171,17 +165,16 @@
 	int countdown = mbx->timeout;
 
 	if (!countdown || !mbx->ops.check_for_ack)
-		goto out;
+		return IXGBE_ERR_MBX;
 
-	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+	while (mbx->ops.check_for_ack(hw, mbx_id)) {
 		countdown--;
 		if (!countdown)
-			break;
+			return IXGBE_ERR_MBX;
 		udelay(mbx->usec_delay);
 	}
 
-out:
-	return countdown ? 0 : IXGBE_ERR_MBX;
+	return 0;
 }
 
 /**
@@ -198,18 +191,17 @@
 				 u16 mbx_id)
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
+	s32 ret_val;
 
 	if (!mbx->ops.read)
-		goto out;
+		return IXGBE_ERR_MBX;
 
 	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+	if (ret_val)
+		return ret_val;
 
-	/* if ack received read message, otherwise we timed out */
-	if (!ret_val)
-		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
-out:
-	return ret_val;
+	/* if ack received read message */
+	return mbx->ops.read(hw, msg, size, mbx_id);
 }
 
 /**
@@ -226,33 +218,31 @@
 			   u16 mbx_id)
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
+	s32 ret_val;
 
 	/* exit if either we can't write or there isn't a defined timeout */
 	if (!mbx->ops.write || !mbx->timeout)
-		goto out;
+		return IXGBE_ERR_MBX;
 
 	/* send msg */
 	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+	if (ret_val)
+		return ret_val;
 
 	/* if msg sent wait until we receive an ack */
-	if (!ret_val)
-		ret_val = ixgbe_poll_for_ack(hw, mbx_id);
-out:
-	return ret_val;
+	return ixgbe_poll_for_ack(hw, mbx_id);
 }
 
 static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
 {
 	u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
-	s32 ret_val = IXGBE_ERR_MBX;
 
 	if (mbvficr & mask) {
-		ret_val = 0;
 		IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+		return 0;
 	}
 
-	return ret_val;
+	return IXGBE_ERR_MBX;
 }
 
 /**
@@ -264,17 +254,16 @@
  **/
 static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
 {
-	s32 ret_val = IXGBE_ERR_MBX;
 	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
 	u32 vf_bit = vf_number % 16;
 
 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
 				    index)) {
-		ret_val = 0;
 		hw->mbx.stats.reqs++;
+		return 0;
 	}
 
-	return ret_val;
+	return IXGBE_ERR_MBX;
 }
 
 /**
@@ -286,17 +275,16 @@
  **/
 static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
 {
-	s32 ret_val = IXGBE_ERR_MBX;
 	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
 	u32 vf_bit = vf_number % 16;
 
 	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
 				    index)) {
-		ret_val = 0;
 		hw->mbx.stats.acks++;
+		return 0;
 	}
 
-	return ret_val;
+	return IXGBE_ERR_MBX;
 }
 
 /**
@@ -311,7 +299,6 @@
 	u32 reg_offset = (vf_number < 32) ? 0 : 1;
 	u32 vf_shift = vf_number % 32;
 	u32 vflre = 0;
-	s32 ret_val = IXGBE_ERR_MBX;
 
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
@@ -325,12 +312,12 @@
 	}
 
 	if (vflre & (1 << vf_shift)) {
-		ret_val = 0;
 		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
 		hw->mbx.stats.rsts++;
+		return 0;
 	}
 
-	return ret_val;
+	return IXGBE_ERR_MBX;
 }
 
 /**
@@ -342,7 +329,6 @@
  **/
 static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
 {
-	s32 ret_val = IXGBE_ERR_MBX;
 	u32 p2v_mailbox;
 
 	/* Take ownership of the buffer */
@@ -351,9 +337,9 @@
 	/* reserve mailbox for vf use */
 	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
 	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
-		ret_val = 0;
+		return 0;
 
-	return ret_val;
+	return IXGBE_ERR_MBX;
 }
 
 /**
@@ -374,7 +360,7 @@
 	/* lock the mailbox to prevent pf/vf race condition */
 	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
 	if (ret_val)
-		goto out_no_write;
+		return ret_val;
 
 	/* flush msg and acks as we are overwriting the message buffer */
 	ixgbe_check_for_msg_pf(hw, vf_number);
@@ -390,9 +376,7 @@
 	/* update stats */
 	hw->mbx.stats.msgs_tx++;
 
-out_no_write:
-	return ret_val;
-
+	return 0;
 }
 
 /**
@@ -415,7 +399,7 @@
 	/* lock the mailbox to prevent pf/vf race condition */
 	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
 	if (ret_val)
-		goto out_no_read;
+		return ret_val;
 
 	/* copy the message to the mailbox memory buffer */
 	for (i = 0; i < size; i++)
@@ -427,8 +411,7 @@
 	/* update stats */
 	hw->mbx.stats.msgs_rx++;
 
-out_no_read:
-	return ret_val;
+	return 0;
 }
 
 #ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index ff68b7a..11f02ea 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -57,7 +57,6 @@
  **/
 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 {
-	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
 	u32 phy_addr;
 	u16 ext_ability = 0;
 
@@ -84,18 +83,14 @@
 							 ixgbe_phy_generic;
 				}
 
-				status = 0;
-				break;
+				return 0;
 			}
 		}
 		/* clear value if nothing found */
-		if (status != 0)
-			hw->phy.mdio.prtad = 0;
-	} else {
-		status = 0;
+		hw->phy.mdio.prtad = 0;
+		return IXGBE_ERR_PHY_ADDR_INVALID;
 	}
-
-	return status;
+	return 0;
 }
 
 /**
@@ -192,16 +187,16 @@
 		status = ixgbe_identify_phy_generic(hw);
 
 	if (status != 0 || hw->phy.type == ixgbe_phy_none)
-		goto out;
+		return status;
 
 	/* Don't reset PHY if it's shut down due to overtemp. */
 	if (!hw->phy.reset_if_overtemp &&
 	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
-		goto out;
+		return 0;
 
 	/* Blocked by MNG FW so bail */
 	if (ixgbe_check_reset_blocked(hw))
-		goto out;
+		return 0;
 
 	/*
 	 * Perform soft PHY reset to the PHY_XS.
@@ -227,12 +222,11 @@
 	}
 
 	if (ctrl & MDIO_CTRL1_RESET) {
-		status = IXGBE_ERR_RESET_FAILED;
 		hw_dbg(hw, "PHY reset polling failed to complete.\n");
+		return IXGBE_ERR_RESET_FAILED;
 	}
 
-out:
-	return status;
+	return 0;
 }
 
 /**
@@ -333,7 +327,7 @@
 						phy_data);
 		hw->mac.ops.release_swfw_sync(hw, gssr);
 	} else {
-		status = IXGBE_ERR_SWFW_SYNC;
+		return IXGBE_ERR_SWFW_SYNC;
 	}
 
 	return status;
@@ -436,7 +430,7 @@
 						 phy_data);
 		hw->mac.ops.release_swfw_sync(hw, gssr);
 	} else {
-		status = IXGBE_ERR_SWFW_SYNC;
+		return IXGBE_ERR_SWFW_SYNC;
 	}
 
 	return status;
@@ -509,7 +503,7 @@
 
 	/* Blocked by MNG FW so don't reset PHY */
 	if (ixgbe_check_reset_blocked(hw))
-		return status;
+		return 0;
 
 	/* Restart PHY autonegotiation and wait for completion */
 	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
@@ -535,8 +529,8 @@
 	}
 
 	if (time_out == max_time_out) {
-		status = IXGBE_ERR_LINK_SETUP;
 		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
+		return IXGBE_ERR_LINK_SETUP;
 	}
 
 	return status;
@@ -585,7 +579,7 @@
 					       ixgbe_link_speed *speed,
 					       bool *autoneg)
 {
-	s32 status = IXGBE_ERR_LINK_SETUP;
+	s32 status;
 	u16 speed_ability;
 
 	*speed = 0;
@@ -616,7 +610,7 @@
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 			     bool *link_up)
 {
-	s32 status = 0;
+	s32 status;
 	u32 time_out;
 	u32 max_time_out = 10;
 	u16 phy_link = 0;
@@ -662,7 +656,7 @@
  **/
 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
+	s32 status;
 	u32 time_out;
 	u32 max_time_out = 10;
 	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
@@ -719,7 +713,7 @@
 
 	/* Blocked by MNG FW so don't reset PHY */
 	if (ixgbe_check_reset_blocked(hw))
-		return status;
+		return 0;
 
 	/* Restart PHY autonegotiation and wait for completion */
 	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
@@ -744,8 +738,8 @@
 	}
 
 	if (time_out == max_time_out) {
-		status = IXGBE_ERR_LINK_SETUP;
 		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
+		return IXGBE_ERR_LINK_SETUP;
 	}
 
 	return status;
@@ -759,7 +753,7 @@
 s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
 				       u16 *firmware_version)
 {
-	s32 status = 0;
+	s32 status;
 
 	status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
 				      MDIO_MMD_VEND1,
@@ -776,7 +770,7 @@
 s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
 					   u16 *firmware_version)
 {
-	s32 status = 0;
+	s32 status;
 
 	status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
 				      MDIO_MMD_VEND1,
@@ -795,12 +789,12 @@
 	bool end_data = false;
 	u16 list_offset, data_offset;
 	u16 phy_data = 0;
-	s32 ret_val = 0;
+	s32 ret_val;
 	u32 i;
 
 	/* Blocked by MNG FW so bail */
 	if (ixgbe_check_reset_blocked(hw))
-		goto out;
+		return 0;
 
 	hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
 
@@ -818,15 +812,14 @@
 
 	if ((phy_data & MDIO_CTRL1_RESET) != 0) {
 		hw_dbg(hw, "PHY reset did not complete.\n");
-		ret_val = IXGBE_ERR_PHY;
-		goto out;
+		return IXGBE_ERR_PHY;
 	}
 
 	/* Get init offsets */
 	ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
 						      &data_offset);
-	if (ret_val != 0)
-		goto out;
+	if (ret_val)
+		return ret_val;
 
 	ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
 	data_offset++;
@@ -876,18 +869,15 @@
 				hw_dbg(hw, "SOL\n");
 			} else {
 				hw_dbg(hw, "Bad control value\n");
-				ret_val = IXGBE_ERR_PHY;
-				goto out;
+				return IXGBE_ERR_PHY;
 			}
 			break;
 		default:
 			hw_dbg(hw, "Bad control type\n");
-			ret_val = IXGBE_ERR_PHY;
-			goto out;
+			return IXGBE_ERR_PHY;
 		}
 	}
 
-out:
 	return ret_val;
 
 err_eeprom:
@@ -903,34 +893,29 @@
  **/
 s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
 {
-	s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
-
 	switch (hw->mac.ops.get_media_type(hw)) {
 	case ixgbe_media_type_fiber:
-		status = ixgbe_identify_sfp_module_generic(hw);
-		break;
+		return ixgbe_identify_sfp_module_generic(hw);
 	case ixgbe_media_type_fiber_qsfp:
-		status = ixgbe_identify_qsfp_module_generic(hw);
-		break;
+		return ixgbe_identify_qsfp_module_generic(hw);
 	default:
 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-		status = IXGBE_ERR_SFP_NOT_PRESENT;
-		break;
+		return IXGBE_ERR_SFP_NOT_PRESENT;
 	}
 
-	return status;
+	return IXGBE_ERR_SFP_NOT_PRESENT;
 }
 
 /**
  *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
  *  @hw: pointer to hardware structure
-*
+ *
  *  Searches for and identifies the SFP module and assigns appropriate PHY type.
  **/
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 {
 	struct ixgbe_adapter *adapter = hw->back;
-	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+	s32 status;
 	u32 vendor_oui = 0;
 	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
 	u8 identifier = 0;
@@ -943,15 +928,14 @@
 
 	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-		status = IXGBE_ERR_SFP_NOT_PRESENT;
-		goto out;
+		return IXGBE_ERR_SFP_NOT_PRESENT;
 	}
 
 	status = hw->phy.ops.read_i2c_eeprom(hw,
 					     IXGBE_SFF_IDENTIFIER,
 					     &identifier);
 
-	if (status != 0)
+	if (status)
 		goto err_read_i2c_eeprom;
 
 	/* LAN ID is needed for sfp_type determination */
@@ -959,239 +943,224 @@
 
 	if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
 		hw->phy.type = ixgbe_phy_sfp_unsupported;
-		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-	} else {
-		status = hw->phy.ops.read_i2c_eeprom(hw,
-						     IXGBE_SFF_1GBE_COMP_CODES,
-						     &comp_codes_1g);
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
+	}
+	status = hw->phy.ops.read_i2c_eeprom(hw,
+					     IXGBE_SFF_1GBE_COMP_CODES,
+					     &comp_codes_1g);
 
-		if (status != 0)
-			goto err_read_i2c_eeprom;
+	if (status)
+		goto err_read_i2c_eeprom;
 
-		status = hw->phy.ops.read_i2c_eeprom(hw,
-						     IXGBE_SFF_10GBE_COMP_CODES,
-						     &comp_codes_10g);
+	status = hw->phy.ops.read_i2c_eeprom(hw,
+					     IXGBE_SFF_10GBE_COMP_CODES,
+					     &comp_codes_10g);
 
-		if (status != 0)
-			goto err_read_i2c_eeprom;
-		status = hw->phy.ops.read_i2c_eeprom(hw,
-						     IXGBE_SFF_CABLE_TECHNOLOGY,
-						     &cable_tech);
+	if (status)
+		goto err_read_i2c_eeprom;
+	status = hw->phy.ops.read_i2c_eeprom(hw,
+					     IXGBE_SFF_CABLE_TECHNOLOGY,
+					     &cable_tech);
 
-		if (status != 0)
-			goto err_read_i2c_eeprom;
+	if (status)
+		goto err_read_i2c_eeprom;
 
-		 /* ID Module
-		  * =========
-		  * 0   SFP_DA_CU
-		  * 1   SFP_SR
-		  * 2   SFP_LR
-		  * 3   SFP_DA_CORE0 - 82599-specific
-		  * 4   SFP_DA_CORE1 - 82599-specific
-		  * 5   SFP_SR/LR_CORE0 - 82599-specific
-		  * 6   SFP_SR/LR_CORE1 - 82599-specific
-		  * 7   SFP_act_lmt_DA_CORE0 - 82599-specific
-		  * 8   SFP_act_lmt_DA_CORE1 - 82599-specific
-		  * 9   SFP_1g_cu_CORE0 - 82599-specific
-		  * 10  SFP_1g_cu_CORE1 - 82599-specific
-		  * 11  SFP_1g_sx_CORE0 - 82599-specific
-		  * 12  SFP_1g_sx_CORE1 - 82599-specific
-		  */
-		if (hw->mac.type == ixgbe_mac_82598EB) {
-			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-				hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
-			else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-				hw->phy.sfp_type = ixgbe_sfp_type_sr;
-			else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-				hw->phy.sfp_type = ixgbe_sfp_type_lr;
+	 /* ID Module
+	  * =========
+	  * 0   SFP_DA_CU
+	  * 1   SFP_SR
+	  * 2   SFP_LR
+	  * 3   SFP_DA_CORE0 - 82599-specific
+	  * 4   SFP_DA_CORE1 - 82599-specific
+	  * 5   SFP_SR/LR_CORE0 - 82599-specific
+	  * 6   SFP_SR/LR_CORE1 - 82599-specific
+	  * 7   SFP_act_lmt_DA_CORE0 - 82599-specific
+	  * 8   SFP_act_lmt_DA_CORE1 - 82599-specific
+	  * 9   SFP_1g_cu_CORE0 - 82599-specific
+	  * 10  SFP_1g_cu_CORE1 - 82599-specific
+	  * 11  SFP_1g_sx_CORE0 - 82599-specific
+	  * 12  SFP_1g_sx_CORE1 - 82599-specific
+	  */
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+		else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+			hw->phy.sfp_type = ixgbe_sfp_type_sr;
+		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+			hw->phy.sfp_type = ixgbe_sfp_type_lr;
+		else
+			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+	} else if (hw->mac.type == ixgbe_mac_82599EB) {
+		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+			if (hw->bus.lan_id == 0)
+				hw->phy.sfp_type =
+					     ixgbe_sfp_type_da_cu_core0;
 			else
-				hw->phy.sfp_type = ixgbe_sfp_type_unknown;
-		} else if (hw->mac.type == ixgbe_mac_82599EB) {
-			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+				hw->phy.sfp_type =
+					     ixgbe_sfp_type_da_cu_core1;
+		} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+			hw->phy.ops.read_i2c_eeprom(
+					hw, IXGBE_SFF_CABLE_SPEC_COMP,
+					&cable_spec);
+			if (cable_spec &
+			    IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
 				if (hw->bus.lan_id == 0)
 					hw->phy.sfp_type =
-						     ixgbe_sfp_type_da_cu_core0;
+					ixgbe_sfp_type_da_act_lmt_core0;
 				else
 					hw->phy.sfp_type =
-						     ixgbe_sfp_type_da_cu_core1;
-			} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
-				hw->phy.ops.read_i2c_eeprom(
-						hw, IXGBE_SFF_CABLE_SPEC_COMP,
-						&cable_spec);
-				if (cable_spec &
-				    IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
-					if (hw->bus.lan_id == 0)
-						hw->phy.sfp_type =
-						ixgbe_sfp_type_da_act_lmt_core0;
-					else
-						hw->phy.sfp_type =
-						ixgbe_sfp_type_da_act_lmt_core1;
-				} else {
-					hw->phy.sfp_type =
-							ixgbe_sfp_type_unknown;
-				}
-			} else if (comp_codes_10g &
-				   (IXGBE_SFF_10GBASESR_CAPABLE |
-				    IXGBE_SFF_10GBASELR_CAPABLE)) {
-				if (hw->bus.lan_id == 0)
-					hw->phy.sfp_type =
-						      ixgbe_sfp_type_srlr_core0;
-				else
-					hw->phy.sfp_type =
-						      ixgbe_sfp_type_srlr_core1;
-			} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
-				if (hw->bus.lan_id == 0)
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_cu_core0;
-				else
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_cu_core1;
-			} else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
-				if (hw->bus.lan_id == 0)
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_sx_core0;
-				else
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_sx_core1;
-			} else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
-				if (hw->bus.lan_id == 0)
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_lx_core0;
-				else
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_lx_core1;
+					ixgbe_sfp_type_da_act_lmt_core1;
 			} else {
-				hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+				hw->phy.sfp_type =
+						ixgbe_sfp_type_unknown;
 			}
-		}
-
-		if (hw->phy.sfp_type != stored_sfp_type)
-			hw->phy.sfp_setup_needed = true;
-
-		/* Determine if the SFP+ PHY is dual speed or not. */
-		hw->phy.multispeed_fiber = false;
-		if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
-		   (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
-		   ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
-		   (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
-			hw->phy.multispeed_fiber = true;
-
-		/* Determine PHY vendor */
-		if (hw->phy.type != ixgbe_phy_nl) {
-			hw->phy.id = identifier;
-			status = hw->phy.ops.read_i2c_eeprom(hw,
-						    IXGBE_SFF_VENDOR_OUI_BYTE0,
-						    &oui_bytes[0]);
-
-			if (status != 0)
-				goto err_read_i2c_eeprom;
-
-			status = hw->phy.ops.read_i2c_eeprom(hw,
-						    IXGBE_SFF_VENDOR_OUI_BYTE1,
-						    &oui_bytes[1]);
-
-			if (status != 0)
-				goto err_read_i2c_eeprom;
-
-			status = hw->phy.ops.read_i2c_eeprom(hw,
-						    IXGBE_SFF_VENDOR_OUI_BYTE2,
-						    &oui_bytes[2]);
-
-			if (status != 0)
-				goto err_read_i2c_eeprom;
-
-			vendor_oui =
-			  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
-			   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
-			   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
-
-			switch (vendor_oui) {
-			case IXGBE_SFF_VENDOR_OUI_TYCO:
-				if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-					hw->phy.type =
-						    ixgbe_phy_sfp_passive_tyco;
-				break;
-			case IXGBE_SFF_VENDOR_OUI_FTL:
-				if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
-					hw->phy.type = ixgbe_phy_sfp_ftl_active;
-				else
-					hw->phy.type = ixgbe_phy_sfp_ftl;
-				break;
-			case IXGBE_SFF_VENDOR_OUI_AVAGO:
-				hw->phy.type = ixgbe_phy_sfp_avago;
-				break;
-			case IXGBE_SFF_VENDOR_OUI_INTEL:
-				hw->phy.type = ixgbe_phy_sfp_intel;
-				break;
-			default:
-				if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-					hw->phy.type =
-						 ixgbe_phy_sfp_passive_unknown;
-				else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
-					hw->phy.type =
-						ixgbe_phy_sfp_active_unknown;
-				else
-					hw->phy.type = ixgbe_phy_sfp_unknown;
-				break;
-			}
-		}
-
-		/* Allow any DA cable vendor */
-		if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
-		    IXGBE_SFF_DA_ACTIVE_CABLE)) {
-			status = 0;
-			goto out;
-		}
-
-		/* Verify supported 1G SFP modules */
-		if (comp_codes_10g == 0 &&
-		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
-			hw->phy.type = ixgbe_phy_sfp_unsupported;
-			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-			goto out;
-		}
-
-		/* Anything else 82598-based is supported */
-		if (hw->mac.type == ixgbe_mac_82598EB) {
-			status = 0;
-			goto out;
-		}
-
-		hw->mac.ops.get_device_caps(hw, &enforce_sfp);
-		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
-		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
-			/* Make sure we're a supported PHY type */
-			if (hw->phy.type == ixgbe_phy_sfp_intel) {
-				status = 0;
-			} else {
-				if (hw->allow_unsupported_sfp) {
-					e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics.  Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter.  Intel Corporation is not responsible for any harm caused by using untested modules.\n");
-					status = 0;
-				} else {
-					hw_dbg(hw,
-					       "SFP+ module not supported\n");
-					hw->phy.type =
-						ixgbe_phy_sfp_unsupported;
-					status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-				}
-			}
+		} else if (comp_codes_10g &
+			   (IXGBE_SFF_10GBASESR_CAPABLE |
+			    IXGBE_SFF_10GBASELR_CAPABLE)) {
+			if (hw->bus.lan_id == 0)
+				hw->phy.sfp_type =
+					      ixgbe_sfp_type_srlr_core0;
+			else
+				hw->phy.sfp_type =
+					      ixgbe_sfp_type_srlr_core1;
+		} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
+			if (hw->bus.lan_id == 0)
+				hw->phy.sfp_type =
+					ixgbe_sfp_type_1g_cu_core0;
+			else
+				hw->phy.sfp_type =
+					ixgbe_sfp_type_1g_cu_core1;
+		} else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+			if (hw->bus.lan_id == 0)
+				hw->phy.sfp_type =
+					ixgbe_sfp_type_1g_sx_core0;
+			else
+				hw->phy.sfp_type =
+					ixgbe_sfp_type_1g_sx_core1;
+		} else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
+			if (hw->bus.lan_id == 0)
+				hw->phy.sfp_type =
+					ixgbe_sfp_type_1g_lx_core0;
+			else
+				hw->phy.sfp_type =
+					ixgbe_sfp_type_1g_lx_core1;
 		} else {
-			status = 0;
+			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
 		}
 	}
 
-out:
-	return status;
+	if (hw->phy.sfp_type != stored_sfp_type)
+		hw->phy.sfp_setup_needed = true;
+
+	/* Determine if the SFP+ PHY is dual speed or not. */
+	hw->phy.multispeed_fiber = false;
+	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+		hw->phy.multispeed_fiber = true;
+
+	/* Determine PHY vendor */
+	if (hw->phy.type != ixgbe_phy_nl) {
+		hw->phy.id = identifier;
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+					    IXGBE_SFF_VENDOR_OUI_BYTE0,
+					    &oui_bytes[0]);
+
+		if (status != 0)
+			goto err_read_i2c_eeprom;
+
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+					    IXGBE_SFF_VENDOR_OUI_BYTE1,
+					    &oui_bytes[1]);
+
+		if (status != 0)
+			goto err_read_i2c_eeprom;
+
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+					    IXGBE_SFF_VENDOR_OUI_BYTE2,
+					    &oui_bytes[2]);
+
+		if (status != 0)
+			goto err_read_i2c_eeprom;
+
+		vendor_oui =
+		  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+		   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+		   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+		switch (vendor_oui) {
+		case IXGBE_SFF_VENDOR_OUI_TYCO:
+			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+				hw->phy.type =
+					    ixgbe_phy_sfp_passive_tyco;
+			break;
+		case IXGBE_SFF_VENDOR_OUI_FTL:
+			if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+				hw->phy.type = ixgbe_phy_sfp_ftl_active;
+			else
+				hw->phy.type = ixgbe_phy_sfp_ftl;
+			break;
+		case IXGBE_SFF_VENDOR_OUI_AVAGO:
+			hw->phy.type = ixgbe_phy_sfp_avago;
+			break;
+		case IXGBE_SFF_VENDOR_OUI_INTEL:
+			hw->phy.type = ixgbe_phy_sfp_intel;
+			break;
+		default:
+			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+				hw->phy.type =
+					 ixgbe_phy_sfp_passive_unknown;
+			else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+				hw->phy.type =
+					ixgbe_phy_sfp_active_unknown;
+			else
+				hw->phy.type = ixgbe_phy_sfp_unknown;
+			break;
+		}
+	}
+
+	/* Allow any DA cable vendor */
+	if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+	    IXGBE_SFF_DA_ACTIVE_CABLE))
+		return 0;
+
+	/* Verify supported 1G SFP modules */
+	if (comp_codes_10g == 0 &&
+	    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+	      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+		hw->phy.type = ixgbe_phy_sfp_unsupported;
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
+	}
+
+	/* Anything else 82598-based is supported */
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return 0;
+
+	hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+	if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+	    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+	      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+		/* Make sure we're a supported PHY type */
+		if (hw->phy.type == ixgbe_phy_sfp_intel)
+			return 0;
+		if (hw->allow_unsupported_sfp) {
+			e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics.  Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter.  Intel Corporation is not responsible for any harm caused by using untested modules.\n");
+			return 0;
+		}
+		hw_dbg(hw, "SFP+ module not supported\n");
+		hw->phy.type = ixgbe_phy_sfp_unsupported;
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
+	}
+	return 0;
 
 err_read_i2c_eeprom:
 	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
@@ -1211,7 +1180,7 @@
 static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
 {
 	struct ixgbe_adapter *adapter = hw->back;
-	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+	s32 status;
 	u32 vendor_oui = 0;
 	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
 	u8 identifier = 0;
@@ -1226,8 +1195,7 @@
 
 	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-		status = IXGBE_ERR_SFP_NOT_PRESENT;
-		goto out;
+		return IXGBE_ERR_SFP_NOT_PRESENT;
 	}
 
 	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
@@ -1238,8 +1206,7 @@
 
 	if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
 		hw->phy.type = ixgbe_phy_sfp_unsupported;
-		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-		goto out;
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
 	}
 
 	hw->phy.id = identifier;
@@ -1310,8 +1277,7 @@
 		} else {
 			/* unsupported module type */
 			hw->phy.type = ixgbe_phy_sfp_unsupported;
-			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-			goto out;
+			return IXGBE_ERR_SFP_NOT_SUPPORTED;
 		}
 	}
 
@@ -1363,27 +1329,19 @@
 		hw->mac.ops.get_device_caps(hw, &enforce_sfp);
 		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
 			/* Make sure we're a supported PHY type */
-			if (hw->phy.type == ixgbe_phy_qsfp_intel) {
-				status = 0;
-			} else {
-				if (hw->allow_unsupported_sfp == true) {
-					e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
-					status = 0;
-				} else {
-					hw_dbg(hw,
-					       "QSFP module not supported\n");
-					hw->phy.type =
-						ixgbe_phy_sfp_unsupported;
-					status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-				}
+			if (hw->phy.type == ixgbe_phy_qsfp_intel)
+				return 0;
+			if (hw->allow_unsupported_sfp) {
+				e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
+				return 0;
 			}
-		} else {
-			status = 0;
+			hw_dbg(hw, "QSFP module not supported\n");
+			hw->phy.type = ixgbe_phy_sfp_unsupported;
+			return IXGBE_ERR_SFP_NOT_SUPPORTED;
 		}
+		return 0;
 	}
-
-out:
-	return status;
+	return 0;
 
 err_read_i2c_eeprom:
 	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
@@ -1544,7 +1502,7 @@
 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
 				u8 dev_addr, u8 *data)
 {
-	s32 status = 0;
+	s32 status;
 	u32 max_retry = 10;
 	u32 retry = 0;
 	u16 swfw_mask = 0;
@@ -1557,10 +1515,8 @@
 		swfw_mask = IXGBE_GSSR_PHY0_SM;
 
 	do {
-		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
-			status = IXGBE_ERR_SWFW_SYNC;
-			goto read_byte_out;
-		}
+		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+			return IXGBE_ERR_SWFW_SYNC;
 
 		ixgbe_i2c_start(hw);
 
@@ -1617,7 +1573,6 @@
 
 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 
-read_byte_out:
 	return status;
 }
 
@@ -1633,7 +1588,7 @@
 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
 				 u8 dev_addr, u8 data)
 {
-	s32 status = 0;
+	s32 status;
 	u32 max_retry = 1;
 	u32 retry = 0;
 	u16 swfw_mask = 0;
@@ -1643,10 +1598,8 @@
 	else
 		swfw_mask = IXGBE_GSSR_PHY0_SM;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
-		status = IXGBE_ERR_SWFW_SYNC;
-		goto write_byte_out;
-	}
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+		return IXGBE_ERR_SWFW_SYNC;
 
 	do {
 		ixgbe_i2c_start(hw);
@@ -1689,7 +1642,6 @@
 
 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 
-write_byte_out:
 	return status;
 }
 
@@ -1774,7 +1726,7 @@
  **/
 static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
 {
-	s32 status = 0;
+	s32 status;
 	s32 i;
 	u32 i2cctl;
 	bool bit = false;
@@ -1893,11 +1845,11 @@
 		 */
 		udelay(IXGBE_I2C_T_LOW);
 	} else {
-		status = IXGBE_ERR_I2C;
 		hw_dbg(hw, "I2C data was not set to %X\n", data);
+		return IXGBE_ERR_I2C;
 	}
 
-	return status;
+	return 0;
 }
 /**
  *  ixgbe_raise_i2c_clk - Raises the I2C SCL clock
@@ -1954,8 +1906,6 @@
  **/
 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
 {
-	s32 status = 0;
-
 	if (data)
 		*i2cctl |= IXGBE_I2C_DATA_OUT;
 	else
@@ -1970,11 +1920,11 @@
 	/* Verify data was set correctly */
 	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
 	if (data != ixgbe_get_i2c_data(i2cctl)) {
-		status = IXGBE_ERR_I2C;
 		hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
+		return IXGBE_ERR_I2C;
 	}
 
-	return status;
+	return 0;
 }
 
 /**
@@ -1986,14 +1936,9 @@
  **/
 static bool ixgbe_get_i2c_data(u32 *i2cctl)
 {
-	bool data;
-
 	if (*i2cctl & IXGBE_I2C_DATA_IN)
-		data = true;
-	else
-		data = false;
-
-	return data;
+		return true;
+	return false;
 }
 
 /**
@@ -2038,20 +1983,17 @@
  **/
 s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
 	u16 phy_data = 0;
 
 	if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
-		goto out;
+		return 0;
 
 	/* Check that the LASI temp alarm status was triggered */
 	hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
 			     MDIO_MMD_PMAPMD, &phy_data);
 
 	if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
-		goto out;
+		return 0;
 
-	status = IXGBE_ERR_OVERTEMP;
-out:
-	return status;
+	return IXGBE_ERR_OVERTEMP;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 16b3a1c..c14d4d8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -245,10 +245,10 @@
 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 		err = ixgbe_disable_sriov(adapter);
 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
-		goto out;
+		return num_vfs;
 
 	if (err)
-		goto err_out;
+		return err;
 
 	/* While the SR-IOV capability structure reports total VFs to be
 	 * 64 we limit the actual number that can be allocated to 63 so
@@ -256,16 +256,14 @@
 	 * PF.  The PCI bus driver already checks for other values out of
 	 * range.
 	 */
-	if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
-		err = -EPERM;
-		goto err_out;
-	}
+	if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT)
+		return -EPERM;
 
 	adapter->num_vfs = num_vfs;
 
 	err = __ixgbe_enable_sriov(adapter);
 	if (err)
-		goto err_out;
+		return err;
 
 	for (i = 0; i < adapter->num_vfs; i++)
 		ixgbe_vf_configuration(dev, (i | 0x10000000));
@@ -273,17 +271,14 @@
 	err = pci_enable_sriov(dev, num_vfs);
 	if (err) {
 		e_dev_warn("Failed to enable PCI sriov: %d\n", err);
-		goto err_out;
+		return err;
 	}
 	ixgbe_sriov_reinit(adapter);
 
-out:
 	return num_vfs;
-
-err_out:
-	return err;
-#endif
+#else
 	return 0;
+#endif
 }
 
 static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
@@ -807,7 +802,7 @@
 	if (!add && adapter->netdev->flags & IFF_PROMISC) {
 		reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
 		if (reg_ndx < 0)
-			goto out;
+			return err;
 		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
 		/* See if any other pools are set for this VLAN filter
 		 * entry other than the PF.
@@ -833,8 +828,6 @@
 			ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
 	}
 
-out:
-
 	return err;
 }
 
@@ -951,7 +944,7 @@
 
 	/* this is a message we already processed, do nothing */
 	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
-		return retval;
+		return 0;
 
 	/* flush the ack before we write any messages back */
 	IXGBE_WRITE_FLUSH(hw);
@@ -966,7 +959,7 @@
 	if (!adapter->vfinfo[vf].clear_to_send) {
 		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
 		ixgbe_write_mbx(hw, msgbuf, 1, vf);
-		return retval;
+		return 0;
 	}
 
 	switch ((msgbuf[0] & 0xFFFF)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 9a89f98..e6b07c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2440,25 +2440,6 @@
 					IXGBE_LINK_SPEED_1GB_FULL | \
 					IXGBE_LINK_SPEED_10GB_FULL)
 
-
-/* Physical layer type */
-typedef u32 ixgbe_physical_layer;
-#define IXGBE_PHYSICAL_LAYER_UNKNOWN      0
-#define IXGBE_PHYSICAL_LAYER_10GBASE_T    0x0001
-#define IXGBE_PHYSICAL_LAYER_1000BASE_T   0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX   0x0004
-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU  0x0008
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LR   0x0010
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM  0x0020
-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR   0x0040
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4  0x0080
-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4  0x0100
-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX  0x0200
-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX  0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR   0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
-
 /* Flow Control Data Sheet defined values
  * Calculation and defines taken from 802.1bb Annex O
  */
@@ -2860,7 +2841,6 @@
 	s32 (*start_hw)(struct ixgbe_hw *);
 	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
 	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
-	u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
 	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
 	s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
 	s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 40dd798..e88305d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -99,8 +99,8 @@
 
 	/* Call adapter stop to disable tx/rx and clear interrupts */
 	status = hw->mac.ops.stop_adapter(hw);
-	if (status != 0)
-		goto reset_hw_out;
+	if (status)
+		return status;
 
 	/* flush pending Tx transactions */
 	ixgbe_clear_tx_pending(hw);
@@ -168,7 +168,6 @@
 	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
 				   &hw->mac.wwpn_prefix);
 
-reset_hw_out:
 	return status;
 }
 
@@ -182,40 +181,13 @@
  **/
 static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
 {
-	s32 ret_val = 0;
+	s32 ret_val;
 
 	ret_val = ixgbe_start_hw_generic(hw);
-	if (ret_val != 0)
-		goto out;
+	if (ret_val)
+		return ret_val;
 
-	ret_val = ixgbe_start_hw_gen2(hw);
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
-{
-	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-	u16 ext_ability = 0;
-
-	hw->phy.ops.identify(hw);
-
-	hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-			     &ext_ability);
-	if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
-		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-	if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
-		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-	if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
-		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-
-	return physical_layer;
+	return ixgbe_start_hw_gen2(hw);
 }
 
 /**
@@ -258,13 +230,12 @@
  **/
 static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
 {
-	s32 status = 0;
+	s32 status;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    0)
-		status = ixgbe_read_eerd_generic(hw, offset, data);
-	else
-		status = IXGBE_ERR_SWFW_SYNC;
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+		return IXGBE_ERR_SWFW_SYNC;
+
+	status = ixgbe_read_eerd_generic(hw, offset, data);
 
 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 	return status;
@@ -282,14 +253,12 @@
 static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
 				       u16 offset, u16 words, u16 *data)
 {
-	s32 status = 0;
+	s32 status;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    0)
-		status = ixgbe_read_eerd_buffer_generic(hw, offset,
-							words, data);
-	else
-		status = IXGBE_ERR_SWFW_SYNC;
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+		return IXGBE_ERR_SWFW_SYNC;
+
+	status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
 
 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 	return status;
@@ -305,12 +274,12 @@
  **/
 static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
 {
-	s32 status = 0;
+	s32 status;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
-		status = ixgbe_write_eewr_generic(hw, offset, data);
-	else
-		status = IXGBE_ERR_SWFW_SYNC;
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+		return IXGBE_ERR_SWFW_SYNC;
+
+	status = ixgbe_write_eewr_generic(hw, offset, data);
 
 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 	return status;
@@ -328,14 +297,12 @@
 static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
 					u16 offset, u16 words, u16 *data)
 {
-	s32 status = 0;
+	s32 status;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    0)
-		status = ixgbe_write_eewr_buffer_generic(hw, offset,
-							 words, data);
-	else
-		status = IXGBE_ERR_SWFW_SYNC;
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+		return IXGBE_ERR_SWFW_SYNC;
+
+	status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data);
 
 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 	return status;
@@ -430,44 +397,37 @@
 	u16 checksum;
 	u16 read_checksum = 0;
 
-	/*
-	 * Read the first word from the EEPROM. If this times out or fails, do
+	/* Read the first word from the EEPROM. If this times out or fails, do
 	 * not continue or we could be in for a very long wait while every
 	 * EEPROM read fails
 	 */
 	status = hw->eeprom.ops.read(hw, 0, &checksum);
-
-	if (status != 0) {
+	if (status) {
 		hw_dbg(hw, "EEPROM read failed\n");
-		goto out;
+		return status;
 	}
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
-		checksum = hw->eeprom.ops.calc_checksum(hw);
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+		return IXGBE_ERR_SWFW_SYNC;
 
-		/*
-		 * Do not use hw->eeprom.ops.read because we do not want to take
-		 * the synchronization semaphores twice here.
-		 */
-		ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
-					&read_checksum);
+	checksum = hw->eeprom.ops.calc_checksum(hw);
 
-		/*
-		 * Verify read checksum from EEPROM is the same as
-		 * calculated checksum
-		 */
-		if (read_checksum != checksum)
-			status = IXGBE_ERR_EEPROM_CHECKSUM;
-
-		/* If the user cares, return the calculated checksum */
-		if (checksum_val)
-			*checksum_val = checksum;
-	} else {
-		status = IXGBE_ERR_SWFW_SYNC;
-	}
+	/* Do not use hw->eeprom.ops.read because we do not want to take
+	 * the synchronization semaphores twice here.
+	 */
+	status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+					 &read_checksum);
 
 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-out:
+
+	/* If the user cares, return the calculated checksum */
+	if (checksum_val)
+		*checksum_val = checksum;
+
+	/* Verify read and calculated checksums are the same */
+	if (read_checksum != checksum)
+		return IXGBE_ERR_EEPROM_CHECKSUM;
+
 	return status;
 }
 
@@ -484,34 +444,29 @@
 	s32 status;
 	u16 checksum;
 
-	/*
-	 * Read the first word from the EEPROM. If this times out or fails, do
+	/* Read the first word from the EEPROM. If this times out or fails, do
 	 * not continue or we could be in for a very long wait while every
 	 * EEPROM read fails
 	 */
 	status = hw->eeprom.ops.read(hw, 0, &checksum);
-
-	if (status != 0)
+	if (status) {
 		hw_dbg(hw, "EEPROM read failed\n");
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
-		checksum = hw->eeprom.ops.calc_checksum(hw);
-
-		/*
-		 * Do not use hw->eeprom.ops.write because we do not want to
-		 * take the synchronization semaphores twice here.
-		 */
-		status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
-						  checksum);
-
-	if (status == 0)
-		status = ixgbe_update_flash_X540(hw);
-	else
-		status = IXGBE_ERR_SWFW_SYNC;
+		return status;
 	}
 
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+		return IXGBE_ERR_SWFW_SYNC;
 
+	checksum = hw->eeprom.ops.calc_checksum(hw);
+
+	/* Do not use hw->eeprom.ops.write because we do not want to
+	 * take the synchronization semaphores twice here.
+	 */
+	status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+	if (!status)
+		status = ixgbe_update_flash_X540(hw);
+
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 	return status;
 }
 
@@ -525,12 +480,12 @@
 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
 {
 	u32 flup;
-	s32 status = IXGBE_ERR_EEPROM;
+	s32 status;
 
 	status = ixgbe_poll_flash_update_done_X540(hw);
 	if (status == IXGBE_ERR_EEPROM) {
 		hw_dbg(hw, "Flash update time out\n");
-		goto out;
+		return status;
 	}
 
 	flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
@@ -556,7 +511,7 @@
 		else
 			hw_dbg(hw, "Flash update time out\n");
 	}
-out:
+
 	return status;
 }
 
@@ -571,17 +526,14 @@
 {
 	u32 i;
 	u32 reg;
-	s32 status = IXGBE_ERR_EEPROM;
 
 	for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
 		reg = IXGBE_READ_REG(hw, IXGBE_EEC);
-		if (reg & IXGBE_EEC_FLUDONE) {
-			status = 0;
-			break;
-		}
+		if (reg & IXGBE_EEC_FLUDONE)
+			return 0;
 		udelay(5);
 	}
-	return status;
+	return IXGBE_ERR_EEPROM;
 }
 
 /**
@@ -676,46 +628,44 @@
 }
 
 /**
- * ixgbe_get_nvm_semaphore - Get hardware semaphore
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
  * @hw: pointer to hardware structure
  *
  * Sets the hardware semaphores so SW/FW can gain control of shared resources
- **/
+ */
 static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
 {
-	s32 status = IXGBE_ERR_EEPROM;
 	u32 timeout = 2000;
 	u32 i;
 	u32 swsm;
 
 	/* Get SMBI software semaphore between device drivers first */
 	for (i = 0; i < timeout; i++) {
-		/*
-		 * If the SMBI bit is 0 when we read it, then the bit will be
+		/* If the SMBI bit is 0 when we read it, then the bit will be
 		 * set and we have the semaphore
 		 */
 		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-		if (!(swsm & IXGBE_SWSM_SMBI)) {
-			status = 0;
+		if (!(swsm & IXGBE_SWSM_SMBI))
 			break;
-		}
-		udelay(50);
+		usleep_range(50, 100);
+	}
+
+	if (i == timeout) {
+		hw_dbg(hw,
+		       "Software semaphore SMBI between device drivers not granted.\n");
+		return IXGBE_ERR_EEPROM;
 	}
 
 	/* Now get the semaphore between SW/FW through the REGSMP bit */
-	if (status) {
-		for (i = 0; i < timeout; i++) {
-			swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
-			if (!(swsm & IXGBE_SWFW_REGSMP))
-				break;
+	for (i = 0; i < timeout; i++) {
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+		if (!(swsm & IXGBE_SWFW_REGSMP))
+			return 0;
 
-			udelay(50);
-		}
-	} else {
-		hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
+		usleep_range(50, 100);
 	}
 
-	return status;
+	return IXGBE_ERR_EEPROM;
 }
 
 /**
@@ -811,8 +761,6 @@
 	.start_hw               = &ixgbe_start_hw_X540,
 	.clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
 	.get_media_type         = &ixgbe_get_media_type_X540,
-	.get_supported_physical_layer =
-				  &ixgbe_get_supported_physical_layer_X540,
 	.enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
 	.get_mac_addr           = &ixgbe_get_mac_addr_generic,
 	.get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
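
The X540 EEPROM helpers above are all reshaped into the same pattern: bail out immediately if the SW/FW semaphore cannot be taken, otherwise do the work and fall through to a single release. A minimal userspace sketch of that control flow is shown below; try_lock()/unlock() and the constants are invented stand-ins for the acquire_swfw_sync()/release_swfw_sync() ops, not the driver code itself.

#include <stdio.h>
#include <stdbool.h>

#define ERR_SWFW_SYNC	(-13)	/* stand-in for IXGBE_ERR_SWFW_SYNC */

static bool hw_lock_busy;	/* pretend hardware semaphore */

static int try_lock(void)
{
	if (hw_lock_busy)
		return -1;
	hw_lock_busy = true;
	return 0;
}

static void unlock(void)
{
	hw_lock_busy = false;
}

static int read_word(unsigned int offset, unsigned short *data)
{
	*data = (unsigned short)(0xA500 + offset);	/* fake EEPROM contents */
	return 0;
}

/* Same shape as the reworked ixgbe_read_eerd_X540(): fail fast when the
 * semaphore cannot be taken, otherwise do the work and release exactly once. */
static int read_eeprom_word(unsigned int offset, unsigned short *data)
{
	int status;

	if (try_lock())
		return ERR_SWFW_SYNC;

	status = read_word(offset, data);

	unlock();
	return status;
}

int main(void)
{
	unsigned short word = 0;
	int status = read_eeprom_word(3, &word);

	printf("status=%d word=0x%04x\n", status, word);
	return 0;
}
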
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 3061d18..aa8cc8d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -49,7 +49,6 @@
 	s32 (*start_hw)(struct ixgbe_hw *);
 	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
 	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
-	u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
 	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
 	s32 (*stop_adapter)(struct ixgbe_hw *);
 	s32 (*get_bus_info)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 4f6e4a1..ece83f1 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -248,6 +248,8 @@
 /* LMS registers */
 #define MVPP2_SRC_ADDR_MIDDLE			0x24
 #define MVPP2_SRC_ADDR_HIGH			0x28
+#define MVPP2_PHY_AN_CFG0_REG			0x34
+#define     MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
 #define MVPP2_MIB_COUNTERS_BASE(port)		(0x1000 + ((port) >> 1) * \
 						0x400 + (port) * 0x400)
 #define     MVPP2_MIB_LATE_COLLISION		0x7c
@@ -262,7 +264,7 @@
 #define      MVPP2_GMAC_MAX_RX_SIZE_MASK	0x7ffc
 #define      MVPP2_GMAC_MIB_CNTR_EN_MASK	BIT(15)
 #define MVPP2_GMAC_CTRL_1_REG			0x4
-#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(0)
+#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
 #define      MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
 #define      MVPP2_GMAC_PCS_LB_EN_BIT		6
 #define      MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
@@ -278,6 +280,7 @@
 #define      MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
 #define      MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
 #define      MVPP2_GMAC_AN_SPEED_EN		BIT(7)
+#define      MVPP2_GMAC_FC_ADV_EN		BIT(9)
 #define      MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
 #define      MVPP2_GMAC_AN_DUPLEX_EN		BIT(13)
 #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
@@ -3382,17 +3385,12 @@
 	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
 }
 
-/* Free "num" buffers from the pool */
-static int mvpp2_bm_bufs_free(struct mvpp2 *priv,
-			      struct mvpp2_bm_pool *bm_pool, int num)
+/* Free all buffers from the pool */
+static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
 {
 	int i;
 
-	if (num >= bm_pool->buf_num)
-		/* Free all buffers from the pool */
-		num = bm_pool->buf_num;
-
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < bm_pool->buf_num; i++) {
 		u32 vaddr;
 
 		/* Get buffer virtual address (indirect access) */
@@ -3405,7 +3403,6 @@
 
 	/* Update BM driver with number of buffers removed from pool */
 	bm_pool->buf_num -= i;
-	return i;
 }
 
 /* Cleanup pool */
@@ -3413,11 +3410,10 @@
 				 struct mvpp2 *priv,
 				 struct mvpp2_bm_pool *bm_pool)
 {
-	int num;
 	u32 val;
 
-	num = mvpp2_bm_bufs_free(priv, bm_pool, bm_pool->buf_num);
-	if (num != bm_pool->buf_num) {
+	mvpp2_bm_bufs_free(priv, bm_pool);
+	if (bm_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
 		return 0;
 	}
@@ -3672,7 +3668,7 @@
 				   MVPP2_BM_LONG_BUF_NUM :
 				   MVPP2_BM_SHORT_BUF_NUM;
 		else
-			mvpp2_bm_bufs_free(port->priv, new_pool, pkts_num);
+			mvpp2_bm_bufs_free(port->priv, new_pool);
 
 		new_pool->pkt_size = pkt_size;
 
@@ -3745,8 +3741,8 @@
 	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
 
 	/* Update BM pool with new buffer size */
-	num = mvpp2_bm_bufs_free(port->priv, port_pool, pkts_num);
-	if (num != pkts_num) {
+	mvpp2_bm_bufs_free(port->priv, port_pool);
+	if (port_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
 		return -EIO;
 	}
@@ -3809,16 +3805,30 @@
 
 static void mvpp2_port_mii_set(struct mvpp2_port *port)
 {
-	u32 reg, val = 0;
+	u32 val;
 
-	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
-		val = MVPP2_GMAC_PCS_ENABLE_MASK |
-		      MVPP2_GMAC_INBAND_AN_MASK;
-	else if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
-		val = MVPP2_GMAC_PORT_RGMII_MASK;
+	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
 
-	reg = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-	writel(reg | val, port->base + MVPP2_GMAC_CTRL_2_REG);
+	switch (port->phy_interface) {
+	case PHY_INTERFACE_MODE_SGMII:
+		val |= MVPP2_GMAC_INBAND_AN_MASK;
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+		val |= MVPP2_GMAC_PORT_RGMII_MASK;
+	default:
+		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
+	}
+
+	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+}
+
+static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
+{
+	u32 val;
+
+	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+	val |= MVPP2_GMAC_FC_ADV_EN;
+	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
 }
 
 static void mvpp2_port_enable(struct mvpp2_port *port)
@@ -4832,7 +4842,7 @@
 
 			if (phydev->speed == SPEED_1000)
 				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
-			else
+			else if (phydev->speed == SPEED_100)
 				val |= MVPP2_GMAC_CONFIG_MII_SPEED;
 
 			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
@@ -5704,6 +5714,21 @@
 	return stats;
 }
 
+static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mvpp2_port *port = netdev_priv(dev);
+	int ret;
+
+	if (!port->phy_dev)
+		return -ENOTSUPP;
+
+	ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
+	if (!ret)
+		mvpp2_link_event(dev);
+
+	return ret;
+}
+
 /* Ethtool methods */
 
 /* Get settings (phy address, speed) for ethtools */
@@ -5858,6 +5883,7 @@
 	.ndo_set_mac_address	= mvpp2_set_mac_address,
 	.ndo_change_mtu		= mvpp2_change_mtu,
 	.ndo_get_stats64	= mvpp2_get_stats64,
+	.ndo_do_ioctl		= mvpp2_ioctl,
 };
 
 static const struct ethtool_ops mvpp2_eth_tool_ops = {
@@ -5877,6 +5903,7 @@
 {
 	mvpp2_port_mii_set(port);
 	mvpp2_port_periodic_xon_disable(port);
+	mvpp2_port_fc_adv_enable(port);
 	mvpp2_port_reset(port);
 }
 
@@ -6197,6 +6224,7 @@
 {
 	const struct mbus_dram_target_info *dram_target_info;
 	int err, i;
+	u32 val;
 
 	/* Checks for hardware constraints */
 	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
@@ -6210,6 +6238,11 @@
 	if (dram_target_info)
 		mvpp2_conf_mbus_windows(dram_target_info, priv);
 
+	/* Disable HW PHY polling */
+	val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+	writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+
 	/* Allocate and initialize aggregated TXQs */
 	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
 				       sizeof(struct mvpp2_tx_queue),
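
mvpp2_port_mii_set() now does a read-modify-write of MVPP2_GMAC_CTRL_2_REG instead of OR-ing in a freshly built value, which is what lets it also clear the PCS enable bit for non-SGMII modes. A compact model of that read-modify-write follows; the register is a plain variable and the bit positions are made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define PCS_ENABLE_MASK		BIT(3)	/* illustrative bit positions only */
#define INBAND_AN_MASK		BIT(4)
#define PORT_RGMII_MASK		BIT(5)

enum phy_mode { MODE_SGMII, MODE_RGMII, MODE_OTHER };

static uint32_t ctrl2_reg = PCS_ENABLE_MASK;	/* pretend MMIO register */

static uint32_t reg_read(void)		{ return ctrl2_reg; }
static void reg_write(uint32_t val)	{ ctrl2_reg = val; }

/* Read-modify-write: keep unrelated bits, adjust only the mode bits. */
static void port_mii_set(enum phy_mode mode)
{
	uint32_t val = reg_read();

	switch (mode) {
	case MODE_SGMII:
		val |= INBAND_AN_MASK;
		break;
	case MODE_RGMII:
		val |= PORT_RGMII_MASK;
		/* fall through: RGMII also clears the PCS enable bit */
	default:
		val &= ~PCS_ENABLE_MASK;
	}

	reg_write(val);
}

int main(void)
{
	port_mii_set(MODE_RGMII);
	printf("ctrl2 = 0x%08x\n", (unsigned int)ctrl2_reg);
	return 0;
}
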
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 68d763d..e22f24f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -98,6 +98,10 @@
 	drvinfo->eedump_len = 0;
 }
 
+static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
+	"blueflame",
+};
+
 static const char main_strings[][ETH_GSTRING_LEN] = {
 	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
 	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
@@ -235,6 +239,8 @@
 	case ETH_SS_TEST:
 		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
 					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
+	case ETH_SS_PRIV_FLAGS:
+		return ARRAY_SIZE(mlx4_en_priv_flags);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -358,6 +364,12 @@
 #endif
 		}
 		break;
+	case ETH_SS_PRIV_FLAGS:
+		for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
+			strcpy(data + i * ETH_GSTRING_LEN,
+			       mlx4_en_priv_flags[i]);
+		break;
+
 	}
 }
 
@@ -1209,6 +1221,49 @@
 	return ret;
 }
 
+static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+	bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+	int i;
+
+	if (bf_enabled_new == bf_enabled_old)
+		return 0; /* Nothing to do */
+
+	if (bf_enabled_new) {
+		bool bf_supported = true;
+
+		for (i = 0; i < priv->tx_ring_num; i++)
+			bf_supported &= priv->tx_ring[i]->bf_alloced;
+
+		if (!bf_supported) {
+			en_err(priv, "BlueFlame is not supported\n");
+			return -EINVAL;
+		}
+
+		priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+	} else {
+		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+	}
+
+	for (i = 0; i < priv->tx_ring_num; i++)
+		priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+
+	en_info(priv, "BlueFlame %s\n",
+		bf_enabled_new ?  "Enabled" : "Disabled");
+
+	return 0;
+}
+
+static u32 mlx4_en_get_priv_flags(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	return priv->pflags;
+}
+
+
 const struct ethtool_ops mlx4_en_ethtool_ops = {
 	.get_drvinfo = mlx4_en_get_drvinfo,
 	.get_settings = mlx4_en_get_settings,
@@ -1236,6 +1291,8 @@
 	.get_channels = mlx4_en_get_channels,
 	.set_channels = mlx4_en_set_channels,
 	.get_ts_info = mlx4_en_get_ts_info,
+	.set_priv_flags = mlx4_en_set_priv_flags,
+	.get_priv_flags = mlx4_en_get_priv_flags,
 };
 
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index f953c1d..3626fdf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -129,8 +129,10 @@
 	int i;
 
 	params->udp_rss = udp_rss;
-	params->num_tx_rings_p_up = min_t(int, num_online_cpus(),
-			MLX4_EN_MAX_TX_RING_P_UP);
+	params->num_tx_rings_p_up = mlx4_low_memory_profile() ?
+		MLX4_EN_MIN_TX_RING_P_UP :
+		min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);
+
 	if (params->udp_rss && !(mdev->dev->caps.flags
 					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
 		mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 887cf01..bb536aa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -644,6 +644,7 @@
 		goto alloc_err;
 	}
 	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
 	entry->reg_id = reg_id;
 
 	hlist_add_head_rcu(&entry->hlist,
@@ -2464,6 +2465,7 @@
 	priv->port = port;
 	priv->port_up = false;
 	priv->flags = prof->flags;
+	priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
 	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
 			MLX4_WQE_CTRL_SOLICITED);
 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7765a08..9c909d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -335,8 +335,9 @@
 					   dev->caps.comp_pool/
 					   dev->caps.num_ports) - 1;
 
-		num_rx_rings = min_t(int, num_of_eqs,
-				     netif_get_num_default_rss_queues());
+		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
+			min_t(int, num_of_eqs,
+			      netif_get_num_default_rss_queues());
 		mdev->profile.prof[i].rx_ring_num =
 			rounddown_pow_of_two(num_rx_rings);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 5045bab..dae3da6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -126,8 +126,13 @@
 		ring->bf.uar = &mdev->priv_uar;
 		ring->bf.uar->map = mdev->uar_map;
 		ring->bf_enabled = false;
-	} else
-		ring->bf_enabled = true;
+		ring->bf_alloced = false;
+		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+	} else {
+		ring->bf_alloced = true;
+		ring->bf_enabled = !!(priv->pflags &
+				      MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+	}
 
 	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
 	ring->queue_index = queue_index;
@@ -161,7 +166,7 @@
 	struct mlx4_en_tx_ring *ring = *pring;
 	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
-	if (ring->bf_enabled)
+	if (ring->bf_alloced)
 		mlx4_bf_free(mdev->dev, &ring->bf);
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
@@ -195,7 +200,7 @@
 
 	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
 				ring->cqn, user_prio, &ring->context);
-	if (ring->bf_enabled)
+	if (ring->bf_alloced)
 		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
 
 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 82ab427..80b8c5f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -120,6 +120,16 @@
 	.num_mtt	= 1 << 20, /* It is really num mtt segements */
 };
 
+static struct mlx4_profile low_mem_profile = {
+	.num_qp		= 1 << 17,
+	.num_srq	= 1 << 6,
+	.rdmarc_per_qp	= 1 << 4,
+	.num_cq		= 1 << 8,
+	.num_mcg	= 1 << 8,
+	.num_mpt	= 1 << 9,
+	.num_mtt	= 1 << 7,
+};
+
 static int log_num_mac = 7;
 module_param_named(log_num_mac, log_num_mac, int, 0444);
 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
@@ -129,6 +139,8 @@
 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
 /* Log2 max number of VLANs per ETH port (0-7) */
 #define MLX4_LOG_NUM_VLANS 7
+#define MLX4_MIN_LOG_NUM_VLANS 0
+#define MLX4_MIN_LOG_NUM_MAC 1
 
 static bool use_prio;
 module_param_named(use_prio, use_prio, bool, 0444);
@@ -287,8 +299,13 @@
 	if (mlx4_is_mfunc(dev))
 		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
 
-	dev->caps.log_num_macs  = log_num_mac;
-	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
+	if (mlx4_low_memory_profile()) {
+		dev->caps.log_num_macs  = MLX4_MIN_LOG_NUM_MAC;
+		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
+	} else {
+		dev->caps.log_num_macs  = log_num_mac;
+		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
+	}
 
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
 		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
@@ -1587,7 +1604,12 @@
 		if (mlx4_is_master(dev))
 			mlx4_parav_master_pf_caps(dev);
 
-		profile = default_profile;
+		if (mlx4_low_memory_profile()) {
+			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
+			profile = low_mem_profile;
+		} else {
+			profile = default_profile;
+		}
 		if (dev->caps.steering_mode ==
 		    MLX4_STEERING_MODE_DEVICE_MANAGED)
 			profile.num_mcg = MLX4_FS_NUM_MCG;
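
mlx4 now switches to a much smaller resource profile when mlx4_low_memory_profile() reports a constrained environment (the log message suggests it is keyed off running under kdump; treat that as an assumption here). The selection itself is just a copy of one of two template structs, as in the sketch below with made-up field values.

#include <stdio.h>
#include <stdbool.h>

struct profile {
	int log_num_qp;		/* illustrative fields, not the real mlx4_profile */
	int log_num_cq;
};

static const struct profile default_profile = { .log_num_qp = 19, .log_num_cq = 16 };
static const struct profile low_mem_profile = { .log_num_qp = 17, .log_num_cq = 8 };

/* Stand-in for mlx4_low_memory_profile(); assumed to key off kdump. */
static bool low_memory_profile(void)
{
	return false;		/* pretend this is a normal kernel */
}

int main(void)
{
	struct profile prof = low_memory_profile() ? low_mem_profile
						   : default_profile;

	printf("log_num_qp=%d log_num_cq=%d\n", prof.log_num_qp, prof.log_num_cq);
	return 0;
}
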
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2b19dd1..3de41be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -93,6 +93,8 @@
  * OS related constants and tunables
  */
 
+#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+
 #define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)
 
 /* Use the maximum between 16384 and a single page */
@@ -119,6 +121,7 @@
 #define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
 
 #define MLX4_EN_SMALL_PKT_SIZE		64
+#define MLX4_EN_MIN_TX_RING_P_UP	1
 #define MLX4_EN_MAX_TX_RING_P_UP	32
 #define MLX4_EN_NUM_UP			8
 #define MLX4_EN_DEF_TX_RING_SIZE	512
@@ -278,6 +281,7 @@
 	unsigned long wake_queue;
 	struct mlx4_bf bf;
 	bool bf_enabled;
+	bool bf_alloced;
 	struct netdev_queue *tx_queue;
 	int hwtstamp_tx_type;
 	int inline_thold;
@@ -592,6 +596,8 @@
 #endif
 	u64 tunnel_reg_id;
 	__be16 vxlan_port;
+
+	u32 pflags;
 };
 
 enum mlx4_en_wol {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index b215742..56779c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -56,7 +56,7 @@
 	if (size <= max_direct) {
 		buf->nbufs        = 1;
 		buf->npages       = 1;
-		buf->page_shift   = get_order(size) + PAGE_SHIFT;
+		buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
 		buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
 							size, &t, GFP_KERNEL);
 		if (!buf->direct.buf)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 87d1b01..4671747 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -464,7 +464,7 @@
 	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
 	struct mlx5_cmd_mailbox *next = msg->next;
 	int data_only;
-	int offset = 0;
+	u32 offset = 0;
 	int dump_len;
 
 	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 7f39ebc..4e8bd0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -252,7 +252,9 @@
 			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
 			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
 			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
-				dev->event(dev, port_subtype_event(eqe->sub_type), &port);
+				if (dev->event)
+					dev->event(dev, port_subtype_event(eqe->sub_type),
+						   (unsigned long)port);
 				break;
 			default:
 				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
index 18d6fd5..fd80ecf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -37,7 +37,7 @@
 #include "mlx5_core.h"
 
 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
-		      u16 opmod, int port)
+		      u16 opmod, u8 port)
 {
 	struct mlx5_mad_ifc_mbox_in *in = NULL;
 	struct mlx5_mad_ifc_mbox_out *out = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ee24f13..f2716cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -58,7 +58,100 @@
 module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
 MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
 
+#define MLX5_DEFAULT_PROF	2
+static int prof_sel = MLX5_DEFAULT_PROF;
+module_param_named(prof_sel, prof_sel, int, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+
 struct workqueue_struct *mlx5_core_wq;
+static LIST_HEAD(intf_list);
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(intf_mutex);
+
+struct mlx5_device_context {
+	struct list_head	list;
+	struct mlx5_interface  *intf;
+	void		       *context;
+};
+
+static struct mlx5_profile profile[] = {
+	[0] = {
+		.mask           = 0,
+	},
+	[1] = {
+		.mask		= MLX5_PROF_MASK_QP_SIZE,
+		.log_max_qp	= 12,
+	},
+	[2] = {
+		.mask		= MLX5_PROF_MASK_QP_SIZE |
+				  MLX5_PROF_MASK_MR_CACHE,
+		.log_max_qp	= 17,
+		.mr_cache[0]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[1]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[2]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[3]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[4]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[5]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[6]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[7]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[8]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[9]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[10]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[11]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[12]	= {
+			.size	= 64,
+			.limit	= 32
+		},
+		.mr_cache[13]	= {
+			.size	= 32,
+			.limit	= 16
+		},
+		.mr_cache[14]	= {
+			.size	= 16,
+			.limit	= 8
+		},
+		.mr_cache[15]	= {
+			.size	= 8,
+			.limit	= 4
+		},
+	},
+};
 
 static int set_dma_caps(struct pci_dev *pdev)
 {
@@ -218,7 +311,7 @@
 
 	copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap);
 
-	if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
+	if (dev->profile && dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
 		set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
 
 	flags = be64_to_cpu(query_out->hca_cap.flags);
@@ -299,7 +392,7 @@
 	return 0;
 }
 
-int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
+static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
 	int err;
@@ -489,7 +582,7 @@
 }
 EXPORT_SYMBOL(mlx5_dev_init);
 
-void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
+static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 {
 	struct mlx5_priv *priv = &dev->priv;
 
@@ -516,7 +609,190 @@
 	pci_disable_device(dev->pdev);
 	debugfs_remove(priv->dbg_root);
 }
-EXPORT_SYMBOL(mlx5_dev_cleanup);
+
+static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
+	if (!dev_ctx) {
+		pr_warn("mlx5_add_device: alloc context failed\n");
+		return;
+	}
+
+	dev_ctx->intf    = intf;
+	dev_ctx->context = intf->add(dev);
+
+	if (dev_ctx->context) {
+		spin_lock_irq(&priv->ctx_lock);
+		list_add_tail(&dev_ctx->list, &priv->ctx_list);
+		spin_unlock_irq(&priv->ctx_lock);
+	} else {
+		kfree(dev_ctx);
+	}
+}
+
+static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+		if (dev_ctx->intf == intf) {
+			spin_lock_irq(&priv->ctx_lock);
+			list_del(&dev_ctx->list);
+			spin_unlock_irq(&priv->ctx_lock);
+
+			intf->remove(dev, dev_ctx->context);
+			kfree(dev_ctx);
+			return;
+		}
+}
+
+static int mlx5_register_device(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_interface *intf;
+
+	mutex_lock(&intf_mutex);
+	list_add_tail(&priv->dev_list, &dev_list);
+	list_for_each_entry(intf, &intf_list, list)
+		mlx5_add_device(intf, priv);
+	mutex_unlock(&intf_mutex);
+
+	return 0;
+}
+
+static void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_interface *intf;
+
+	mutex_lock(&intf_mutex);
+	list_for_each_entry(intf, &intf_list, list)
+		mlx5_remove_device(intf, priv);
+	list_del(&priv->dev_list);
+	mutex_unlock(&intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+	struct mlx5_priv *priv;
+
+	if (!intf->add || !intf->remove)
+		return -EINVAL;
+
+	mutex_lock(&intf_mutex);
+	list_add_tail(&intf->list, &intf_list);
+	list_for_each_entry(priv, &dev_list, dev_list)
+		mlx5_add_device(intf, priv);
+	mutex_unlock(&intf_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+	struct mlx5_priv *priv;
+
+	mutex_lock(&intf_mutex);
+	list_for_each_entry(priv, &dev_list, dev_list)
+		mlx5_remove_device(intf, priv);
+	list_del(&intf->list);
+	mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+			    unsigned long param)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_device_context *dev_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->ctx_lock, flags);
+
+	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+		if (dev_ctx->intf->event)
+			dev_ctx->intf->event(dev, dev_ctx->context, event, param);
+
+	spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+struct mlx5_core_event_handler {
+	void (*event)(struct mlx5_core_dev *dev,
+		      enum mlx5_dev_event event,
+		      void *data);
+};
+
+static int init_one(struct pci_dev *pdev,
+		    const struct pci_device_id *id)
+{
+	struct mlx5_core_dev *dev;
+	struct mlx5_priv *priv;
+	int err;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&pdev->dev, "kzalloc failed\n");
+		return -ENOMEM;
+	}
+	priv = &dev->priv;
+
+	pci_set_drvdata(pdev, dev);
+
+	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
+		pr_warn("selected profile out of range, selecting default (%d)\n",
+			MLX5_DEFAULT_PROF);
+		prof_sel = MLX5_DEFAULT_PROF;
+	}
+	dev->profile = &profile[prof_sel];
+	dev->event = mlx5_core_event;
+
+	err = mlx5_dev_init(dev, pdev);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&priv->ctx_list);
+	spin_lock_init(&priv->ctx_lock);
+	err = mlx5_register_device(dev);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+		goto out_init;
+	}
+
+	return 0;
+
+out_init:
+	mlx5_dev_cleanup(dev);
+out:
+	kfree(dev);
+	return err;
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+
+	mlx5_unregister_device(dev);
+	mlx5_dev_cleanup(dev);
+	kfree(dev);
+}
+
+static const struct pci_device_id mlx5_core_pci_table[] = {
+	{ PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+
+static struct pci_driver mlx5_core_driver = {
+	.name           = DRIVER_NAME,
+	.id_table       = mlx5_core_pci_table,
+	.probe          = init_one,
+	.remove         = remove_one
+};
 
 static int __init init(void)
 {
@@ -530,8 +806,15 @@
 	}
 	mlx5_health_init();
 
+	err = pci_register_driver(&mlx5_core_driver);
+	if (err)
+		goto err_health;
+
 	return 0;
 
+err_health:
+	mlx5_health_cleanup();
+	destroy_workqueue(mlx5_core_wq);
 err_debug:
 	mlx5_unregister_debugfs();
 	return err;
@@ -539,6 +822,7 @@
 
 static void __exit cleanup(void)
 {
+	pci_unregister_driver(&mlx5_core_driver);
 	mlx5_health_cleanup();
 	destroy_workqueue(mlx5_core_wq);
 	mlx5_unregister_debugfs();
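
The new registration code in mlx5 keeps two global lists, core devices and interface clients, under one mutex, and whenever either side appears it walks the other list so that every (device, interface) pair gets its own context. A self-contained, single-threaded model of that pairing logic is sketched below, with fixed-size arrays instead of kernel lists and no locking.

#include <stdio.h>

#define MAX_OBJS 8

struct device    { const char *name; };
struct interface {
	const char *name;
	void *(*add)(struct device *dev);	/* like mlx5_interface->add() */
};

struct pair_ctx { struct device *dev; struct interface *intf; void *ctx; };

static struct device    *devices[MAX_OBJS];
static struct interface *interfaces[MAX_OBJS];
static struct pair_ctx   pairs[MAX_OBJS * MAX_OBJS];
static int ndev, nintf, npairs;

static void add_pair(struct device *dev, struct interface *intf)
{
	pairs[npairs++] = (struct pair_ctx){ dev, intf, intf->add(dev) };
}

/* Registering a device binds it to every known interface client ... */
static void register_device(struct device *dev)
{
	devices[ndev++] = dev;
	for (int i = 0; i < nintf; i++)
		add_pair(dev, interfaces[i]);
}

/* ... and registering an interface binds it to every known device. */
static void register_interface(struct interface *intf)
{
	interfaces[nintf++] = intf;
	for (int i = 0; i < ndev; i++)
		add_pair(devices[i], intf);
}

static void *fake_add(struct device *dev)
{
	printf("bound client to %s\n", dev->name);
	return (void *)dev->name;	/* per-pair context, anything non-NULL */
}

int main(void)
{
	struct device d0 = { "mlx5_0" };
	struct interface ib = { "ib_client", fake_add };

	register_device(&d0);		/* no clients yet, nothing bound */
	register_interface(&ib);	/* now the pair (d0, ib) is created */
	printf("pairs=%d\n", npairs);
	return 0;
}
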
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index c2a953e..d476918 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -51,7 +51,7 @@
 
 struct mlx5_pages_req {
 	struct mlx5_core_dev *dev;
-	u32	func_id;
+	u16	func_id;
 	s32	npages;
 	struct work_struct work;
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 8c9ac87..3139658 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -86,7 +86,7 @@
 	__be32			caps_31_0;
 };
 
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps)
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
 {
 	struct mlx5_reg_pcap in;
 	struct mlx5_reg_pcap out;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 7a0dead..2eda153 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -503,7 +503,6 @@
 
 			skb_hwts = skb_hwtstamps(skb);
 			skb_hwts->hwtstamp = ns_to_ktime(ns);
-			skb_hwts->syststamp.tv64 = 0;
 		}
 
 		/* rth_hash_type and rth_it_hit are non-zero regardless of
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 7dc3e9b..979c698 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -247,28 +247,6 @@
 	}
 }
 
-static ktime_t ptp_to_ktime(u64 ptptime)
-{
-	ktime_t ktimebase;
-	u64 ptpbase;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	/* Fill the icache with the code */
-	ktime_get_real();
-	/* Flush all pending operations */
-	mb();
-	/* Read the time and PTP clock as close together as
-	 * possible. It is important that this sequence take the same
-	 * amount of time to reduce jitter
-	 */
-	ktimebase = ktime_get_real();
-	ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
-	local_irq_restore(flags);
-
-	return ktime_sub_ns(ktimebase, ptpbase - ptptime);
-}
-
 static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 {
 	union cvmx_mixx_orcnt mix_orcnt;
@@ -312,12 +290,12 @@
 		/* Read the hardware TX timestamp if one was recorded */
 		if (unlikely(re.s.tstamp)) {
 			struct skb_shared_hwtstamps ts;
+			memset(&ts, 0, sizeof(ts));
 			/* Read the timestamp */
 			u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
 			/* Remove the timestamp from the FIFO */
 			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
 			/* Tell the kernel about the timestamp */
-			ts.syststamp = ptp_to_ktime(ns);
 			ts.hwtstamp = ns_to_ktime(ns);
 			skb_tstamp_tx(skb, &ts);
 		}
@@ -429,7 +407,6 @@
 			struct skb_shared_hwtstamps *ts;
 			ts = skb_hwtstamps(skb);
 			ts->hwtstamp = ns_to_ktime(ns);
-			ts->syststamp = ptp_to_ktime(ns);
 			__skb_pull(skb, 8);
 		}
 		skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 2bc728e..75b1693 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -382,13 +382,6 @@
 static int cp_set_eeprom(struct net_device *dev,
 			 struct ethtool_eeprom *eeprom, u8 *data);
 
-static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
-	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
-	{ },
-};
-MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
-
 static struct {
 	const char str[ETH_GSTRING_LEN];
 } ethtool_stats_keys[] = {
@@ -1887,11 +1880,7 @@
 	resource_size_t pciaddr;
 	unsigned int addr_len, i, pci_using_dac;
 
-#ifndef MODULE
-	static int version_printed;
-	if (version_printed++ == 0)
-		pr_info("%s", version);
-#endif
+	pr_info_once("%s", version);
 
 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
 	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
@@ -2110,6 +2099,13 @@
 }
 #endif /* CONFIG_PM */
 
+static const struct pci_device_id cp_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
+	{ },
+};
+MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
+
 static struct pci_driver cp_driver = {
 	.name         = DRV_NAME,
 	.id_table     = cp_pci_tbl,
@@ -2121,18 +2117,4 @@
 #endif
 };
 
-static int __init cp_init (void)
-{
-#ifdef MODULE
-	pr_info("%s", version);
-#endif
-	return pci_register_driver(&cp_driver);
-}
-
-static void __exit cp_exit (void)
-{
-	pci_unregister_driver (&cp_driver);
-}
-
-module_init(cp_init);
-module_exit(cp_exit);
+module_pci_driver(cp_driver);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4b80c0b..4cebe9d 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -272,6 +272,9 @@
 	struct efx_nic *efx = channel->efx;
 	int spent;
 
+	if (!efx_channel_lock_napi(channel))
+		return budget;
+
 	netif_vdbg(efx, intr, efx->net_dev,
 		   "channel %d NAPI poll executing on CPU %d\n",
 		   channel->channel, raw_smp_processor_id());
@@ -311,6 +314,7 @@
 		efx_nic_eventq_read_ack(channel);
 	}
 
+	efx_channel_unlock_napi(channel);
 	return spent;
 }
 
@@ -357,7 +361,7 @@
 }
 
 /* Enable event queue processing and NAPI */
-static void efx_start_eventq(struct efx_channel *channel)
+void efx_start_eventq(struct efx_channel *channel)
 {
 	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
 		  "chan %d start event queue\n", channel->channel);
@@ -366,17 +370,20 @@
 	channel->enabled = true;
 	smp_wmb();
 
+	efx_channel_enable(channel);
 	napi_enable(&channel->napi_str);
 	efx_nic_eventq_read_ack(channel);
 }
 
 /* Disable event queue processing and NAPI */
-static void efx_stop_eventq(struct efx_channel *channel)
+void efx_stop_eventq(struct efx_channel *channel)
 {
 	if (!channel->enabled)
 		return;
 
 	napi_disable(&channel->napi_str);
+	while (!efx_channel_disable(channel))
+		usleep_range(1000, 20000);
 	channel->enabled = false;
 }
 
@@ -1960,6 +1967,8 @@
 	channel->napi_dev = efx->net_dev;
 	netif_napi_add(channel->napi_dev, &channel->napi_str,
 		       efx_poll, napi_weight);
+	napi_hash_add(&channel->napi_str);
+	efx_channel_init_lock(channel);
 }
 
 static void efx_init_napi(struct efx_nic *efx)
@@ -1972,8 +1981,10 @@
 
 static void efx_fini_napi_channel(struct efx_channel *channel)
 {
-	if (channel->napi_dev)
+	if (channel->napi_dev) {
 		netif_napi_del(&channel->napi_str);
+		napi_hash_del(&channel->napi_str);
+	}
 	channel->napi_dev = NULL;
 }
 
@@ -2008,6 +2019,37 @@
 
 #endif
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int efx_busy_poll(struct napi_struct *napi)
+{
+	struct efx_channel *channel =
+		container_of(napi, struct efx_channel, napi_str);
+	struct efx_nic *efx = channel->efx;
+	int budget = 4;
+	int old_rx_packets, rx_packets;
+
+	if (!netif_running(efx->net_dev))
+		return LL_FLUSH_FAILED;
+
+	if (!efx_channel_lock_poll(channel))
+		return LL_FLUSH_BUSY;
+
+	old_rx_packets = channel->rx_queue.rx_packets;
+	efx_process_channel(channel, budget);
+
+	rx_packets = channel->rx_queue.rx_packets - old_rx_packets;
+
+	/* There is no race condition with NAPI here.
+	 * NAPI will automatically be rescheduled if it yielded during busy
+	 * polling, because it was not able to take the lock and thus returned
+	 * the full budget.
+	 */
+	efx_channel_unlock_poll(channel);
+
+	return rx_packets;
+}
+#endif
+
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2177,6 +2219,9 @@
 	.ndo_poll_controller = efx_netpoll,
 #endif
 	.ndo_setup_tc		= efx_setup_tc,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= efx_busy_poll,
+#endif
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= efx_filter_rfs,
 #endif
@@ -2197,6 +2242,9 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= efx_netpoll,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= efx_busy_poll,
+#endif
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= efx_filter_rfs,
 #endif
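
efx_poll() and the new efx_busy_poll() above both have to take per-channel ownership before touching the event queue; the helpers added further down in net_driver.h arbitrate this with a small bit-state machine under a spinlock (an owner bit for NAPI or busy poll, yield bits to record contention, and a DISABLED bit that survives unlock). A userspace model of the same transitions is sketched below, using an ordinary pthread mutex in place of spin_lock_bh() and reusing the state names purely for illustration.

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

#define ST_IDLE		0
#define ST_NAPI		(1 << 0)
#define ST_POLL		(1 << 1)
#define ST_DISABLED	(1 << 2)
#define ST_NAPI_YIELD	(1 << 3)
#define ST_POLL_YIELD	(1 << 4)
#define ST_OWNED	(ST_NAPI | ST_POLL)
#define ST_LOCKED	(ST_OWNED | ST_DISABLED)

struct channel {
	unsigned int state;
	pthread_mutex_t lock;
};

static bool lock_napi(struct channel *c)
{
	bool ok = true;

	pthread_mutex_lock(&c->lock);
	if (c->state & ST_LOCKED) {
		c->state |= ST_NAPI_YIELD;	/* remember NAPI was turned away */
		ok = false;
	} else {
		c->state = ST_NAPI;		/* take ownership */
	}
	pthread_mutex_unlock(&c->lock);
	return ok;
}

static void unlock_napi(struct channel *c)
{
	pthread_mutex_lock(&c->lock);
	c->state &= ST_DISABLED;		/* back to idle unless disabled */
	pthread_mutex_unlock(&c->lock);
}

static bool lock_poll(struct channel *c)
{
	bool ok = true;

	pthread_mutex_lock(&c->lock);
	if (c->state & ST_LOCKED) {
		c->state |= ST_POLL_YIELD;	/* busy poll lost the race */
		ok = false;
	} else {
		c->state |= ST_POLL;		/* preserve yield marks */
	}
	pthread_mutex_unlock(&c->lock);
	return ok;
}

int main(void)
{
	struct channel ch = { ST_IDLE, PTHREAD_MUTEX_INITIALIZER };

	printf("napi locks:   %d\n", lock_napi(&ch));	/* 1: got it */
	printf("poll locks:   %d\n", lock_poll(&ch));	/* 0: NAPI owns it */
	unlock_napi(&ch);
	printf("poll retries: %d\n", lock_poll(&ch));	/* 1: channel is idle again */
	return 0;
}
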
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index b41601e..2587c58 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -194,6 +194,8 @@
 			    bool rx_may_override_tx);
 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
 			    unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_stop_eventq(struct efx_channel *channel);
+void efx_start_eventq(struct efx_channel *channel);
 
 /* Dummy PHY ops for PHY drivers */
 int efx_port_dummy_op_int(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index fb2e3bf..9ede320 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -28,6 +28,7 @@
 #include <linux/vmalloc.h>
 #include <linux/i2c.h>
 #include <linux/mtd/mtd.h>
+#include <net/busy_poll.h>
 
 #include "enum.h"
 #include "bitfield.h"
@@ -387,6 +388,8 @@
  * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
+ * @state: state for NAPI vs busy polling
+ * @state_lock: lock protecting @state
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
@@ -424,6 +427,22 @@
 	unsigned int irq_moderation;
 	struct net_device *napi_dev;
 	struct napi_struct napi_str;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int state;
+	spinlock_t state_lock;
+#define EFX_CHANNEL_STATE_IDLE		0
+#define EFX_CHANNEL_STATE_NAPI		(1 << 0)  /* NAPI owns this channel */
+#define EFX_CHANNEL_STATE_POLL		(1 << 1)  /* poll owns this channel */
+#define EFX_CHANNEL_STATE_DISABLED	(1 << 2)  /* channel is disabled */
+#define EFX_CHANNEL_STATE_NAPI_YIELD	(1 << 3)  /* NAPI yielded this channel */
+#define EFX_CHANNEL_STATE_POLL_YIELD	(1 << 4)  /* poll yielded this channel */
+#define EFX_CHANNEL_OWNED \
+	(EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
+#define EFX_CHANNEL_LOCKED \
+	(EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
+#define EFX_CHANNEL_USER_PEND \
+	(EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 	struct efx_special_buffer eventq;
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
@@ -457,6 +476,135 @@
 	u32 sync_timestamp_minor;
 };
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void efx_channel_init_lock(struct efx_channel *channel)
+{
+	spin_lock_init(&channel->state_lock);
+}
+
+/* Called from the device poll routine to get ownership of a channel. */
+static inline bool efx_channel_lock_napi(struct efx_channel *channel)
+{
+	bool rc = true;
+
+	spin_lock_bh(&channel->state_lock);
+	if (channel->state & EFX_CHANNEL_LOCKED) {
+		WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
+		channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
+		rc = false;
+	} else {
+		/* we don't care if someone yielded */
+		channel->state = EFX_CHANNEL_STATE_NAPI;
+	}
+	spin_unlock_bh(&channel->state_lock);
+	return rc;
+}
+
+static inline void efx_channel_unlock_napi(struct efx_channel *channel)
+{
+	spin_lock_bh(&channel->state_lock);
+	WARN_ON(channel->state &
+		(EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));
+
+	channel->state &= EFX_CHANNEL_STATE_DISABLED;
+	spin_unlock_bh(&channel->state_lock);
+}
+
+/* Called from efx_busy_poll(). */
+static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+{
+	bool rc = true;
+
+	spin_lock_bh(&channel->state_lock);
+	if ((channel->state & EFX_CHANNEL_LOCKED)) {
+		channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
+		rc = false;
+	} else {
+		/* preserve yield marks */
+		channel->state |= EFX_CHANNEL_STATE_POLL;
+	}
+	spin_unlock_bh(&channel->state_lock);
+	return rc;
+}
+
+/* Called from efx_busy_poll() to release the channel. */
+static inline void efx_channel_unlock_poll(struct efx_channel *channel)
+{
+	spin_lock_bh(&channel->state_lock);
+	WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
+
+	/* will reset state to idle, unless channel is disabled */
+	channel->state &= EFX_CHANNEL_STATE_DISABLED;
+	spin_unlock_bh(&channel->state_lock);
+}
+
+/* True if a socket is polling, even if it did not get the lock. */
+static inline bool efx_channel_busy_polling(struct efx_channel *channel)
+{
+	WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
+	return channel->state & EFX_CHANNEL_USER_PEND;
+}
+
+static inline void efx_channel_enable(struct efx_channel *channel)
+{
+	spin_lock_bh(&channel->state_lock);
+	channel->state = EFX_CHANNEL_STATE_IDLE;
+	spin_unlock_bh(&channel->state_lock);
+}
+
+/* False if the channel is currently owned. */
+static inline bool efx_channel_disable(struct efx_channel *channel)
+{
+	bool rc = true;
+
+	spin_lock_bh(&channel->state_lock);
+	if (channel->state & EFX_CHANNEL_OWNED)
+		rc = false;
+	channel->state |= EFX_CHANNEL_STATE_DISABLED;
+	spin_unlock_bh(&channel->state_lock);
+
+	return rc;
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline void efx_channel_init_lock(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_lock_napi(struct efx_channel *channel)
+{
+	return true;
+}
+
+static inline void efx_channel_unlock_napi(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+{
+	return false;
+}
+
+static inline void efx_channel_unlock_poll(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_busy_polling(struct efx_channel *channel)
+{
+	return false;
+}
+
+static inline void efx_channel_enable(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_disable(struct efx_channel *channel)
+{
+	return true;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /**
  * struct efx_msi_context - Context for each MSI
  * @efx: The associated NIC
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index a7bb63a..c0ad95d 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -462,6 +462,7 @@
 
 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
 
+	skb_mark_napi_id(skb, &channel->napi_str);
 	gro_result = napi_gro_frags(napi);
 	if (gro_result != GRO_DROP)
 		channel->irq_mod_score += 2;
@@ -520,6 +521,8 @@
 	/* Move past the ethernet header */
 	skb->protocol = eth_type_trans(skb, efx->net_dev);
 
+	skb_mark_napi_id(skb, &channel->napi_str);
+
 	return skb;
 }
 
@@ -666,7 +669,8 @@
 	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
 		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
+	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb &&
+	    !efx_channel_busy_polling(channel))
 		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
 	else
 		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 0fc5bae..10b6173 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -188,7 +188,7 @@
 		schedule_timeout_uninterruptible(wait);
 
 		efx_for_each_channel(channel, efx) {
-			napi_disable(&channel->napi_str);
+			efx_stop_eventq(channel);
 			if (channel->eventq_read_ptr !=
 			    read_ptr[channel->channel]) {
 				set_bit(channel->channel, &napi_ran);
@@ -200,8 +200,7 @@
 				if (efx_nic_event_test_irq_cpu(channel) >= 0)
 					clear_bit(channel->channel, &int_pend);
 			}
-			napi_enable(&channel->napi_str);
-			efx_nic_eventq_read_ack(channel);
+			efx_start_eventq(channel);
 		}
 
 		wait *= 2;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 283e5f8..65c220f 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -189,18 +189,6 @@
 	u8 buf[L1_CACHE_BYTES];
 };
 
-/* Copy in explicit 64-bit writes. */
-static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
-{
-	u64 *src64 = src;
-	u64 __iomem *dest64 = dest;
-	size_t l64 = len / 8;
-	size_t i;
-
-	for (i = 0; i < l64; i++)
-		writeq(src64[i], &dest64[i]);
-}
-
 /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
  * Advances piobuf pointer. Leaves additional data in the copy buffer.
  */
@@ -210,7 +198,7 @@
 {
 	int block_len = len & ~(sizeof(copy_buf->buf) - 1);
 
-	efx_memcpy_64(*piobuf, data, block_len);
+	__iowrite64_copy(*piobuf, data, block_len >> 3);
 	*piobuf += block_len;
 	len -= block_len;
 
@@ -242,7 +230,8 @@
 		if (copy_buf->used < sizeof(copy_buf->buf))
 			return;
 
-		efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+		__iowrite64_copy(*piobuf, copy_buf->buf,
+				 sizeof(copy_buf->buf) >> 3);
 		*piobuf += sizeof(copy_buf->buf);
 		data += copy_to_buf;
 		len -= copy_to_buf;
@@ -257,7 +246,8 @@
 {
 	/* if there's anything in it, write the whole buffer, including junk */
 	if (copy_buf->used)
-		efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+		__iowrite64_copy(piobuf, copy_buf->buf,
+				 sizeof(copy_buf->buf) >> 3);
 }
 
 /* Traverse skb structure and copy fragments in to PIO buffer.
@@ -316,8 +306,8 @@
 		 */
 		BUILD_BUG_ON(L1_CACHE_BYTES >
 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-		efx_memcpy_64(tx_queue->piobuf, skb->data,
-			      ALIGN(skb->len, L1_CACHE_BYTES));
+		__iowrite64_copy(tx_queue->piobuf, skb->data,
+				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
 	}
 
 	EFX_POPULATE_QWORD_5(buffer->option,
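
The sfc TX PIO path drops its private efx_memcpy_64() in favour of the generic __iowrite64_copy(), which takes its length in 8-byte words rather than bytes; that is why every call site now shifts the byte count right by three. The loop both helpers amount to can be modelled like this, with ordinary memory standing in for the PIO aperture.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Copy 'count' 64-bit words; __iowrite64_copy() also takes a word count,
 * which is why the driver now shifts its byte lengths right by three. */
static void copy64(uint64_t *dst, const uint64_t *src, size_t count)
{
	for (size_t i = 0; i < count; i++)
		dst[i] = src[i];
}

int main(void)
{
	uint64_t src[8], dst[8] = { 0 };

	memset(src, 0xab, sizeof(src));
	copy64(dst, src, sizeof(src) >> 3);	/* bytes -> qwords, as in the patch */
	printf("copied %zu words, dst[0]=%llx\n",
	       sizeof(src) >> 3, (unsigned long long)dst[0]);
	return 0;
}
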
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index fd8a217..ec632e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -20,7 +20,9 @@
 #include <linux/of_net.h>
 #include <linux/phy.h>
 #include <linux/regmap.h>
+#include <linux/reset.h>
 #include <linux/stmmac.h>
+#include "stmmac.h"
 
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
@@ -34,6 +36,7 @@
 	u32	reg_shift;
 	struct	device *dev;
 	struct regmap *sys_mgr_base_addr;
+	struct reset_control *stmmac_rst;
 };
 
 static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
@@ -43,6 +46,13 @@
 	u32 reg_offset, reg_shift;
 	int ret;
 
+	dwmac->stmmac_rst = devm_reset_control_get(dev,
+						  STMMAC_RESOURCE_NAME);
+	if (IS_ERR(dwmac->stmmac_rst)) {
+		dev_info(dev, "Could not get reset control!\n");
+		return -EINVAL;
+	}
+
 	dwmac->interface = of_get_phy_mode(np);
 
 	sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -125,6 +135,65 @@
 	return dwmac;
 }
 
+static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+	struct socfpga_dwmac	*dwmac = priv;
+
+	/* On socfpga platform exit, assert and hold reset to the
+	 * enet controller - the default state after a hard reset.
+	 */
+	if (dwmac->stmmac_rst)
+		reset_control_assert(dwmac->stmmac_rst);
+}
+
+static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
+{
+	struct socfpga_dwmac	*dwmac = priv;
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct stmmac_priv *stpriv = NULL;
+	int ret = 0;
+
+	if (ndev)
+		stpriv = netdev_priv(ndev);
+
+	/* Assert reset to the enet controller before changing the phy mode */
+	if (dwmac->stmmac_rst)
+		reset_control_assert(dwmac->stmmac_rst);
+
+	/* Setup the phy mode in the system manager registers according to
+	 * devicetree configuration
+	 */
+	ret = socfpga_dwmac_setup(dwmac);
+
+	/* Deassert reset for the phy configuration to be sampled by
+	 * the enet controller, and operation to start in requested mode
+	 */
+	if (dwmac->stmmac_rst)
+		reset_control_deassert(dwmac->stmmac_rst);
+
+	/* Before the enet controller is suspended, the phy is suspended.
+	 * This causes the phy clock to be gated. The enet controller is
+	 * resumed before the phy, so the clock is still gated "off" when
+	 * the enet controller is resumed. This code makes sure the phy
+	 * is "resumed" before reinitializing the enet controller since
+	 * the enet controller depends on an active phy clock to complete
+	 * a DMA reset. A DMA reset will "time out" if executed
+	 * with no phy clock input on the Synopsys enet controller.
+	 * Verified through Synopsys Case #8000711656.
+	 *
+	 * Note that the phy clock is also gated when the phy is isolated.
+	 * Phy "suspend" and "isolate" controls are located in phy basic
+	 * control register 0, and can be modified by the phy driver
+	 * framework.
+	 */
+	if (stpriv && stpriv->phydev)
+		phy_resume(stpriv->phydev);
+
+	return ret;
+}
+
 const struct stmmac_of_data socfpga_gmac_data = {
 	.setup = socfpga_dwmac_probe,
+	.init = socfpga_dwmac_init,
+	.exit = socfpga_dwmac_exit,
 };
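
socfpga_dwmac_init() is careful about ordering: assert the controller reset, program the PHY mode in the system manager while the MAC is held in reset, then deassert so the new mode is sampled on release. The ordering alone can be sketched as below, with stub functions standing in for the reset-controller and syscon accesses.

#include <stdio.h>

static int mac_in_reset;
static int phy_mode;

static void reset_assert(void)		{ mac_in_reset = 1; }
static void reset_deassert(void)	{ mac_in_reset = 0; }

/* The mode may only be changed while the MAC is held in reset, so the
 * controller samples it cleanly when the reset is released. */
static int program_phy_mode(int mode)
{
	if (!mac_in_reset)
		return -1;		/* would race with a running MAC */
	phy_mode = mode;
	return 0;
}

static int dwmac_init(int mode)
{
	int ret;

	reset_assert();
	ret = program_phy_mode(mode);
	reset_deassert();

	return ret;
}

int main(void)
{
	int ret = dwmac_init(2);

	printf("init: %d, mode=%d, in_reset=%d\n", ret, phy_mode, mac_in_reset);
	return 0;
}
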
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 057a120..18315f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2878,6 +2878,10 @@
 		clk_disable_unprepare(priv->stmmac_clk);
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
+
+	priv->oldlink = 0;
+	priv->speed = 0;
+	priv->oldduplex = -1;
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index fd411d6..d813bfb 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -610,6 +610,13 @@
 	return err;
 }
 
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+	struct vio_driver_state *vio = &vnet->vio;
+
+	return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
 struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 {
 	unsigned int hash = vnet_hashfn(skb->data);
@@ -617,14 +624,19 @@
 	struct vnet_port *port;
 
 	hlist_for_each_entry(port, hp, hash) {
+		if (!port_is_up(port))
+			continue;
 		if (ether_addr_equal(port->raddr, skb->data))
 			return port;
 	}
-	port = NULL;
-	if (!list_empty(&vp->port_list))
-		port = list_entry(vp->port_list.next, struct vnet_port, list);
-
-	return port;
+	list_for_each_entry(port, &vp->port_list, list) {
+		if (!port->switch_port)
+			continue;
+		if (!port_is_up(port))
+			continue;
+		return port;
+	}
+	return NULL;
 }
 
 struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
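
__tx_port_find() now skips ports whose VIO handshake has not completed and, when the MAC lookup finds nothing usable, falls back only to an up switch port instead of blindly taking the first port on the list. The selection logic in isolation, over a plain array of invented port records (the driver actually uses a hash table), looks like this:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct port {
	unsigned char raddr[6];
	bool up;		/* handshake complete, like VIO_HS_COMPLETE */
	bool switch_port;
};

static struct port *tx_port_find(struct port *ports, int n,
				 const unsigned char *dst_mac)
{
	/* 1) exact MAC match among ports that are actually up */
	for (int i = 0; i < n; i++)
		if (ports[i].up && !memcmp(ports[i].raddr, dst_mac, 6))
			return &ports[i];

	/* 2) otherwise fall back to an up switch port, or give up */
	for (int i = 0; i < n; i++)
		if (ports[i].up && ports[i].switch_port)
			return &ports[i];

	return NULL;
}

int main(void)
{
	struct port ports[2] = {
		{ { 0, 1, 2, 3, 4, 5 }, false, false },	/* known MAC, but not up */
		{ { 0xff }, true, true },		/* the switch port */
	};
	unsigned char dst[6] = { 0, 1, 2, 3, 4, 5 };
	struct port *p = tx_port_find(ports, 2, dst);

	printf("chose %s\n", p ? (p->switch_port ? "switch port" : "direct port")
			       : "none");
	return 0;
}
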
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ae6379a..999fb72 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1003,17 +1003,6 @@
 	}
 }
 
-static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
-{
-	static char *leader = "........................................";
-
-	if (!val)
-		return 0;
-	else
-		return snprintf(buf, maxlen, "%s %s %10d\n", name,
-				leader + strlen(name), val);
-}
-
 static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
 {
 	u32 i;
@@ -1675,14 +1664,34 @@
 	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
 };
 
+static int cpsw_get_regs_len(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+
+	return priv->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
+}
+
+static void cpsw_get_regs(struct net_device *ndev,
+			  struct ethtool_regs *regs, void *p)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	u32 *reg = p;
+
+	/* update CPSW IP version */
+	regs->version = priv->version;
+
+	cpsw_ale_dump(priv->ale, reg);
+}
+
 static void cpsw_get_drvinfo(struct net_device *ndev,
 			     struct ethtool_drvinfo *info)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
 
-	strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver));
+	strlcpy(info->driver, "cpsw", sizeof(info->driver));
 	strlcpy(info->version, "1.0", sizeof(info->version));
 	strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
+	info->regdump_len = cpsw_get_regs_len(ndev);
 }
 
 static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -1790,6 +1799,8 @@
 	.get_ethtool_stats	= cpsw_get_ethtool_stats,
 	.get_wol	= cpsw_get_wol,
 	.set_wol	= cpsw_set_wol,
+	.get_regs_len	= cpsw_get_regs_len,
+	.get_regs	= cpsw_get_regs,
 };
 
 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 7f89306..0579b22 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -25,8 +25,6 @@
 #include "cpsw_ale.h"
 
 #define BITMASK(bits)		(BIT(bits) - 1)
-#define ALE_ENTRY_BITS		68
-#define ALE_ENTRY_WORDS	DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
 
 #define ALE_VERSION_MAJOR(rev)	((rev >> 8) & 0xff)
 #define ALE_VERSION_MINOR(rev)	(rev & 0xff)
@@ -763,3 +761,13 @@
 	kfree(ale);
 	return 0;
 }
+
+void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
+{
+	int i;
+
+	for (i = 0; i < ale->params.ale_entries; i++) {
+		cpsw_ale_read(ale, i, data);
+		data += ALE_ENTRY_WORDS;
+	}
+}
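
The new cpsw ethtool register dump sizes its buffer as ale_entries * ALE_ENTRY_WORDS * sizeof(u32) and lets cpsw_ale_dump() advance through it one entry, i.e. ALE_ENTRY_WORDS words, at a time. The stride arithmetic is modelled below with a fake read routine and a deliberately tiny table.

#include <stdio.h>
#include <stdint.h>

#define ALE_ENTRY_WORDS	3	/* 68 bits rounded up to 32-bit words */
#define ALE_ENTRIES	4	/* deliberately tiny table for the example */

/* Pretend "hardware" read of one ALE entry into ALE_ENTRY_WORDS words. */
static void ale_read(int idx, uint32_t *entry)
{
	for (int w = 0; w < ALE_ENTRY_WORDS; w++)
		entry[w] = (uint32_t)(idx * 0x100 + w);
}

/* Same stride as cpsw_ale_dump(): advance by ALE_ENTRY_WORDS per entry. */
static void ale_dump(uint32_t *data)
{
	for (int i = 0; i < ALE_ENTRIES; i++) {
		ale_read(i, data);
		data += ALE_ENTRY_WORDS;
	}
}

int main(void)
{
	uint32_t buf[ALE_ENTRIES * ALE_ENTRY_WORDS];

	ale_dump(buf);
	printf("last word = 0x%x (entry %d, word %d)\n",
	       (unsigned int)buf[ALE_ENTRIES * ALE_ENTRY_WORDS - 1],
	       ALE_ENTRIES - 1, ALE_ENTRY_WORDS - 1);
	return 0;
}
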
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index de409c3..31cf43c 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -80,6 +80,9 @@
 #define ALE_MCAST_FWD_LEARN		2
 #define ALE_MCAST_FWD_2			3
 
+#define ALE_ENTRY_BITS		68
+#define ALE_ENTRY_WORDS	DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
+
 struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
 int cpsw_ale_destroy(struct cpsw_ale *ale);
 
@@ -104,5 +107,6 @@
 int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
 int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
 			 int control, int value);
+void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
 
 #endif
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f13e0ac..592977a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -378,8 +378,10 @@
 
 	net_device->send_section_map =
 		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
-	if (net_device->send_section_map == NULL)
+	if (net_device->send_section_map == NULL) {
+		ret = -ENOMEM;
 		goto cleanup;
+	}
 
 	goto exit;
 
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index b57c224..388e302 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -74,7 +74,6 @@
 #include <linux/of_platform.h>
 #include <linux/of_device.h>
 #include <linux/uaccess.h>
-#include <asm/irq.h>
 
 
 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
@@ -85,6 +84,8 @@
 #define XGBE_PHY_ID	0x000162d0
 #define XGBE_PHY_MASK	0xfffffff0
 
+#define XGBE_PHY_SPEEDSET_PROPERTY	"amd,speed-set"
+
 #define XGBE_AN_INT_CMPLT		0x01
 #define XGBE_AN_INC_LINK		0x02
 #define XGBE_AN_PG_RCV			0x04
@@ -94,6 +95,8 @@
 #define XNP_MP_FORMATTED		(1 << 13)
 #define XNP_NP_EXCHANGE			(1 << 15)
 
+#define XGBE_PHY_RATECHANGE_COUNT	100
+
 #ifndef MDIO_PMA_10GBR_PMD_CTRL
 #define MDIO_PMA_10GBR_PMD_CTRL		0x0096
 #endif
@@ -116,10 +119,13 @@
 #endif
 
 /* SerDes integration register offsets */
+#define SIR0_KR_RT_1			0x002c
 #define SIR0_STATUS			0x0040
 #define SIR1_SPEED			0x0000
 
 /* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX	11
+#define SIR0_KR_RT_1_RESET_WIDTH	1
 #define SIR0_STATUS_RX_READY_INDEX	0
 #define SIR0_STATUS_RX_READY_WIDTH	1
 #define SIR0_STATUS_TX_READY_INDEX	8
@@ -145,7 +151,7 @@
 
 #define SPEED_2500_CDR			0x2
 #define SPEED_2500_PLL			0x0
-#define SPEED_2500_RATE			0x2
+#define SPEED_2500_RATE			0x1
 #define SPEED_2500_TXAMP		0xf
 #define SPEED_2500_WORD			0x1
 
@@ -192,6 +198,16 @@
 	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));	\
 } while (0)
 
+#define XSIR_GET_BITS(_var, _prefix, _field)				\
+	GET_BITS((_var),						\
+		 _prefix##_##_field##_INDEX,				\
+		 _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val)			\
+	SET_BITS((_var),						\
+		 _prefix##_##_field##_INDEX,				\
+		 _prefix##_##_field##_WIDTH, (_val))
+
 /* Macros for reading or writing SerDes integration registers
  *  The ioread macros will get bit fields or full values using the
  *  register definitions formed using the input names
@@ -292,6 +308,11 @@
 	AMD_XGBE_MODE_KX,
 };
 
+enum amd_xgbe_phy_speedset {
+	AMD_XGBE_PHY_SPEEDSET_1000_10000,
+	AMD_XGBE_PHY_SPEEDSET_2500_10000,
+};
+
 struct amd_xgbe_phy_priv {
 	struct platform_device *pdev;
 	struct device *dev;
@@ -311,6 +332,7 @@
 	/* Maintain link status for re-starting auto-negotiation */
 	unsigned int link;
 	enum amd_xgbe_phy_mode mode;
+	unsigned int speed_set;
 
 	/* Auto-negotiation state machine support */
 	struct mutex an_mutex;
@@ -380,14 +402,25 @@
 static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
 {
 	struct amd_xgbe_phy_priv *priv = phydev->priv;
+	unsigned int wait;
+	u16 status;
 
 	/* Release Rx and Tx ratechange */
 	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
 
 	/* Wait for Rx and Tx ready */
-	while (!XSIR0_IOREAD_BITS(priv, SIR0_STATUS, RX_READY) &&
-	       !XSIR0_IOREAD_BITS(priv, SIR0_STATUS, TX_READY))
+	wait = XGBE_PHY_RATECHANGE_COUNT;
+	while (wait--) {
 		usleep_range(10, 20);
+
+		status = XSIR0_IOREAD(priv, SIR0_STATUS);
+		if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+		    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+			return;
+	}
+
+	netdev_err(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
+		   status);
 }
 
 static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -546,10 +579,14 @@
 	int ret;
 
 	/* If we are in KR switch to KX, and vice-versa */
-	if (priv->mode == AMD_XGBE_MODE_KR)
-		ret = amd_xgbe_phy_gmii_mode(phydev);
-	else
+	if (priv->mode == AMD_XGBE_MODE_KR) {
+		if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
+			ret = amd_xgbe_phy_gmii_mode(phydev);
+		else
+			ret = amd_xgbe_phy_gmii_2500_mode(phydev);
+	} else {
 		ret = amd_xgbe_phy_xgmii_mode(phydev);
+	}
 
 	return ret;
 }
@@ -602,9 +639,13 @@
 	if (ret < 0)
 		return AMD_XGBE_AN_ERROR;
 
+	XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
+
 	ret |= 0x01;
 	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
 
+	XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
+
 	return AMD_XGBE_AN_EVENT;
 }
 
@@ -713,7 +754,8 @@
 	else
 		ret &= ~0x80;
 
-	if (phydev->supported & SUPPORTED_1000baseKX_Full)
+	if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
+	    (phydev->supported & SUPPORTED_2500baseX_Full))
 		ret |= 0x20;
 	else
 		ret &= ~0x20;
@@ -815,6 +857,7 @@
 	struct phy_device *phydev = priv->phydev;
 	enum amd_xgbe_phy_an cur_state;
 	int sleep;
+	unsigned int an_supported = 0;
 
 	while (1) {
 		mutex_lock(&priv->an_mutex);
@@ -824,6 +867,7 @@
 		switch (priv->an_state) {
 		case AMD_XGBE_AN_START:
 			priv->an_state = amd_xgbe_an_start(phydev);
+			an_supported = 0;
 			break;
 
 		case AMD_XGBE_AN_EVENT:
@@ -832,6 +876,7 @@
 
 		case AMD_XGBE_AN_PAGE_RECEIVED:
 			priv->an_state = amd_xgbe_an_page_received(phydev);
+			an_supported++;
 			break;
 
 		case AMD_XGBE_AN_INCOMPAT_LINK:
@@ -839,6 +884,11 @@
 			break;
 
 		case AMD_XGBE_AN_COMPLETE:
+			netdev_info(phydev->attached_dev, "%s successful\n",
+				    an_supported ? "Auto negotiation"
+						 : "Parallel detection");
+			/* fall through */
+
 		case AMD_XGBE_AN_NO_LINK:
 		case AMD_XGBE_AN_EXIT:
 			goto exit_unlock;
@@ -896,14 +946,22 @@
 
 static int amd_xgbe_phy_config_init(struct phy_device *phydev)
 {
+	struct amd_xgbe_phy_priv *priv = phydev->priv;
+
 	/* Initialize supported features */
 	phydev->supported = SUPPORTED_Autoneg;
 	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
 	phydev->supported |= SUPPORTED_Backplane;
-	phydev->supported |= SUPPORTED_1000baseKX_Full |
-			     SUPPORTED_2500baseX_Full;
 	phydev->supported |= SUPPORTED_10000baseKR_Full |
 			     SUPPORTED_10000baseR_FEC;
+	switch (priv->speed_set) {
+	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+		phydev->supported |= SUPPORTED_1000baseKX_Full;
+		break;
+	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+		phydev->supported |= SUPPORTED_2500baseX_Full;
+		break;
+	}
 	phydev->advertising = phydev->supported;
 
 	/* Turn off and clear interrupts */
@@ -1020,9 +1078,9 @@
 	 * (re-)established (cable connected after the interface is
 	 * up, etc.), the link status may report no link. If there
 	 * is no link, try switching modes and checking the status
-	 * again.
+	 * again if auto negotiation is enabled.
 	 */
-	check_again = 1;
+	check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
 again:
 	/* Link status is latched low, so read once to clear
 	 * and then read again to get current state
@@ -1038,8 +1096,10 @@
 	phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
 
 	if (!phydev->link) {
-		ret = amd_xgbe_phy_switch_mode(phydev);
 		if (check_again) {
+			ret = amd_xgbe_phy_switch_mode(phydev);
+			if (ret < 0)
+				return ret;
 			check_again = 0;
 			goto again;
 		}
@@ -1059,6 +1119,7 @@
 
 static int amd_xgbe_phy_read_status(struct phy_device *phydev)
 {
+	struct amd_xgbe_phy_priv *priv = phydev->priv;
 	u32 mmd_mask = phydev->c45_ids.devices_in_package;
 	int ret, mode, ad_ret, lp_ret;
 
@@ -1108,9 +1169,19 @@
 					return ret;
 			}
 		} else {
-			phydev->speed = SPEED_1000;
+			int (*mode_fcn)(struct phy_device *);
+
+			if (priv->speed_set ==
+			    AMD_XGBE_PHY_SPEEDSET_1000_10000) {
+				phydev->speed = SPEED_1000;
+				mode_fcn = amd_xgbe_phy_gmii_mode;
+			} else {
+				phydev->speed = SPEED_2500;
+				mode_fcn = amd_xgbe_phy_gmii_2500_mode;
+			}
+
 			if (mode == MDIO_PCS_CTRL2_10GBR) {
-				ret = amd_xgbe_phy_gmii_mode(phydev);
+				ret = mode_fcn(phydev);
 				if (ret < 0)
 					return ret;
 			}
@@ -1118,8 +1189,15 @@
 
 		phydev->duplex = DUPLEX_FULL;
 	} else {
-		phydev->speed = (mode == MDIO_PCS_CTRL2_10GBR) ? SPEED_10000
-							       : SPEED_1000;
+		if (mode == MDIO_PCS_CTRL2_10GBR) {
+			phydev->speed = SPEED_10000;
+		} else {
+			if (priv->speed_set ==
+			    AMD_XGBE_PHY_SPEEDSET_1000_10000)
+				phydev->speed = SPEED_1000;
+			else
+				phydev->speed = SPEED_2500;
+		}
 		phydev->duplex = DUPLEX_FULL;
 		phydev->pause = 0;
 		phydev->asym_pause = 0;
@@ -1176,6 +1254,8 @@
 	struct platform_device *pdev;
 	struct device *dev;
 	char *wq_name;
+	const __be32 *property;
+	unsigned int speed_set;
 	int ret;
 
 	if (!phydev->dev.of_node)
@@ -1227,6 +1307,26 @@
 		goto err_sir0;
 	}
 
+	/* Get the device speed set property */
+	speed_set = 0;
+	property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
+				   NULL);
+	if (property)
+		speed_set = be32_to_cpu(*property);
+
+	switch (speed_set) {
+	case 0:
+		priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
+		break;
+	case 1:
+		priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
+		break;
+	default:
+		dev_err(dev, "invalid amd,speed-set property\n");
+		ret = -EINVAL;
+		goto err_sir1;
+	}
+
 	priv->link = 1;
 
 	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 255c21f..c301e4c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -74,7 +74,10 @@
 #define ENDIAN_FLAG	PSF_ENDIAN
 #endif
 
-#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
+struct dp83640_skb_info {
+	int ptp_type;
+	unsigned long tmo;
+};
 
 struct phy_rxts {
 	u16 ns_lo;   /* ns[15:0] */
@@ -768,10 +771,51 @@
 	return parsed * sizeof(u16);
 }
 
+static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
+{
+	u16 *seqid;
+	unsigned int offset = 0;
+	u8 *msgtype, *data = skb_mac_header(skb);
+
+	/* check sequenceID, messageType, 12 bit hash of offset 20-29 */
+
+	if (type & PTP_CLASS_VLAN)
+		offset += VLAN_HLEN;
+
+	switch (type & PTP_CLASS_PMASK) {
+	case PTP_CLASS_IPV4:
+		offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+		break;
+	case PTP_CLASS_IPV6:
+		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+		break;
+	case PTP_CLASS_L2:
+		offset += ETH_HLEN;
+		break;
+	default:
+		return 0;
+	}
+
+	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+		return 0;
+
+	if (unlikely(type & PTP_CLASS_V1))
+		msgtype = data + offset + OFF_PTP_CONTROL;
+	else
+		msgtype = data + offset;
+
+	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+	return rxts->msgtype == (*msgtype & 0xf) &&
+		rxts->seqid   == ntohs(*seqid);
+}
+
 static void decode_rxts(struct dp83640_private *dp83640,
 			struct phy_rxts *phy_rxts)
 {
 	struct rxts *rxts;
+	struct skb_shared_hwtstamps *shhwtstamps = NULL;
+	struct sk_buff *skb;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dp83640->rx_lock, flags);
@@ -785,7 +829,26 @@
 	rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
 	list_del_init(&rxts->list);
 	phy2rxts(phy_rxts, rxts);
-	list_add_tail(&rxts->list, &dp83640->rxts);
+
+	spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
+	skb_queue_walk(&dp83640->rx_queue, skb) {
+		struct dp83640_skb_info *skb_info;
+
+		skb_info = (struct dp83640_skb_info *)skb->cb;
+		if (match(skb, skb_info->ptp_type, rxts)) {
+			__skb_unlink(skb, &dp83640->rx_queue);
+			shhwtstamps = skb_hwtstamps(skb);
+			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+			shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
+			netif_rx_ni(skb);
+			list_add(&rxts->list, &dp83640->rxpool);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
+
+	if (!shhwtstamps)
+		list_add_tail(&rxts->list, &dp83640->rxts);
 out:
 	spin_unlock_irqrestore(&dp83640->rx_lock, flags);
 }
@@ -887,45 +950,6 @@
 	return (*msgtype & 0xf) == 0;
 }
 
-static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
-{
-	u16 *seqid;
-	unsigned int offset = 0;
-	u8 *msgtype, *data = skb_mac_header(skb);
-
-	/* check sequenceID, messageType, 12 bit hash of offset 20-29 */
-
-	if (type & PTP_CLASS_VLAN)
-		offset += VLAN_HLEN;
-
-	switch (type & PTP_CLASS_PMASK) {
-	case PTP_CLASS_IPV4:
-		offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
-		break;
-	case PTP_CLASS_IPV6:
-		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
-		break;
-	case PTP_CLASS_L2:
-		offset += ETH_HLEN;
-		break;
-	default:
-		return 0;
-	}
-
-	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
-		return 0;
-
-	if (unlikely(type & PTP_CLASS_V1))
-		msgtype = data + offset + OFF_PTP_CONTROL;
-	else
-		msgtype = data + offset;
-
-	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
-
-	return rxts->msgtype == (*msgtype & 0xf) &&
-		rxts->seqid   == ntohs(*seqid);
-}
-
 static void dp83640_free_clocks(void)
 {
 	struct dp83640_clock *clock;
@@ -1302,44 +1326,34 @@
 {
 	struct dp83640_private *dp83640 =
 		container_of(work, struct dp83640_private, ts_work);
-	struct list_head *this, *next;
-	struct rxts *rxts;
-	struct skb_shared_hwtstamps *shhwtstamps;
 	struct sk_buff *skb;
-	unsigned int type;
-	unsigned long flags;
 
-	/* Deliver each deferred packet, with or without a time stamp. */
+	/* Deliver expired packets. */
+	while ((skb = skb_dequeue(&dp83640->rx_queue))) {
+		struct dp83640_skb_info *skb_info;
 
-	while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL) {
-		type = SKB_PTP_TYPE(skb);
-		spin_lock_irqsave(&dp83640->rx_lock, flags);
-		list_for_each_safe(this, next, &dp83640->rxts) {
-			rxts = list_entry(this, struct rxts, list);
-			if (match(skb, type, rxts)) {
-				shhwtstamps = skb_hwtstamps(skb);
-				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-				shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
-				list_del_init(&rxts->list);
-				list_add(&rxts->list, &dp83640->rxpool);
-				break;
-			}
+		skb_info = (struct dp83640_skb_info *)skb->cb;
+		if (!time_after(jiffies, skb_info->tmo)) {
+			skb_queue_head(&dp83640->rx_queue, skb);
+			break;
 		}
-		spin_unlock_irqrestore(&dp83640->rx_lock, flags);
+
 		netif_rx_ni(skb);
 	}
 
-	/* Clear out expired time stamps. */
-
-	spin_lock_irqsave(&dp83640->rx_lock, flags);
-	prune_rx_ts(dp83640);
-	spin_unlock_irqrestore(&dp83640->rx_lock, flags);
+	if (!skb_queue_empty(&dp83640->rx_queue))
+		schedule_work(&dp83640->ts_work);
 }
 
 static bool dp83640_rxtstamp(struct phy_device *phydev,
 			     struct sk_buff *skb, int type)
 {
 	struct dp83640_private *dp83640 = phydev->priv;
+	struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
+	struct list_head *this, *next;
+	struct rxts *rxts;
+	struct skb_shared_hwtstamps *shhwtstamps = NULL;
+	unsigned long flags;
 
 	if (is_status_frame(skb, type)) {
 		decode_status_frame(dp83640, skb);
@@ -1350,9 +1364,27 @@
 	if (!dp83640->hwts_rx_en)
 		return false;
 
-	SKB_PTP_TYPE(skb) = type;
-	skb_queue_tail(&dp83640->rx_queue, skb);
-	schedule_work(&dp83640->ts_work);
+	spin_lock_irqsave(&dp83640->rx_lock, flags);
+	list_for_each_safe(this, next, &dp83640->rxts) {
+		rxts = list_entry(this, struct rxts, list);
+		if (match(skb, type, rxts)) {
+			shhwtstamps = skb_hwtstamps(skb);
+			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+			shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
+			netif_rx_ni(skb);
+			list_del_init(&rxts->list);
+			list_add(&rxts->list, &dp83640->rxpool);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dp83640->rx_lock, flags);
+
+	if (!shhwtstamps) {
+		skb_info->ptp_type = type;
+		skb_info->tmo = jiffies + 2;
+		skb_queue_tail(&dp83640->rx_queue, skb);
+		schedule_work(&dp83640->ts_work);
+	}
 
 	return true;
 }
@@ -1373,7 +1405,6 @@
 	case HWTSTAMP_TX_ON:
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		skb_queue_tail(&dp83640->tx_queue, skb);
-		schedule_work(&dp83640->ts_work);
 		break;
 
 	case HWTSTAMP_TX_OFF:
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 4eaadcf..203651e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -255,6 +255,7 @@
 
 	bus->dev.parent = bus->parent;
 	bus->dev.class = &mdio_bus_class;
+	bus->dev.driver = bus->parent->driver;
 	bus->dev.groups = NULL;
 	dev_set_name(&bus->dev, "%s", bus->id);
 
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index bc7c7d2..fd0ea7c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -420,6 +420,26 @@
 	return 0;
 }
 
+/* This routine returns -1 as an indication to the caller that the
+ * Micrel ksz9021 10/100/1000 PHY does not support standard IEEE
+ * MMD extended PHY registers.
+ */
+static int
+ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
+		      int regnum)
+{
+	return -1;
+}
+
+/* This routine does nothing since the Micrel ksz9021 does not support
+ * standard IEEE MMD extended PHY registers.
+ */
+static void
+ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
+		      int regnum, u32 val)
+{
+}
+
 static struct phy_driver ksphy_driver[] = {
 {
 	.phy_id		= PHY_ID_KS8737,
@@ -565,6 +585,8 @@
 	.config_intr	= ksz9021_config_intr,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
+	.read_mmd_indirect = ksz9021_rd_mmd_phyreg,
+	.write_mmd_indirect = ksz9021_wr_mmd_phyreg,
 	.driver		= { .owner = THIS_MODULE, },
 }, {
 	.phy_id		= PHY_ID_KSZ9031,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e56e269..c94e2a2 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -942,7 +942,7 @@
 
 /**
  * phy_read_mmd_indirect - reads data from the MMD registers
- * @bus: the target MII bus
+ * @phydev: The PHY device
  * @prtad: MMD Address
  * @devad: MMD DEVAD
  * @addr: PHY address on the MII bus
@@ -955,18 +955,26 @@
  * 3) Write reg 13 // MMD Data Command for MMD DEVAD
  * 3) Read  reg 14 // Read MMD data
  */
-static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
-				 int addr)
+static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
+				 int devad, int addr)
 {
-	mmd_phy_indirect(bus, prtad, devad, addr);
+	struct phy_driver *phydrv = phydev->drv;
+	int value = -1;
 
-	/* Read the content of the MMD's selected register */
-	return bus->read(bus, addr, MII_MMD_DATA);
+	if (phydrv->read_mmd_indirect == NULL) {
+		mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+
+		/* Read the content of the MMD's selected register */
+		value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA);
+	} else {
+		value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
+	}
+	return value;
 }
 
 /**
  * phy_write_mmd_indirect - writes data to the MMD registers
- * @bus: the target MII bus
+ * @phydev: The PHY device
  * @prtad: MMD Address
  * @devad: MMD DEVAD
  * @addr: PHY address on the MII bus
@@ -980,13 +988,19 @@
  * 3) Write reg 13 // MMD Data Command for MMD DEVAD
  * 3) Write reg 14 // Write MMD data
  */
-static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
-				   int addr, u32 data)
+static void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
+				   int devad, int addr, u32 data)
 {
-	mmd_phy_indirect(bus, prtad, devad, addr);
+	struct phy_driver *phydrv = phydev->drv;
 
-	/* Write the data into MMD's selected register */
-	bus->write(bus, addr, MII_MMD_DATA, data);
+	if (phydrv->write_mmd_indirect == NULL) {
+		mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+
+		/* Write the data into MMD's selected register */
+		phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data);
+	} else {
+		phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
+	}
 }
 
 /**
@@ -1020,7 +1034,7 @@
 			return status;
 
 		/* First check if the EEE ability is supported */
-		eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
+		eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
 						MDIO_MMD_PCS, phydev->addr);
 		if (eee_cap < 0)
 			return eee_cap;
@@ -1032,12 +1046,12 @@
 		/* Check which link settings negotiated and verify it in
 		 * the EEE advertising registers.
 		 */
-		eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
+		eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
 					       MDIO_MMD_AN, phydev->addr);
 		if (eee_lp < 0)
 			return eee_lp;
 
-		eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
+		eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
 						MDIO_MMD_AN, phydev->addr);
 		if (eee_adv < 0)
 			return eee_adv;
@@ -1052,15 +1066,16 @@
 			/* Configure the PHY to stop receiving xMII
 			 * clock while it is signaling LPI.
 			 */
-			int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
+			int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
 							MDIO_MMD_PCS,
 							phydev->addr);
 			if (val < 0)
 				return val;
 
 			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
-			phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
-					       MDIO_MMD_PCS, phydev->addr, val);
+			phy_write_mmd_indirect(phydev, MDIO_CTRL1,
+					       MDIO_MMD_PCS, phydev->addr,
+					       val);
 		}
 
 		return 0; /* EEE supported */
@@ -1079,7 +1094,7 @@
  */
 int phy_get_eee_err(struct phy_device *phydev)
 {
-	return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
+	return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR,
 				     MDIO_MMD_PCS, phydev->addr);
 }
 EXPORT_SYMBOL(phy_get_eee_err);
@@ -1097,21 +1112,21 @@
 	int val;
 
 	/* Get Supported EEE */
-	val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
+	val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
 				    MDIO_MMD_PCS, phydev->addr);
 	if (val < 0)
 		return val;
 	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
 
 	/* Get advertisement EEE */
-	val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
+	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
 				    MDIO_MMD_AN, phydev->addr);
 	if (val < 0)
 		return val;
 	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
 	/* Get LP advertisement EEE */
-	val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
+	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
 				    MDIO_MMD_AN, phydev->addr);
 	if (val < 0)
 		return val;
@@ -1132,7 +1147,7 @@
 {
 	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
 
-	phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
+	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
 			       phydev->addr, val);
 
 	return 0;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4f4568e..ca5ec3e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -355,7 +355,7 @@
 	phydev->bus->phy_map[phydev->addr] = phydev;
 
 	/* Run all of the fixups for this PHY */
-	err = phy_init_hw(phydev);
+	err = phy_scan_fixups(phydev);
 	if (err) {
 		pr_err("PHY %d failed to initialize\n", phydev->addr);
 		goto out;
@@ -575,6 +575,7 @@
 		      u32 flags, phy_interface_t interface)
 {
 	struct device *d = &phydev->dev;
+	struct module *bus_module;
 	int err;
 
 	/* Assume that if there is no driver, that it doesn't
@@ -599,6 +600,14 @@
 		return -EBUSY;
 	}
 
+	/* Increment the bus module reference count */
+	bus_module = phydev->bus->dev.driver ?
+		     phydev->bus->dev.driver->owner : NULL;
+	if (!try_module_get(bus_module)) {
+		dev_err(&dev->dev, "failed to get the bus module\n");
+		return -EIO;
+	}
+
 	phydev->attached_dev = dev;
 	dev->phydev = phydev;
 
@@ -664,6 +673,10 @@
 void phy_detach(struct phy_device *phydev)
 {
 	int i;
+
+	if (phydev->bus->dev.driver)
+		module_put(phydev->bus->dev.driver->owner);
+
 	phydev->attached_dev->phydev = NULL;
 	phydev->attached_dev = NULL;
 	phy_suspend(phydev);
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index a58dfeb..d7be9b3 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -272,7 +272,7 @@
 {
 	struct lb_priv *lb_priv = get_lb_priv(team);
 	struct sk_filter *fp = NULL;
-	struct sk_filter *orig_fp;
+	struct sk_filter *orig_fp = NULL;
 	struct sock_fprog_kern *fprog = NULL;
 	int err;
 
@@ -293,11 +293,15 @@
 		__fprog_destroy(lb_priv->ex->orig_fprog);
 		orig_fp = rcu_dereference_protected(lb_priv->fp,
 						lockdep_is_held(&team->lock));
-		sk_unattached_filter_destroy(orig_fp);
 	}
 
 	rcu_assign_pointer(lb_priv->fp, fp);
 	lb_priv->ex->orig_fprog = fprog;
+
+	if (orig_fp) {
+		synchronize_rcu();
+		sk_unattached_filter_destroy(orig_fp);
+	}
 	return 0;
 }
 
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9ea4bfe..2a32d91 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -341,6 +341,22 @@
 		usb_driver_release_interface(driver, info->data);
 		return -ENODEV;
 	}
+
+	/* Some devices don't initialise properly. In particular,
+	 * the packet filter is not reset. Some devices don't
+	 * reset all the way, so the packet filter should be set
+	 * to a sane initial value.
+	 */
+	usb_control_msg(dev->udev,
+			usb_sndctrlpipe(dev->udev, 0),
+			USB_CDC_SET_ETHERNET_PACKET_FILTER,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
+			intf->cur_altsetting->desc.bInterfaceNumber,
+			NULL,
+			0,
+			USB_CTRL_SET_TIMEOUT
+		);
 	return 0;
 
 bad_desc:
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e1e4305..87f7104 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -287,7 +287,7 @@
 /* USB_DEV_STAT */
 #define STAT_SPEED_MASK		0x0006
 #define STAT_SPEED_HIGH		0x0000
-#define STAT_SPEED_FULL		0x0001
+#define STAT_SPEED_FULL		0x0002
 
 /* USB_TX_AGG */
 #define TX_AGG_MAX_THRESHOLD	0x03
@@ -2300,9 +2300,8 @@
 	/* rx share fifo credit full threshold */
 	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
 
-	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
-	ocp_data &= STAT_SPEED_MASK;
-	if (ocp_data == STAT_SPEED_FULL) {
+	if (tp->udev->speed == USB_SPEED_FULL ||
+	    tp->udev->speed == USB_SPEED_LOW) {
 		/* rx share fifo credit near full threshold */
 		ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
 				RXFIFO_THR2_FULL);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7d9f84a..59caa06 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/average.h>
+#include <net/busy_poll.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -521,6 +522,8 @@
 		skb_shinfo(skb)->gso_segs = 0;
 	}
 
+	skb_mark_napi_id(skb, &rq->napi);
+
 	netif_receive_skb(skb);
 	return;
 
@@ -725,15 +728,12 @@
 	}
 }
 
-static int virtnet_poll(struct napi_struct *napi, int budget)
+static int virtnet_receive(struct receive_queue *rq, int budget)
 {
-	struct receive_queue *rq =
-		container_of(napi, struct receive_queue, napi);
 	struct virtnet_info *vi = rq->vq->vdev->priv;
+	unsigned int len, received = 0;
 	void *buf;
-	unsigned int r, len, received = 0;
 
-again:
 	while (received < budget &&
 	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
 		receive_buf(rq, buf, len);
@@ -745,6 +745,18 @@
 			schedule_delayed_work(&vi->refill, 0);
 	}
 
+	return received;
+}
+
+static int virtnet_poll(struct napi_struct *napi, int budget)
+{
+	struct receive_queue *rq =
+		container_of(napi, struct receive_queue, napi);
+	unsigned int r, received = 0;
+
+again:
+	received += virtnet_receive(rq, budget - received);
+
 	/* Out of packets? */
 	if (received < budget) {
 		r = virtqueue_enable_cb_prepare(rq->vq);
@@ -760,6 +772,43 @@
 	return received;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int virtnet_busy_poll(struct napi_struct *napi)
+{
+	struct receive_queue *rq =
+		container_of(napi, struct receive_queue, napi);
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	int r, received = 0, budget = 4;
+
+	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
+		return LL_FLUSH_FAILED;
+
+	if (!napi_schedule_prep(napi))
+		return LL_FLUSH_BUSY;
+
+	virtqueue_disable_cb(rq->vq);
+
+again:
+	received += virtnet_receive(rq, budget);
+
+	r = virtqueue_enable_cb_prepare(rq->vq);
+	clear_bit(NAPI_STATE_SCHED, &napi->state);
+	if (unlikely(virtqueue_poll(rq->vq, r)) &&
+	    napi_schedule_prep(napi)) {
+		virtqueue_disable_cb(rq->vq);
+		if (received < budget) {
+			budget -= received;
+			goto again;
+		} else {
+			__napi_schedule(napi);
+		}
+	}
+
+	return received;
+}
+#endif	/* CONFIG_NET_RX_BUSY_POLL */
+
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -1347,6 +1396,9 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = virtnet_netpoll,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= virtnet_busy_poll,
+#endif
 };
 
 static void virtnet_config_changed_work(struct work_struct *work)
@@ -1552,6 +1604,7 @@
 		vi->rq[i].pages = NULL;
 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
 			       napi_weight);
+		napi_hash_add(&vi->rq[i].napi);
 
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
 		ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
@@ -1853,11 +1906,13 @@
 	netif_device_detach(vi->dev);
 	cancel_delayed_work_sync(&vi->refill);
 
-	if (netif_running(vi->dev))
+	if (netif_running(vi->dev)) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
 			napi_disable(&vi->rq[i].napi);
+			napi_hash_del(&vi->rq[i].napi);
 			netif_napi_del(&vi->rq[i].napi);
 		}
+	}
 
 	remove_vq_common(vi);
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d3f3e5d..1fb7b37 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -340,7 +340,7 @@
 	ndm->ndm_state = fdb->state;
 	ndm->ndm_ifindex = vxlan->dev->ifindex;
 	ndm->ndm_flags = fdb->flags;
-	ndm->ndm_type = NDA_DST;
+	ndm->ndm_type = RTN_UNICAST;
 
 	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
 		goto nla_put_failure;
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index d185dc0..4333107 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -603,16 +603,19 @@
 		if (ret)
 			return ret;
 
-		src_ring->hw_index =
-			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
-		src_ring->hw_index &= nentries_mask;
+		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+		if (read_index == 0xffffffff)
+			return -ENODEV;
+
+		read_index &= nentries_mask;
+		src_ring->hw_index = read_index;
 
 		ath10k_pci_sleep(ar);
 	}
 
 	read_index = src_ring->hw_index;
 
-	if ((read_index == sw_index) || (read_index == 0xffffffff))
+	if (read_index == sw_index)
 		return -EIO;
 
 	sbase = src_ring->shadow_base;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index e6c56c5..93adb8c 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -802,7 +802,7 @@
 
 	INIT_LIST_HEAD(&ar->arvifs);
 
-	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) {
 		ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
 			    ar->hw_params.name,
 			    ar->target_version,
@@ -811,6 +811,12 @@
 			    ar->fw_api,
 			    ar->htt.target_version_major,
 			    ar->htt.target_version_minor);
+		ath10k_info("debug %d debugfs %d tracing %d dfs %d\n",
+			    config_enabled(CONFIG_ATH10K_DEBUG),
+			    config_enabled(CONFIG_ATH10K_DEBUGFS),
+			    config_enabled(CONFIG_ATH10K_TRACING),
+			    config_enabled(CONFIG_ATH10K_DFS_CERTIFIED));
+	}
 
 	__set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags);
 
@@ -988,7 +994,9 @@
 err_release_fw:
 	ath10k_core_free_firmware_files(ar);
 err:
-	device_release_driver(ar->dev);
+	/* TODO: It's probably a good idea to release the device from the
+	 * driver, but calling device_release_driver() here will cause a
+	 * deadlock.
+	 */
 	return;
 }
 
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 68ceef6..83a5fa9 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -290,6 +290,9 @@
 	struct ath_dfs_pool_stats dfs_pool_stats;
 
 	u32 fw_dbglog_mask;
+
+	u8 htt_max_amsdu;
+	u8 htt_max_ampdu;
 };
 
 enum ath10k_state {
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 1b7ff4b..3030158 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -671,6 +671,72 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
+					       char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[64];
+	u8 amsdu = 3, ampdu = 64;
+	unsigned int len;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->debug.htt_max_amsdu)
+		amsdu = ar->debug.htt_max_amsdu;
+
+	if (ar->debug.htt_max_ampdu)
+		ampdu = ar->debug.htt_max_ampdu;
+
+	mutex_unlock(&ar->conf_mutex);
+
+	len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
+						const char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	int res;
+	char buf[64];
+	unsigned int amsdu, ampdu;
+
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = 0;
+
+	res = sscanf(buf, "%u %u", &amsdu, &ampdu);
+
+	if (res != 2)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu);
+	if (res)
+		goto out;
+
+	res = count;
+	ar->debug.htt_max_amsdu = amsdu;
+	ar->debug.htt_max_ampdu = ampdu;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+	return res;
+}
+
+static const struct file_operations fops_htt_max_amsdu_ampdu = {
+	.read = ath10k_read_htt_max_amsdu_ampdu,
+	.write = ath10k_write_htt_max_amsdu_ampdu,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 static ssize_t ath10k_read_fw_dbglog(struct file *file,
 					    char __user *user_buf,
 					    size_t count, loff_t *ppos)
@@ -757,6 +823,9 @@
 	 * warning from del_timer(). */
 	if (ar->debug.htt_stats_mask != 0)
 		cancel_delayed_work(&ar->debug.htt_stats_dwork);
+
+	ar->debug.htt_max_amsdu = 0;
+	ar->debug.htt_max_ampdu = 0;
 }
 
 static ssize_t ath10k_write_simulate_radar(struct file *file,
@@ -867,6 +936,10 @@
 	debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
 			    ar, &fops_htt_stats_mask);
 
+	debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_phy, ar,
+			    &fops_htt_max_amsdu_ampdu);
+
 	debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
 			    ar, &fops_fw_dbglog);
 
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 9a26346..6c93f38 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -240,16 +240,10 @@
 	__le16 rsvd0;
 } __packed;
 
-#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_MASK 0x1F
-#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_LSB  0
-
 struct htt_aggr_conf {
 	u8 max_num_ampdu_subframes;
-	union {
-		/* dont use bitfields; undefined behaviour */
-		u8 flags; /* see %HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_ */
-		u8 max_num_amsdu_subframes:5;
-	} __packed;
+	/* amsdu_subframes is limited by 0x1F mask */
+	u8 max_num_amsdu_subframes;
 } __packed;
 
 #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
@@ -1343,6 +1337,9 @@
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
+int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
+				u8 max_subfrms_ampdu,
+				u8 max_subfrms_amsdu);
 
 void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 7064354..accb6b4 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -307,6 +307,52 @@
 	return 0;
 }
 
+int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
+				u8 max_subfrms_ampdu,
+				u8 max_subfrms_amsdu)
+{
+	struct htt_aggr_conf *aggr_conf;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	int len;
+	int ret;
+
+	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */
+
+	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
+		return -EINVAL;
+
+	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
+		return -EINVAL;
+
+	len = sizeof(cmd->hdr);
+	len += sizeof(cmd->aggr_conf);
+
+	skb = ath10k_htc_alloc_skb(len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
+
+	aggr_conf = &cmd->aggr_conf;
+	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
+	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
+
+	ath10k_dbg(ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
+		   aggr_conf->max_num_amsdu_subframes,
+		   aggr_conf->max_num_ampdu_subframes);
+
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 {
 	struct device *dev = htt->ar->dev;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index d0004d5..06840d1 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1362,8 +1362,6 @@
 		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
 	}
 
-	init_completion(&xfer.done);
-
 	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
 	if (ret)
 		goto err_resp;
@@ -1414,10 +1412,7 @@
 					  &nbytes, &transfer_id))
 		return;
 
-	if (xfer->wait_for_resp)
-		return;
-
-	complete(&xfer->done);
+	xfer->tx_done = true;
 }
 
 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
@@ -1438,7 +1433,7 @@
 	}
 
 	xfer->resp_len = nbytes;
-	complete(&xfer->done);
+	xfer->rx_done = true;
 }
 
 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
@@ -1451,7 +1446,7 @@
 		ath10k_pci_bmi_send_done(tx_pipe);
 		ath10k_pci_bmi_recv_data(rx_pipe);
 
-		if (completion_done(&xfer->done))
+		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
 			return 0;
 
 		schedule();
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index dfdebb4..9401292 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -38,7 +38,8 @@
 #define DIAG_TRANSFER_LIMIT 2048
 
 struct bmi_xfer {
-	struct completion done;
+	bool tx_done;
+	bool rx_done;
 	bool wait_for_resp;
 	u32 resp_len;
 };
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 4b7782a..6f83cae 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2106,7 +2106,6 @@
 {
 	struct wmi_cmd_hdr *cmd_hdr;
 	enum wmi_event_id id;
-	u16 len;
 
 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -2114,8 +2113,6 @@
 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
 		return;
 
-	len = skb->len;
-
 	trace_ath10k_wmi_event(id, skb->data, skb->len);
 
 	switch (id) {
@@ -2225,7 +2222,6 @@
 {
 	struct wmi_cmd_hdr *cmd_hdr;
 	enum wmi_10x_event_id id;
-	u16 len;
 
 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -2233,8 +2229,6 @@
 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
 		return;
 
-	len = skb->len;
-
 	trace_ath10k_wmi_event(id, skb->data, skb->len);
 
 	switch (id) {
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
index 18fdd69..397a52f 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.h
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -242,7 +242,8 @@
 		(void) (check_type == val);				\
 		addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item));	\
 		ret = ath6kl_bmi_read(ar, addr, (u8 *) &tmp, 4);	\
-		*val = le32_to_cpu(tmp);				\
+		if (!ret)						\
+			*val = le32_to_cpu(tmp);			\
 		ret;							\
 	})
 
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 1c4ce8e..e535807 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2899,7 +2899,8 @@
 	if (info->inactivity_timeout) {
 		inactivity_timeout = info->inactivity_timeout;
 
-		if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
+		if (test_bit(ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS,
+			     ar->fw_capabilities))
 			inactivity_timeout = DIV_ROUND_UP(inactivity_timeout,
 							  60);
 
@@ -3782,7 +3783,8 @@
 		ath6kl_band_5ghz.ht_cap.ht_supported = false;
 	}
 
-	if (ar->hw.flags & ATH6KL_HW_64BIT_RATES) {
+	if (test_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES,
+		     ar->fw_capabilities)) {
 		ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
 		ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
 		ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index b0b6520..0df74b2 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -123,6 +123,22 @@
 
 	/* FIXME: we should free all firmwares in the error cases below */
 
+	/*
+	 * Backwards compatibility support for older ar6004 firmware images
+	 * which do not set these feature flags.
+	 */
+	if (ar->target_type == TARGET_TYPE_AR6004 &&
+	    ar->fw_api <= 4) {
+		__set_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES,
+			  ar->fw_capabilities);
+		__set_bit(ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS,
+			  ar->fw_capabilities);
+
+		if (ar->hw.id == AR6004_HW_1_3_VERSION)
+			__set_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
+				  ar->fw_capabilities);
+	}
+
 	/* Indicate that WMI is enabled (although not ready yet) */
 	set_bit(WMI_ENABLED, &ar->flag);
 	ar->wmi = ath6kl_wmi_init(ar);
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 26b0f92..2b78c86 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -136,6 +136,21 @@
 	 */
 	ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
 
+	/* WMI_SET_TX_SELECT_RATES_CMDID uses 64 bit size rate table */
+	ATH6KL_FW_CAPABILITY_64BIT_RATES,
+
+	/* WMI_AP_CONN_INACT_CMDID uses minutes as units */
+	ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS,
+
+	/* use low priority endpoint for all data */
+	ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
+
+	/* ratetable is the 2 stream version (max MCS15) */
+	ATH6KL_FW_CAPABILITY_RATETABLE_MCS15,
+
+	/* firmware doesn't support IP checksumming */
+	ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM,
+
 	/* this needs to be last */
 	ATH6KL_FW_CAPABILITY_MAX,
 };
@@ -149,15 +164,13 @@
 };
 
 enum ath6kl_hw_flags {
-	ATH6KL_HW_64BIT_RATES		= BIT(0),
-	ATH6KL_HW_AP_INACTIVITY_MINS	= BIT(1),
-	ATH6KL_HW_MAP_LP_ENDPOINT	= BIT(2),
 	ATH6KL_HW_SDIO_CRC_ERROR_WAR	= BIT(3),
 };
 
 #define ATH6KL_FW_API2_FILE "fw-2.bin"
 #define ATH6KL_FW_API3_FILE "fw-3.bin"
 #define ATH6KL_FW_API4_FILE "fw-4.bin"
+#define ATH6KL_FW_API5_FILE "fw-5.bin"
 
 /* AR6003 1.0 definitions */
 #define AR6003_HW_1_0_VERSION                 0x300002ba
@@ -215,8 +228,21 @@
 #define AR6004_HW_1_3_VERSION			0x31c8088a
 #define AR6004_HW_1_3_FW_DIR			"ath6k/AR6004/hw1.3"
 #define AR6004_HW_1_3_FIRMWARE_FILE		"fw.ram.bin"
-#define AR6004_HW_1_3_BOARD_DATA_FILE		"ath6k/AR6004/hw1.3/bdata.bin"
-#define AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE	"ath6k/AR6004/hw1.3/bdata.bin"
+#define AR6004_HW_1_3_TCMD_FIRMWARE_FILE	"utf.bin"
+#define AR6004_HW_1_3_UTF_FIRMWARE_FILE		"utf.bin"
+#define AR6004_HW_1_3_TESTSCRIPT_FILE		"nullTestFlow.bin"
+#define AR6004_HW_1_3_BOARD_DATA_FILE	      AR6004_HW_1_3_FW_DIR "/bdata.bin"
+#define AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE AR6004_HW_1_3_FW_DIR "/bdata.bin"
+
+/* AR6004 3.0 definitions */
+#define AR6004_HW_3_0_VERSION			0x31C809F8
+#define AR6004_HW_3_0_FW_DIR			"ath6k/AR6004/hw3.0"
+#define AR6004_HW_3_0_FIRMWARE_FILE		"fw.ram.bin"
+#define AR6004_HW_3_0_TCMD_FIRMWARE_FILE	"utf.bin"
+#define AR6004_HW_3_0_UTF_FIRMWARE_FILE		"utf.bin"
+#define AR6004_HW_3_0_TESTSCRIPT_FILE		"nullTestFlow.bin"
+#define AR6004_HW_3_0_BOARD_DATA_FILE	      AR6004_HW_3_0_FW_DIR "/bdata.bin"
+#define AR6004_HW_3_0_DEFAULT_BOARD_DATA_FILE AR6004_HW_3_0_FW_DIR "/bdata.bin"
 
 /* Per STA data, used in AP mode */
 #define STA_PS_AWAKE		BIT(0)
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 756fe52..ca1a18c 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -1170,8 +1170,12 @@
 static void htc_rxctrl_complete(struct htc_target *context,
 				struct htc_packet *packet)
 {
-	/* TODO, can't really receive HTC control messages yet.... */
-	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: invalid call function\n", __func__);
+	struct sk_buff *skb = packet->skb;
+
+	if (packet->endpoint == ENDPOINT_0 &&
+	    packet->status == -ECANCELED &&
+	    skb != NULL)
+		dev_kfree_skb(skb);
 }
 
 /* htc pipe initialization */
@@ -1678,7 +1682,29 @@
 
 static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
 {
-	/* TODO */
+	struct htc_endpoint *endpoint;
+	struct htc_packet *packet, *tmp_pkt;
+	int i;
+
+	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+		endpoint = &target->endpoint[i];
+
+		spin_lock_bh(&target->rx_lock);
+
+		list_for_each_entry_safe(packet, tmp_pkt,
+					 &endpoint->rx_bufq, list) {
+			list_del(&packet->list);
+			spin_unlock_bh(&target->rx_lock);
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "htc rx flush pkt 0x%p len %d ep %d\n",
+				   packet, packet->buf_len,
+				   packet->endpoint);
+			dev_kfree_skb(packet->pkt_cntxt);
+			spin_lock_bh(&target->rx_lock);
+		}
+
+		spin_unlock_bh(&target->rx_lock);
+	}
 }
 
 static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 8ee7097..fffd523 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -93,8 +93,7 @@
 		.board_addr			= 0x433900,
 		.refclk_hz			= 26000000,
 		.uarttx_pin			= 11,
-		.flags				= ATH6KL_HW_64BIT_RATES |
-						  ATH6KL_HW_AP_INACTIVITY_MINS,
+		.flags				= 0,
 
 		.fw = {
 			.dir		= AR6004_HW_1_0_FW_DIR,
@@ -114,8 +113,7 @@
 		.board_addr			= 0x43d400,
 		.refclk_hz			= 40000000,
 		.uarttx_pin			= 11,
-		.flags				= ATH6KL_HW_64BIT_RATES |
-						  ATH6KL_HW_AP_INACTIVITY_MINS,
+		.flags				= 0,
 		.fw = {
 			.dir		= AR6004_HW_1_1_FW_DIR,
 			.fw		= AR6004_HW_1_1_FIRMWARE_FILE,
@@ -134,8 +132,7 @@
 		.board_addr			= 0x435c00,
 		.refclk_hz			= 40000000,
 		.uarttx_pin			= 11,
-		.flags				= ATH6KL_HW_64BIT_RATES |
-						  ATH6KL_HW_AP_INACTIVITY_MINS,
+		.flags				= 0,
 
 		.fw = {
 			.dir		= AR6004_HW_1_2_FW_DIR,
@@ -152,20 +149,43 @@
 		.board_ext_data_addr		= 0x437000,
 		.reserved_ram_size		= 7168,
 		.board_addr			= 0x436400,
-		.refclk_hz                      = 40000000,
+		.refclk_hz                      = 0,
 		.uarttx_pin                     = 11,
-		.flags				= ATH6KL_HW_64BIT_RATES |
-						  ATH6KL_HW_AP_INACTIVITY_MINS |
-						  ATH6KL_HW_MAP_LP_ENDPOINT,
+		.flags				= 0,
 
 		.fw = {
 			.dir            = AR6004_HW_1_3_FW_DIR,
 			.fw             = AR6004_HW_1_3_FIRMWARE_FILE,
+			.tcmd	        = AR6004_HW_1_3_TCMD_FIRMWARE_FILE,
+			.utf		= AR6004_HW_1_3_UTF_FIRMWARE_FILE,
+			.testscript	= AR6004_HW_1_3_TESTSCRIPT_FILE,
 		},
 
 		.fw_board               = AR6004_HW_1_3_BOARD_DATA_FILE,
 		.fw_default_board       = AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE,
 	},
+	{
+		.id				= AR6004_HW_3_0_VERSION,
+		.name				= "ar6004 hw 3.0",
+		.dataset_patch_addr		= 0,
+		.app_load_addr			= 0x1234,
+		.board_ext_data_addr		= 0,
+		.reserved_ram_size		= 7168,
+		.board_addr			= 0x436400,
+		.testscript_addr		= 0,
+		.flags				= 0,
+
+		.fw = {
+			.dir		= AR6004_HW_3_0_FW_DIR,
+			.fw		= AR6004_HW_3_0_FIRMWARE_FILE,
+			.tcmd	        = AR6004_HW_3_0_TCMD_FIRMWARE_FILE,
+			.utf		= AR6004_HW_3_0_UTF_FIRMWARE_FILE,
+			.testscript	= AR6004_HW_3_0_TESTSCRIPT_FILE,
+		},
+
+		.fw_board		= AR6004_HW_3_0_BOARD_DATA_FILE,
+		.fw_default_board	= AR6004_HW_3_0_DEFAULT_BOARD_DATA_FILE,
+	},
 };
 
 /*
@@ -601,7 +621,9 @@
 	 * but possible in theory.
 	 */
 
-	if (ar->target_type == TARGET_TYPE_AR6003) {
+	if ((ar->target_type == TARGET_TYPE_AR6003) ||
+	    (ar->version.target_ver == AR6004_HW_1_3_VERSION) ||
+	    (ar->version.target_ver == AR6004_HW_3_0_VERSION)) {
 		param = ar->hw.board_ext_data_addr;
 		ram_reserved_size = ar->hw.reserved_ram_size;
 
@@ -629,9 +651,12 @@
 		return status;
 
 	/* Configure target refclk_hz */
-	status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz, ar->hw.refclk_hz);
-	if (status)
-		return status;
+	if (ar->hw.refclk_hz != 0) {
+		status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz,
+					       ar->hw.refclk_hz);
+		if (status)
+			return status;
+	}
 
 	return 0;
 }
@@ -1112,6 +1137,12 @@
 	if (ret)
 		return ret;
 
+	ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API5_FILE);
+	if (ret == 0) {
+		ar->fw_api = 5;
+		goto out;
+	}
+
 	ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API4_FILE);
 	if (ret == 0) {
 		ar->fw_api = 4;
@@ -1161,11 +1192,19 @@
 		ath6kl_bmi_write_hi32(ar, hi_board_data,
 				      board_address);
 	} else {
-		ath6kl_bmi_read_hi32(ar, hi_board_data, &board_address);
+		ret = ath6kl_bmi_read_hi32(ar, hi_board_data, &board_address);
+		if (ret) {
+			ath6kl_err("Failed to get board file target address.\n");
+			return ret;
+		}
 	}
 
 	/* determine where in target ram to write extended board data */
-	ath6kl_bmi_read_hi32(ar, hi_board_ext_data, &board_ext_address);
+	ret = ath6kl_bmi_read_hi32(ar, hi_board_ext_data, &board_ext_address);
+	if (ret) {
+		ath6kl_err("Failed to get extended board file target address.\n");
+		return ret;
+	}
 
 	if (ar->target_type == TARGET_TYPE_AR6003 &&
 	    board_ext_address == 0) {
@@ -1229,7 +1268,13 @@
 	}
 
 	/* record the fact that Board Data IS initialized */
-	ath6kl_bmi_write_hi32(ar, hi_board_data_initialized, 1);
+	if ((ar->version.target_ver == AR6004_HW_1_3_VERSION) ||
+	    (ar->version.target_ver == AR6004_HW_3_0_VERSION))
+		param = board_data_size;
+	else
+		param = 1;
+
+	ath6kl_bmi_write_hi32(ar, hi_board_data_initialized, param);
 
 	return ret;
 }
@@ -1360,7 +1405,11 @@
 	}
 
 	ath6kl_bmi_write_hi32(ar, hi_ota_testscript, address);
-	ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, 4096);
+
+	if ((ar->version.target_ver != AR6004_HW_1_3_VERSION) &&
+	    (ar->version.target_ver != AR6004_HW_3_0_VERSION))
+		ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, 4096);
+
 	ath6kl_bmi_write_hi32(ar, hi_test_apps_related, 1);
 
 	return 0;
@@ -1566,6 +1615,11 @@
 	{ ATH6KL_FW_CAPABILITY_REGDOMAIN, "regdomain" },
 	{ ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, "sched-scan-v2" },
 	{ ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, "hb-poll" },
+	{ ATH6KL_FW_CAPABILITY_64BIT_RATES, "64bit-rates" },
+	{ ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS, "ap-inactivity-mins" },
+	{ ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT, "map-lp-endpoint" },
+	{ ATH6KL_FW_CAPABILITY_RATETABLE_MCS15, "ratetable-mcs15" },
+	{ ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM, "no-ip-checksum" },
 };
 
 static const char *ath6kl_init_get_fw_capa_name(unsigned int id)
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index d565546..21516bc 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -702,6 +702,7 @@
 	struct ath6kl *ar = vif->ar;
 	struct target_stats *stats = &vif->target_stats;
 	struct tkip_ccmp_stats *ccmp_stats;
+	s32 rate;
 	u8 ac;
 
 	if (len < sizeof(*tgt_stats))
@@ -731,8 +732,9 @@
 		le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt);
 	stats->tx_rts_fail_cnt +=
 		le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt);
-	stats->tx_ucast_rate =
-	    ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate));
+
+	rate = a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate);
+	stats->tx_ucast_rate = ath6kl_wmi_get_rate(ar->wmi, rate);
 
 	stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
 	stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte);
@@ -749,8 +751,9 @@
 		le32_to_cpu(tgt_stats->stats.rx.key_cache_miss);
 	stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err);
 	stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame);
-	stats->rx_ucast_rate =
-	    ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate));
+
+	rate = a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate);
+	stats->rx_ucast_rate = ath6kl_wmi_get_rate(ar->wmi, rate);
 
 	ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats;
 
@@ -1290,6 +1293,8 @@
 
 void init_netdev(struct net_device *dev)
 {
+	struct ath6kl *ar = ath6kl_priv(dev);
+
 	dev->netdev_ops = &ath6kl_netdev_ops;
 	dev->destructor = free_netdev;
 	dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
@@ -1301,7 +1306,9 @@
 					WMI_MAX_TX_META_SZ +
 					ATH6KL_HTC_ALIGN_BYTES, 4);
 
-	dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+	if (!test_bit(ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM,
+		      ar->fw_capabilities))
+		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
 
 	return;
 }
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 3afc5a4..c443258 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -802,7 +802,8 @@
 		break;
 	case WMI_DATA_VI_SVC:
 
-		if (ar->hw.flags & ATH6KL_HW_MAP_LP_ENDPOINT)
+		if (test_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
+			     ar->fw_capabilities))
 			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
 		else
 			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
@@ -814,7 +815,8 @@
 		break;
 	case WMI_DATA_VO_SVC:
 
-		if (ar->hw.flags & ATH6KL_HW_MAP_LP_ENDPOINT)
+		if (test_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
+			     ar->fw_capabilities))
 			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
 		else
 			*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
@@ -1208,6 +1210,7 @@
 
 /* table of devices that work with this driver */
 static struct usb_device_id ath6kl_usb_ids[] = {
+	{USB_DEVICE(0x0cf3, 0x9375)},
 	{USB_DEVICE(0x0cf3, 0x9374)},
 	{ /* Terminating entry */ },
 };
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 4d7f9e4..94df345 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -59,6 +59,55 @@
 	{0, 0}
 };
 
+static const s32 wmi_rate_tbl_mcs15[][2] = {
+	/* {W/O SGI, with SGI} */
+	{1000, 1000},
+	{2000, 2000},
+	{5500, 5500},
+	{11000, 11000},
+	{6000, 6000},
+	{9000, 9000},
+	{12000, 12000},
+	{18000, 18000},
+	{24000, 24000},
+	{36000, 36000},
+	{48000, 48000},
+	{54000, 54000},
+	{6500, 7200},     /* HT 20, MCS 0 */
+	{13000, 14400},
+	{19500, 21700},
+	{26000, 28900},
+	{39000, 43300},
+	{52000, 57800},
+	{58500, 65000},
+	{65000, 72200},
+	{13000, 14400},   /* HT 20, MCS 8 */
+	{26000, 28900},
+	{39000, 43300},
+	{52000, 57800},
+	{78000, 86700},
+	{104000, 115600},
+	{117000, 130000},
+	{130000, 144400}, /* HT 20, MCS 15 */
+	{13500, 15000},   /* HT 40, MCS 0 */
+	{27000, 30000},
+	{40500, 45000},
+	{54000, 60000},
+	{81000, 90000},
+	{108000, 120000},
+	{121500, 135000},
+	{135000, 150000},
+	{27000, 30000},   /* HT 40, MCS 8 */
+	{54000, 60000},
+	{81000, 90000},
+	{108000, 120000},
+	{162000, 180000},
+	{216000, 240000},
+	{243000, 270000},
+	{270000, 300000}, /* HT 40, MCS 15 */
+	{0, 0}
+};
+
 /* 802.1d to AC mapping. Refer pg 57 of WMM-test-plan-v1.2 */
 static const u8 up_to_ac[] = {
 	WMM_AC_BE,
@@ -2838,7 +2887,8 @@
 {
 	struct ath6kl *ar = wmi->parent_dev;
 
-	if (ar->hw.flags & ATH6KL_HW_64BIT_RATES)
+	if (test_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES,
+		     ar->fw_capabilities))
 		return ath6kl_set_bitrate_mask64(wmi, if_idx, mask);
 	else
 		return ath6kl_set_bitrate_mask32(wmi, if_idx, mask);
@@ -3279,9 +3329,11 @@
 				   NO_SYNC_WMIFLAG);
 }
 
-s32 ath6kl_wmi_get_rate(s8 rate_index)
+s32 ath6kl_wmi_get_rate(struct wmi *wmi, s8 rate_index)
 {
+	struct ath6kl *ar = wmi->parent_dev;
 	u8 sgi = 0;
+	s32 ret;
 
 	if (rate_index == RATE_AUTO)
 		return 0;
@@ -3292,10 +3344,20 @@
 		sgi = 1;
 	}
 
-	if (WARN_ON(rate_index > RATE_MCS_7_40))
-		rate_index = RATE_MCS_7_40;
+	if (test_bit(ATH6KL_FW_CAPABILITY_RATETABLE_MCS15,
+		     ar->fw_capabilities)) {
+		if (WARN_ON(rate_index >= ARRAY_SIZE(wmi_rate_tbl_mcs15)))
+			return 0;
 
-	return wmi_rate_tbl[(u32) rate_index][sgi];
+		ret = wmi_rate_tbl_mcs15[(u32) rate_index][sgi];
+	} else {
+		if (WARN_ON(rate_index >= ARRAY_SIZE(wmi_rate_tbl)))
+			return 0;
+
+		ret = wmi_rate_tbl[(u32) rate_index][sgi];
+	}
+
+	return ret;
 }
 
 static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index bb23fc0..19f88b4 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -2632,7 +2632,7 @@
 			     struct ath6kl_htcap *htcap);
 int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
 
-s32 ath6kl_wmi_get_rate(s8 rate_index);
+s32 ath6kl_wmi_get_rate(struct wmi *wmi, s8 rate_index);
 
 int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
 			  __be32 ips0, __be32 ips1);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 741b38d..59af9f9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -281,7 +281,7 @@
 
 	ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
 		| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
-		| SM(i->txpower, AR_XmitPower)
+		| SM(i->txpower, AR_XmitPower0)
 		| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
 		| (i->flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
 		| (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
@@ -306,6 +306,10 @@
 		| set11nRateFlags(i->rates, 2)
 		| set11nRateFlags(i->rates, 3)
 		| SM(i->rtscts_rate, AR_RTSCTSRate);
+
+	ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
+	ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
+	ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
 }
 
 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 729ffbf..71e38e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -101,7 +101,7 @@
 
 	ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
 		| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
-		| SM(i->txpower, AR_XmitPower)
+		| SM(i->txpower, AR_XmitPower0)
 		| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
 		| (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
 		| (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0)
@@ -151,6 +151,10 @@
 		| SM(i->rtscts_rate, AR_RTSCTSRate);
 
 	ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
+
+	ACCESS_ONCE(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
+	ACCESS_ONCE(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
+	ACCESS_ONCE(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
 }
 
 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 11b5e4d..7fc13a8 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -182,7 +182,8 @@
 
 struct ath_frame_info {
 	struct ath_buf *bf;
-	int framelen;
+	u16 framelen;
+	s8 txq;
 	enum ath9k_key_type keytype;
 	u8 keyix;
 	u8 rtscts_rate;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index ce073e9..d227936 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -202,7 +202,7 @@
 	if (kstrtoul(buf, 0, &ani))
 		return -EINVAL;
 
-	if (ani < 0 || ani > 1)
+	if (ani > 1)
 		return -EINVAL;
 
 	common->disable_ani = !ani;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index da76867..6c56caf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -346,8 +346,14 @@
 #define AR_FrameLen         0x00000fff
 #define AR_VirtMoreFrag     0x00001000
 #define AR_TxCtlRsvd00      0x0000e000
-#define AR_XmitPower        0x003f0000
-#define AR_XmitPower_S      16
+#define AR_XmitPower0       0x003f0000
+#define AR_XmitPower0_S     16
+#define AR_XmitPower1	    0x3f000000
+#define AR_XmitPower1_S     24
+#define AR_XmitPower2	    0x3f000000
+#define AR_XmitPower2_S     24
+#define AR_XmitPower3	    0x3f000000
+#define AR_XmitPower3_S     24
 #define AR_RTSEnable        0x00400000
 #define AR_VEOL             0x00800000
 #define AR_ClrDestMask      0x01000000
diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/spectral.c
index 99f4de9..5fe29b9 100644
--- a/drivers/net/wireless/ath/ath9k/spectral.c
+++ b/drivers/net/wireless/ath/ath9k/spectral.c
@@ -313,7 +313,7 @@
 	if (kstrtoul(buf, 0, &val))
 		return -EINVAL;
 
-	if (val < 0 || val > 1)
+	if (val > 1)
 		return -EINVAL;
 
 	sc->spec_config.short_repeat = val;
@@ -361,7 +361,7 @@
 	if (kstrtoul(buf, 0, &val))
 		return -EINVAL;
 
-	if (val < 0 || val > 255)
+	if (val > 255)
 		return -EINVAL;
 
 	sc->spec_config.count = val;
@@ -409,7 +409,7 @@
 	if (kstrtoul(buf, 0, &val))
 		return -EINVAL;
 
-	if (val < 0 || val > 255)
+	if (val > 255)
 		return -EINVAL;
 
 	sc->spec_config.period = val;
@@ -457,7 +457,7 @@
 	if (kstrtoul(buf, 0, &val))
 		return -EINVAL;
 
-	if (val < 0 || val > 15)
+	if (val > 15)
 		return -EINVAL;
 
 	sc->spec_config.fft_period = val;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d4927c9..704fcbc 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -157,15 +157,14 @@
 			     struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	int q, hw_queue;
+	struct ath_frame_info *fi = get_frame_info(skb);
+	int hw_queue;
+	int q = fi->txq;
 
-	q = skb_get_queue_mapping(skb);
-	if (txq == sc->tx.uapsdq)
-		txq = sc->tx.txq_map[q];
-
-	if (txq != sc->tx.txq_map[q])
+	if (q < 0)
 		return;
 
+	txq = sc->tx.txq_map[q];
 	if (WARN_ON(--txq->pending_frames < 0))
 		txq->pending_frames = 0;
 
@@ -898,6 +897,15 @@
 
 		tx_info = IEEE80211_SKB_CB(skb);
 		tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
+		/*
+		 * No aggregation session is running, but there may be frames
+		 * from a previous session or a failed attempt in the queue.
+		 * Send them out as normal data frames
+		 */
+		if (!tid->active)
+			tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
 		if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
 			bf->bf_state.bf_type = 0;
 			return bf;
@@ -2036,6 +2044,7 @@
 		an = (struct ath_node *) sta->drv_priv;
 
 	memset(fi, 0, sizeof(*fi));
+	fi->txq = -1;
 	if (hw_key)
 		fi->keyix = hw_key->hw_key_idx;
 	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
@@ -2187,6 +2196,7 @@
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_sta *sta = txctl->sta;
 	struct ieee80211_vif *vif = info->control.vif;
+	struct ath_frame_info *fi = get_frame_info(skb);
 	struct ath_vif *avp = NULL;
 	struct ath_softc *sc = hw->priv;
 	struct ath_txq *txq = txctl->txq;
@@ -2216,11 +2226,13 @@
 	hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;
 
 	ath_txq_lock(sc, txq);
-	if (txq == sc->tx.txq_map[q] &&
-	    ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
-	    !txq->stopped) {
-		ieee80211_stop_queue(sc->hw, hw_queue);
-		txq->stopped = true;
+	if (txq == sc->tx.txq_map[q]) {
+		fi->txq = q;
+		if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
+		    !txq->stopped) {
+			ieee80211_stop_queue(sc->hw, hw_queue);
+			txq->stopped = true;
+		}
 	}
 
 	queue = ieee80211_is_data_present(hdr->frame_control);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index a868c5e..8f66186 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -448,8 +448,10 @@
 	char *kbuf = kmalloc(len + 1, GFP_KERNEL);
 	if (!kbuf)
 		return -ENOMEM;
-	if (copy_from_user(kbuf, buf, len))
+	if (copy_from_user(kbuf, buf, len)) {
+		kfree(kbuf);
 		return -EIO;
+	}
 
 	kbuf[len] = '\0';
 	rc = kstrtol(kbuf, 0, &channel);
@@ -963,6 +965,26 @@
 };
 
 /*----------------*/
+static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
+				       struct dentry *dbg)
+{
+	int i;
+	char name[32];
+
+	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+		struct debugfs_blob_wrapper *blob = &wil->blobs[i];
+		const struct fw_map *map = &fw_mapping[i];
+
+		if (!map->name)
+			continue;
+
+		blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
+		blob->size = map->to - map->from;
+		snprintf(name, sizeof(name), "blob_%s", map->name);
+		wil_debugfs_create_ioblob(name, S_IRUGO, dbg, blob);
+	}
+}
+
 int wil6210_debugfs_init(struct wil6210_priv *wil)
 {
 	struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
@@ -986,6 +1008,8 @@
 			   &wil->secure_pcp);
 	wil_debugfs_create_ulong("status", S_IRUGO | S_IWUSR, dbg,
 				 &wil->status);
+	debugfs_create_u32("fw_version", S_IRUGO, dbg, &wil->fw_version);
+	debugfs_create_x32("hw_version", S_IRUGO, dbg, &wil->hw_version);
 
 	wil6210_debugfs_create_ISR(wil, "USER_ICR", dbg,
 				   HOSTADDR(RGF_USER_USER_ICR));
@@ -998,6 +1022,9 @@
 	wil6210_debugfs_create_pseudo_ISR(wil, dbg);
 	wil6210_debugfs_create_ITR_CNT(wil, dbg);
 
+	wil_debugfs_create_iomem_x32("RGF_USER_USAGE_1", S_IRUGO, dbg,
+				     wil->csr +
+				     HOSTADDR(RGF_USER_USAGE_1));
 	debugfs_create_u32("mem_addr", S_IRUGO | S_IWUSR, dbg, &mem_addr);
 	debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
 
@@ -1010,34 +1037,7 @@
 	debugfs_create_file("link", S_IRUGO, dbg, wil, &fops_link);
 	debugfs_create_file("info", S_IRUGO, dbg, wil, &fops_info);
 
-	wil->rgf_blob.data = (void * __force)wil->csr + 0;
-	wil->rgf_blob.size = 0xa000;
-	wil_debugfs_create_ioblob("blob_rgf", S_IRUGO, dbg, &wil->rgf_blob);
-
-	wil->fw_code_blob.data = (void * __force)wil->csr + 0x40000;
-	wil->fw_code_blob.size = 0x40000;
-	wil_debugfs_create_ioblob("blob_fw_code", S_IRUGO, dbg,
-				  &wil->fw_code_blob);
-
-	wil->fw_data_blob.data = (void * __force)wil->csr + 0x80000;
-	wil->fw_data_blob.size = 0x8000;
-	wil_debugfs_create_ioblob("blob_fw_data", S_IRUGO, dbg,
-				  &wil->fw_data_blob);
-
-	wil->fw_peri_blob.data = (void * __force)wil->csr + 0x88000;
-	wil->fw_peri_blob.size = 0x18000;
-	wil_debugfs_create_ioblob("blob_fw_peri", S_IRUGO, dbg,
-				  &wil->fw_peri_blob);
-
-	wil->uc_code_blob.data = (void * __force)wil->csr + 0xa0000;
-	wil->uc_code_blob.size = 0x10000;
-	wil_debugfs_create_ioblob("blob_uc_code", S_IRUGO, dbg,
-				  &wil->uc_code_blob);
-
-	wil->uc_data_blob.data = (void * __force)wil->csr + 0xb0000;
-	wil->uc_data_blob.size = 0x4000;
-	wil_debugfs_create_ioblob("blob_uc_data", S_IRUGO, dbg,
-				  &wil->uc_data_blob);
+	wil6210_debugfs_init_blobs(wil, dbg);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 53a689e..3704d2a 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -314,8 +314,9 @@
 	int delay = 0;
 	u32 hw_state;
 	u32 rev_id;
+	bool is_sparrow = (wil->board->board == WIL_BOARD_SPARROW);
 
-	wil_dbg_misc(wil, "Resetting...\n");
+	wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->board->name);
 
 	/* register read */
 #define R(a) ioread32(wil->csr + HOSTADDR(a))
@@ -328,35 +329,59 @@
 
 	wil->hw_version = R(RGF_USER_FW_REV_ID);
 	rev_id = wil->hw_version & 0xff;
+
+	/* Clear MAC link up */
+	S(RGF_HP_CTRL, BIT(15));
 	/* hpal_perst_from_pad_src_n_mask */
 	S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
 	/* car_perst_rst_src_n_mask */
 	S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
 	wmb(); /* order is important here */
 
+	if (is_sparrow) {
+		W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+		wmb(); /* order is important here */
+	}
+
 	W(RGF_USER_MAC_CPU_0,  BIT(1)); /* mac_cpu_man_rst */
 	W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
 	wmb(); /* order is important here */
 
 	W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
 	W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
+	W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, is_sparrow ? 0x000000B0 : 0x00000170);
 	W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
 	wmb(); /* order is important here */
 
+	if (is_sparrow) {
+		W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+		wmb(); /* order is important here */
+	}
+
 	W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
 	W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
 	W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
 	W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 	wmb(); /* order is important here */
 
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
-	if (rev_id == 1) {
-		W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
-	} else {
-		W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
+	if (is_sparrow) {
+		W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+		/* reset A2 PCIE AHB */
 		W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
+
+	} else {
+		W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
+		if (rev_id == 1) {
+			/* reset A1 BOTH PCIE AHB & PCIE RGF */
+			W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+		} else {
+			W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
+			W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
+		}
+
 	}
+
+	/* TODO: check the ordering here; Erez's code differs */
 	W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 	wmb(); /* order is important here */
 
@@ -371,7 +396,8 @@
 		}
 	} while (hw_state != HW_MACHINE_BOOT_DONE);
 
-	if (rev_id == 2)
+	/* TODO: Erez, check rev_id != 1 */
+	if (!is_sparrow && (rev_id != 1))
 		W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
 
 	C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 77b6272..d3fbfa2 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -122,10 +122,12 @@
 	struct wil6210_priv *wil;
 	struct device *dev = &pdev->dev;
 	void __iomem *csr;
+	struct wil_board *board = (struct wil_board *)id->driver_data;
 	int rc;
 
 	/* check HW */
-	dev_info(&pdev->dev, WIL_NAME " device found [%04x:%04x] (rev %x)\n",
+	dev_info(&pdev->dev, WIL_NAME
+		 " \"%s\" device found [%04x:%04x] (rev %x)\n", board->name,
 		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
 
 	if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
@@ -175,6 +177,7 @@
 
 	pci_set_drvdata(pdev, wil);
 	wil->pdev = pdev;
+	wil->board = board;
 
 	wil6210_clear_irq(wil);
 	/* FW should raise IRQ when ready */
@@ -225,8 +228,21 @@
 	pci_disable_device(pdev);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
-	{ PCI_DEVICE(0x1ae9, 0x0301) },
+static const struct wil_board wil_board_marlon = {
+	.board = WIL_BOARD_MARLON,
+	.name = "marlon",
+};
+
+static const struct wil_board wil_board_sparrow = {
+	.board = WIL_BOARD_SPARROW,
+	.name = "sparrow",
+};
+
+static const struct pci_device_id wil6210_pcie_ids[] = {
+	{ PCI_DEVICE(0x1ae9, 0x0301),
+	  .driver_data = (kernel_ulong_t)&wil_board_marlon },
+	{ PCI_DEVICE(0x1ae9, 0x0310),
+	  .driver_data = (kernel_ulong_t)&wil_board_sparrow },
 	{ /* end: all zeroes */	},
 };
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index af4b93e..d346794 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1108,8 +1108,10 @@
 		while (vring->swtail != new_swtail) {
 			struct vring_tx_desc dd, *d = &dd;
 			u16 dmalen;
-			struct wil_ctx *ctx = &vring->ctx[vring->swtail];
-			struct sk_buff *skb = ctx->skb;
+			struct sk_buff *skb;
+
+			ctx = &vring->ctx[vring->swtail];
+			skb = ctx->skb;
 			_d = &vring->va[vring->swtail].tx;
 
 			*d = *_d;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 4249066..67e9624 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -24,6 +24,13 @@
 
 #define WIL_NAME "wil6210"
 
+struct wil_board {
+	int board;
+#define WIL_BOARD_MARLON	(1)
+#define WIL_BOARD_SPARROW	(2)
+	const char * const name;
+};
+
 /**
  * extract bits [@b0:@b1] (inclusive) from the value @x
  * it should be @b0 <= @b1, or result is incorrect
@@ -78,6 +85,7 @@
 } __packed;
 
 /* registers - FW addresses */
+#define RGF_USER_USAGE_1		(0x880004)
 #define RGF_USER_HW_MACHINE_STATE	(0x8801dc)
 	#define HW_MACHINE_BOOT_DONE	(0x3fffffd)
 #define RGF_USER_USER_CPU_0		(0x8801e0)
@@ -93,6 +101,7 @@
 #define RGF_USER_CLKS_CTL_SW_RST_MASK_0	(0x880b14)
 #define RGF_USER_USER_ICR		(0x880b4c) /* struct RGF_ICR */
 	#define BIT_USER_USER_ICR_SW_INT_2	BIT(18)
+#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0	(0x880c18)
 
 #define RGF_DMA_EP_TX_ICR		(0x881bb4) /* struct RGF_ICR */
 	#define BIT_DMA_EP_TX_ICR_TX_DONE	BIT(0)
@@ -121,6 +130,7 @@
 	#define BIT_DMA_PSEUDO_CAUSE_TX		BIT(1)
 	#define BIT_DMA_PSEUDO_CAUSE_MISC	BIT(2)
 
+#define RGF_HP_CTRL			(0x88265c)
 #define RGF_PCIE_LOS_COUNTER_CTL	(0x882dc4)
 
 /* popular locations */
@@ -135,6 +145,14 @@
 #define ISR_MISC_FW_ERROR	BIT_DMA_EP_MISC_ICR_FW_INT(3)
 
 /* Hardware definitions end */
+struct fw_map {
+	u32 from; /* linker address - from, inclusive */
+	u32 to;   /* linker address - to, exclusive */
+	u32 host; /* PCI/Host address - BAR0 + 0x880000 */
+	const char *name; /* for debugfs */
+};
+/* array size should be in sync with the actual definition in wmi.c */
+extern const struct fw_map fw_mapping[7];
 
 /**
  * mk_cidxtid - construct @cidxtid field
@@ -365,6 +383,7 @@
 	ulong status;
 	u32 fw_version;
 	u32 hw_version;
+	struct wil_board *board;
 	u8 n_mids; /* number of additional MIDs as reported by FW */
 	int recovery_count; /* num of FW recovery attempts in a short time */
 	unsigned long last_fw_recovery; /* jiffies of last fw recovery */
@@ -415,12 +434,7 @@
 	atomic_t isr_count_rx, isr_count_tx;
 	/* debugfs */
 	struct dentry *debug;
-	struct debugfs_blob_wrapper fw_code_blob;
-	struct debugfs_blob_wrapper fw_data_blob;
-	struct debugfs_blob_wrapper fw_peri_blob;
-	struct debugfs_blob_wrapper uc_code_blob;
-	struct debugfs_blob_wrapper uc_data_blob;
-	struct debugfs_blob_wrapper rgf_blob;
+	struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
 };
 
 #define wil_to_wiphy(i) (i->wdev->wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index a136dab..1d1d0af 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -65,18 +65,17 @@
 
 /**
  * @fw_mapping provides memory remapping table
+ *
+ * array size should be in sync with the declaration in wil6210.h
  */
-static const struct {
-	u32 from; /* linker address - from, inclusive */
-	u32 to;   /* linker address - to, exclusive */
-	u32 host; /* PCI/Host address - BAR0 + 0x880000 */
-} fw_mapping[] = {
-	{0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */
-	{0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
-	{0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
-	{0x880000, 0x88a000, 0x880000}, /* various RGF */
-	{0x88b000, 0x88c000, 0x88b000}, /* Pcie_ext_rgf */
-	{0x8c0000, 0x949000, 0x8c0000}, /* trivial mapping for upper area */
+const struct fw_map fw_mapping[] = {
+	{0x000000, 0x040000, 0x8c0000, "fw_code"}, /* FW code RAM      256k */
+	{0x800000, 0x808000, 0x900000, "fw_data"}, /* FW data RAM       32k */
+	{0x840000, 0x860000, 0x908000, "fw_peri"}, /* periph. data RAM 128k */
+	{0x880000, 0x88a000, 0x880000, "rgf"},     /* various RGF       40k */
+	{0x88a000, 0x88b000, 0x88a000, "AGC_tbl"}, /* AGC table          4k */
+	{0x88b000, 0x88c000, 0x88b000, "rgf_ext"}, /* Pcie_ext_rgf       4k */
+	{0x8c0000, 0x949000, 0x8c0000, "upper"},   /* upper area       548k */
 	/*
 	 * 920000..930000 ucode code RAM
 	 * 930000..932000 ucode data RAM
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 15aaeb1..d7055fe 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -210,6 +210,9 @@
 	CHAN2G(13, 2472, 0),
 	CHAN2G(14, 2484, 0),
 };
+
+/* No support for the last 3 channels (12, 13, 14) */
+#define b43_2ghz_chantable_limited_size		11
 #undef CHAN2G
 
 #define CHAN4G(_channel, _flags) {				\
@@ -287,6 +290,14 @@
 	CHAN5G(182, 0),
 };
 
+static struct ieee80211_channel b43_5ghz_nphy_chantable_limited[] = {
+	CHAN5G(36, 0),		CHAN5G(40, 0),
+	CHAN5G(44, 0),		CHAN5G(48, 0),
+	CHAN5G(149, 0),		CHAN5G(153, 0),
+	CHAN5G(157, 0),		CHAN5G(161, 0),
+	CHAN5G(165, 0),
+};
+
 static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
 	CHAN5G(34, 0),		CHAN5G(36, 0),
 	CHAN5G(38, 0),		CHAN5G(40, 0),
@@ -319,6 +330,14 @@
 	.n_bitrates	= b43_a_ratetable_size,
 };
 
+static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = {
+	.band		= IEEE80211_BAND_5GHZ,
+	.channels	= b43_5ghz_nphy_chantable_limited,
+	.n_channels	= ARRAY_SIZE(b43_5ghz_nphy_chantable_limited),
+	.bitrates	= b43_a_ratetable,
+	.n_bitrates	= b43_a_ratetable_size,
+};
+
 static struct ieee80211_supported_band b43_band_5GHz_aphy = {
 	.band		= IEEE80211_BAND_5GHZ,
 	.channels	= b43_5ghz_aphy_chantable,
@@ -335,6 +354,14 @@
 	.n_bitrates	= b43_g_ratetable_size,
 };
 
+static struct ieee80211_supported_band b43_band_2ghz_limited = {
+	.band		= IEEE80211_BAND_2GHZ,
+	.channels	= b43_2ghz_chantable,
+	.n_channels	= b43_2ghz_chantable_limited_size,
+	.bitrates	= b43_g_ratetable,
+	.n_bitrates	= b43_g_ratetable_size,
+};
+
 static void b43_wireless_core_exit(struct b43_wldev *dev);
 static int b43_wireless_core_init(struct b43_wldev *dev);
 static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev);
@@ -2953,6 +2980,45 @@
 	}
 }
 
+/* brcms_b_switch_macfreq */
+void b43_mac_switch_freq(struct b43_wldev *dev, u8 spurmode)
+{
+	u16 chip_id = dev->dev->chip_id;
+
+	if (chip_id == BCMA_CHIP_ID_BCM43217 ||
+	    chip_id == BCMA_CHIP_ID_BCM43222 ||
+	    chip_id == BCMA_CHIP_ID_BCM43224 ||
+	    chip_id == BCMA_CHIP_ID_BCM43225 ||
+	    chip_id == BCMA_CHIP_ID_BCM43227 ||
+	    chip_id == BCMA_CHIP_ID_BCM43228) {
+		switch (spurmode) {
+		case 2: /* 126 MHz */
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x2082);
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+			break;
+		case 1: /* 123 MHz */
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x5341);
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+			break;
+		default: /* 120 MHz */
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x8889);
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+			break;
+		}
+	} else if (dev->phy.type == B43_PHYTYPE_LCN) {
+		switch (spurmode) {
+		case 1: /* 82 MHz */
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x7CE0);
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0xC);
+			break;
+		default: /* 80 MHz */
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0xCCCD);
+			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0xC);
+			break;
+		}
+	}
+}
+
 static void b43_adjust_opmode(struct b43_wldev *dev)
 {
 	struct b43_wl *wl = dev->wl;
@@ -4335,8 +4401,9 @@
 	u8 phy_type;
 	u8 phy_rev;
 	u16 radio_manuf;
-	u16 radio_ver;
+	u16 radio_id;
 	u16 radio_rev;
+	u8 radio_ver;
 	int unsupported = 0;
 
 	/* Get PHY versioning */
@@ -4360,7 +4427,7 @@
 #endif
 #ifdef CONFIG_B43_PHY_N
 	case B43_PHYTYPE_N:
-		if (phy_rev > 9)
+		if (phy_rev >= 19)
 			unsupported = 1;
 		break;
 #endif
@@ -4402,7 +4469,9 @@
 		radio_rev = b43_read16(dev, B43_MMIO_RADIO24_DATA);
 
 		b43_write16(dev, B43_MMIO_RADIO24_CONTROL, 1);
-		radio_ver = b43_read16(dev, B43_MMIO_RADIO24_DATA);
+		radio_id = b43_read16(dev, B43_MMIO_RADIO24_DATA);
+
+		radio_ver = 0; /* Is there a version somewhere? */
 	} else if (core_rev >= 24) {
 		u16 radio24[3];
 
@@ -4411,12 +4480,10 @@
 			radio24[tmp] = b43_read16(dev, B43_MMIO_RADIO24_DATA);
 		}
 
-		/* Broadcom uses "id" for our "ver" and has separated "ver" */
-		/* radio_ver = (radio24[0] & 0xF0) >> 4; */
-
 		radio_manuf = 0x17F;
-		radio_ver = (radio24[2] << 8) | radio24[1];
+		radio_id = (radio24[2] << 8) | radio24[1];
 		radio_rev = (radio24[0] & 0xF);
+		radio_ver = (radio24[0] & 0xF0) >> 4;
 	} else {
 		if (dev->dev->chip_id == 0x4317) {
 			if (dev->dev->chip_rev == 0)
@@ -4435,15 +4502,16 @@
 				<< 16;
 		}
 		radio_manuf = (tmp & 0x00000FFF);
-		radio_ver = (tmp & 0x0FFFF000) >> 12;
+		radio_id = (tmp & 0x0FFFF000) >> 12;
 		radio_rev = (tmp & 0xF0000000) >> 28;
+		radio_ver = 0; /* Probably not available on old hw */
 	}
 
 	if (radio_manuf != 0x17F /* Broadcom */)
 		unsupported = 1;
 	switch (phy_type) {
 	case B43_PHYTYPE_A:
-		if (radio_ver != 0x2060)
+		if (radio_id != 0x2060)
 			unsupported = 1;
 		if (radio_rev != 1)
 			unsupported = 1;
@@ -4451,43 +4519,49 @@
 			unsupported = 1;
 		break;
 	case B43_PHYTYPE_B:
-		if ((radio_ver & 0xFFF0) != 0x2050)
+		if ((radio_id & 0xFFF0) != 0x2050)
 			unsupported = 1;
 		break;
 	case B43_PHYTYPE_G:
-		if (radio_ver != 0x2050)
+		if (radio_id != 0x2050)
 			unsupported = 1;
 		break;
 	case B43_PHYTYPE_N:
-		if (radio_ver != 0x2055 && radio_ver != 0x2056)
+		if (radio_id != 0x2055 && radio_id != 0x2056 &&
+		    radio_id != 0x2057)
+			unsupported = 1;
+		if (radio_id == 0x2057 &&
+		    !(radio_rev == 9 || radio_rev == 14))
 			unsupported = 1;
 		break;
 	case B43_PHYTYPE_LP:
-		if (radio_ver != 0x2062 && radio_ver != 0x2063)
+		if (radio_id != 0x2062 && radio_id != 0x2063)
 			unsupported = 1;
 		break;
 	case B43_PHYTYPE_HT:
-		if (radio_ver != 0x2059)
+		if (radio_id != 0x2059)
 			unsupported = 1;
 		break;
 	case B43_PHYTYPE_LCN:
-		if (radio_ver != 0x2064)
+		if (radio_id != 0x2064)
 			unsupported = 1;
 		break;
 	default:
 		B43_WARN_ON(1);
 	}
 	if (unsupported) {
-		b43err(dev->wl, "FOUND UNSUPPORTED RADIO "
-		       "(Manuf 0x%X, Version 0x%X, Revision %u)\n",
-		       radio_manuf, radio_ver, radio_rev);
+		b43err(dev->wl,
+		       "FOUND UNSUPPORTED RADIO (Manuf 0x%X, ID 0x%X, Revision %u, Version %u)\n",
+		       radio_manuf, radio_id, radio_rev, radio_ver);
 		return -EOPNOTSUPP;
 	}
-	b43dbg(dev->wl, "Found Radio: Manuf 0x%X, Version 0x%X, Revision %u\n",
-	       radio_manuf, radio_ver, radio_rev);
+	b43info(dev->wl,
+		"Found Radio: Manuf 0x%X, ID 0x%X, Revision %u, Version %u\n",
+		radio_manuf, radio_id, radio_rev, radio_ver);
 
+	/* FIXME: b43 treats "id" as "ver" and ignores the real "ver" */
 	phy->radio_manuf = radio_manuf;
-	phy->radio_ver = radio_ver;
+	phy->radio_ver = radio_id;
 	phy->radio_rev = radio_rev;
 
 	phy->analog = analog_type;
@@ -5095,12 +5169,24 @@
 			   bool have_2ghz_phy, bool have_5ghz_phy)
 {
 	struct ieee80211_hw *hw = dev->wl->hw;
+	struct b43_phy *phy = &dev->phy;
+	bool limited_2g;
+	bool limited_5g;
+
+	/* We don't support all 2 GHz channels on some devices */
+	limited_2g = phy->radio_ver == 0x2057 &&
+		     (phy->radio_rev == 9 || phy->radio_rev == 14);
+	limited_5g = phy->radio_ver == 0x2057 &&
+		     phy->radio_rev == 9;
 
 	if (have_2ghz_phy)
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &b43_band_2GHz;
+		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = limited_2g ?
+			&b43_band_2ghz_limited : &b43_band_2GHz;
 	if (dev->phy.type == B43_PHYTYPE_N) {
 		if (have_5ghz_phy)
-			hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_nphy;
+			hw->wiphy->bands[IEEE80211_BAND_5GHZ] = limited_5g ?
+				&b43_band_5GHz_nphy_limited :
+				&b43_band_5GHz_nphy;
 	} else {
 		if (have_5ghz_phy)
 			hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
@@ -5248,14 +5334,15 @@
 	b43_supported_bands(dev, &have_2ghz_phy, &have_5ghz_phy);
 
 	/* We don't support 5 GHz on some PHYs yet */
-	switch (dev->phy.type) {
-	case B43_PHYTYPE_A:
-	case B43_PHYTYPE_G:
-	case B43_PHYTYPE_N:
-	case B43_PHYTYPE_LP:
-	case B43_PHYTYPE_HT:
-		b43warn(wl, "5 GHz band is unsupported on this PHY\n");
-		have_5ghz_phy = false;
+	if (have_5ghz_phy) {
+		switch (dev->phy.type) {
+		case B43_PHYTYPE_A:
+		case B43_PHYTYPE_G:
+		case B43_PHYTYPE_LP:
+		case B43_PHYTYPE_HT:
+			b43warn(wl, "5 GHz band is unsupported on this PHY\n");
+			have_5ghz_phy = false;
+		}
 	}
 
 	if (!have_2ghz_phy && !have_5ghz_phy) {
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index f476fc3..9f22e4b 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -99,6 +99,7 @@
 void b43_mac_suspend(struct b43_wldev *dev);
 void b43_mac_enable(struct b43_wldev *dev);
 void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
+void b43_mac_switch_freq(struct b43_wldev *dev, u8 spurmode);
 
 
 struct b43_request_fw_context;
diff --git a/drivers/net/wireless/b43/phy_lcn.c b/drivers/net/wireless/b43/phy_lcn.c
index 0bafa3b..e76bbdf 100644
--- a/drivers/net/wireless/b43/phy_lcn.c
+++ b/drivers/net/wireless/b43/phy_lcn.c
@@ -54,39 +54,6 @@
 	B43_SENSE_VBAT,
 };
 
-/* In theory it's PHY common function, move if needed */
-/* brcms_b_switch_macfreq */
-static void b43_phy_switch_macfreq(struct b43_wldev *dev, u8 spurmode)
-{
-	if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) {
-		switch (spurmode) {
-		case 2:		/* 126 Mhz */
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x2082);
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
-			break;
-		case 1:		/* 123 Mhz */
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x5341);
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
-			break;
-		default:	/* 120 Mhz */
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x8889);
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
-			break;
-		}
-	} else if (dev->phy.type == B43_PHYTYPE_LCN) {
-		switch (spurmode) {
-		case 1:		/* 82 Mhz */
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0x7CE0);
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0xC);
-			break;
-		default:	/* 80 Mhz */
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, 0xCCCD);
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0xC);
-			break;
-		}
-	}
-}
-
 /**************************************************
  * Radio 2064.
  **************************************************/
@@ -609,7 +576,7 @@
 		b43_phy_write(dev, 0x93b, ((0 << 13) + 23));
 		b43_phy_write(dev, 0x93c, ((0 << 13) + 1989));
 	}
-	b43_phy_switch_macfreq(dev, enable);
+	b43_mac_switch_freq(dev, enable);
 }
 
 /**************************************************
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 50ca6f8..d269fbb 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -36,6 +36,7 @@
 #include "main.h"
 
 struct nphy_txgains {
+	u16 tx_lpf[2];
 	u16 txgm[2];
 	u16 pga[2];
 	u16 pad[2];
@@ -43,6 +44,7 @@
 };
 
 struct nphy_iqcal_params {
+	u16 tx_lpf;
 	u16 txgm;
 	u16 pga;
 	u16 pad;
@@ -69,6 +71,14 @@
 	B43_RFSEQ_UPDATE_GAINU,
 };
 
+enum n_rf_ctl_over_cmd {
+	N_RF_CTL_OVER_CMD_RXRF_PU = 0,
+	N_RF_CTL_OVER_CMD_RX_PU = 1,
+	N_RF_CTL_OVER_CMD_TX_PU = 2,
+	N_RF_CTL_OVER_CMD_RX_GAIN = 3,
+	N_RF_CTL_OVER_CMD_TX_GAIN = 4,
+};
+
 enum n_intc_override {
 	N_INTC_OVERRIDE_OFF = 0,
 	N_INTC_OVERRIDE_TRSW = 1,
@@ -140,11 +150,19 @@
 	b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
 }
 
+static void b43_nphy_rf_ctl_override_rev19(struct b43_wldev *dev, u16 field,
+					   u16 value, u8 core, bool off,
+					   u8 override_id)
+{
+	/* TODO */
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
 static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
 					  u16 value, u8 core, bool off,
 					  u8 override)
 {
+	struct b43_phy *phy = &dev->phy;
 	const struct nphy_rf_control_override_rev7 *e;
 	u16 en_addrs[3][2] = {
 		{ 0x0E7, 0x0EC }, { 0x342, 0x343 }, { 0x346, 0x347 }
@@ -154,6 +172,11 @@
 	u16 val_addr;
 	u8 i;
 
+	if (phy->rev >= 19 || phy->rev < 3) {
+		B43_WARN_ON(1);
+		return;
+	}
+
 	/* Remember: we can get NULL! */
 	e = b43_nphy_get_rf_ctl_over_rev7(dev, field, override);
 
@@ -181,6 +204,50 @@
 	}
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverideOneToMany */
+static void b43_nphy_rf_ctl_override_one_to_many(struct b43_wldev *dev,
+						 enum n_rf_ctl_over_cmd cmd,
+						 u16 value, u8 core, bool off)
+{
+	struct b43_phy *phy = &dev->phy;
+	u16 tmp;
+
+	B43_WARN_ON(phy->rev < 7);
+
+	switch (cmd) {
+	case N_RF_CTL_OVER_CMD_RXRF_PU:
+		b43_nphy_rf_ctl_override_rev7(dev, 0x20, value, core, off, 1);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x10, value, core, off, 1);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x08, value, core, off, 1);
+		break;
+	case N_RF_CTL_OVER_CMD_RX_PU:
+		b43_nphy_rf_ctl_override_rev7(dev, 0x4, value, core, off, 1);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x2, value, core, off, 1);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x1, value, core, off, 1);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x2, value, core, off, 2);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x0800, 0, core, off, 1);
+		break;
+	case N_RF_CTL_OVER_CMD_TX_PU:
+		b43_nphy_rf_ctl_override_rev7(dev, 0x4, value, core, off, 0);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x2, value, core, off, 1);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x1, value, core, off, 2);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x0800, 1, core, off, 1);
+		break;
+	case N_RF_CTL_OVER_CMD_RX_GAIN:
+		tmp = value & 0xFF;
+		b43_nphy_rf_ctl_override_rev7(dev, 0x0800, tmp, core, off, 0);
+		tmp = value >> 8;
+		b43_nphy_rf_ctl_override_rev7(dev, 0x6000, tmp, core, off, 0);
+		break;
+	case N_RF_CTL_OVER_CMD_TX_GAIN:
+		tmp = value & 0x7FFF;
+		b43_nphy_rf_ctl_override_rev7(dev, 0x1000, tmp, core, off, 0);
+		tmp = value >> 14;
+		b43_nphy_rf_ctl_override_rev7(dev, 0x4000, tmp, core, off, 0);
+		break;
+	}
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
 static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
 				     u16 value, u8 core, bool off)
@@ -264,6 +331,8 @@
 	u16 reg, tmp, tmp2, val;
 	int core;
 
+	/* TODO: What about rev19+? Revs 3+ and 7+ are a bit similar */
+
 	for (core = 0; core < 2; core++) {
 		if ((core_sel == 1 && core != 0) ||
 		    (core_sel == 2 && core != 1))
@@ -274,6 +343,7 @@
 		switch (intc_override) {
 		case N_INTC_OVERRIDE_OFF:
 			b43_phy_write(dev, reg, 0);
+			b43_phy_mask(dev, 0x2ff, ~0x2000);
 			b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
 			break;
 		case N_INTC_OVERRIDE_TRSW:
@@ -505,6 +575,14 @@
 	}
 }
 
+/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
+static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
+{
+	if (!offset)
+		offset = b43_is_40mhz(dev) ? 0x159 : 0x154;
+	return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
 static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
 {
@@ -669,10 +747,63 @@
 			b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C2, 0x8);
 		}
 		break;
-	/* TODO */
+	case 9: /* e.g. PHY rev 16 */
+		b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x20);
+		b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x18);
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+			b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x38);
+			b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x0f);
+
+			if (b43_is_40mhz(dev)) {
+				/* TODO */
+			} else {
+				b43_radio_write(dev,
+						R2057_PAD_BIAS_FILTER_BWS_CORE0,
+						0x3c);
+				b43_radio_write(dev,
+						R2057_PAD_BIAS_FILTER_BWS_CORE1,
+						0x3c);
+			}
+		}
+		break;
+	case 14: /* 2 GHz only */
+		b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, 0x1b);
+		b43_radio_write(dev, R2057_CP_KPD_IDAC, 0x3f);
+		b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, 0x1f);
+		b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C2, 0x1f);
+		break;
 	}
 
-	/* TODO */
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		u16 txmix2g_tune_boost_pu = 0;
+		u16 pad2g_tune_pus = 0;
+
+		if (b43_nphy_ipa(dev)) {
+			switch (phy->radio_rev) {
+			case 9:
+				txmix2g_tune_boost_pu = 0x0041;
+				/* TODO */
+				break;
+			case 14:
+				txmix2g_tune_boost_pu = 0x21;
+				pad2g_tune_pus = 0x23;
+				break;
+			}
+		}
+
+		if (txmix2g_tune_boost_pu)
+			b43_radio_write(dev, R2057_TXMIX2G_TUNE_BOOST_PU_CORE0,
+					txmix2g_tune_boost_pu);
+		if (pad2g_tune_pus)
+			b43_radio_write(dev, R2057_PAD2G_TUNE_PUS_CORE0,
+					pad2g_tune_pus);
+		if (txmix2g_tune_boost_pu)
+			b43_radio_write(dev, R2057_TXMIX2G_TUNE_BOOST_PU_CORE1,
+					txmix2g_tune_boost_pu);
+		if (pad2g_tune_pus)
+			b43_radio_write(dev, R2057_PAD2G_TUNE_PUS_CORE1,
+					pad2g_tune_pus);
+	}
 
 	usleep_range(50, 100);
 
@@ -690,13 +821,62 @@
 static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
+	u16 saved_regs_phy[12];
+	u16 saved_regs_phy_rf[6];
+	u16 saved_regs_radio[2] = { };
+	static const u16 phy_to_store[] = {
+		B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2,
+		B43_NPHY_RFCTL_LUT_TRSW_LO1, B43_NPHY_RFCTL_LUT_TRSW_LO2,
+		B43_NPHY_RFCTL_RXG1, B43_NPHY_RFCTL_RXG2,
+		B43_NPHY_RFCTL_TXG1, B43_NPHY_RFCTL_TXG2,
+		B43_NPHY_REV7_RF_CTL_MISC_REG3, B43_NPHY_REV7_RF_CTL_MISC_REG4,
+		B43_NPHY_REV7_RF_CTL_MISC_REG5, B43_NPHY_REV7_RF_CTL_MISC_REG6,
+	};
+	static const u16 phy_to_store_rf[] = {
+		B43_NPHY_REV3_RFCTL_OVER0, B43_NPHY_REV3_RFCTL_OVER1,
+		B43_NPHY_REV7_RF_CTL_OVER3, B43_NPHY_REV7_RF_CTL_OVER4,
+		B43_NPHY_REV7_RF_CTL_OVER5, B43_NPHY_REV7_RF_CTL_OVER6,
+	};
 	u16 tmp;
+	int i;
 
-	if (phy->radio_rev == 5) {
-		b43_phy_mask(dev, 0x342, ~0x2);
+	/* Save */
+	for (i = 0; i < ARRAY_SIZE(phy_to_store); i++)
+		saved_regs_phy[i] = b43_phy_read(dev, phy_to_store[i]);
+	for (i = 0; i < ARRAY_SIZE(phy_to_store_rf); i++)
+		saved_regs_phy_rf[i] = b43_phy_read(dev, phy_to_store_rf[i]);
+
+	/* Set */
+	for (i = 0; i < ARRAY_SIZE(phy_to_store); i++)
+		b43_phy_write(dev, phy_to_store[i], 0);
+	b43_phy_write(dev, B43_NPHY_REV3_RFCTL_OVER0, 0x07ff);
+	b43_phy_write(dev, B43_NPHY_REV3_RFCTL_OVER1, 0x07ff);
+	b43_phy_write(dev, B43_NPHY_REV7_RF_CTL_OVER3, 0x07ff);
+	b43_phy_write(dev, B43_NPHY_REV7_RF_CTL_OVER4, 0x07ff);
+	b43_phy_write(dev, B43_NPHY_REV7_RF_CTL_OVER5, 0x007f);
+	b43_phy_write(dev, B43_NPHY_REV7_RF_CTL_OVER6, 0x007f);
+
+	switch (phy->radio_rev) {
+	case 5:
+		b43_phy_mask(dev, B43_NPHY_REV7_RF_CTL_OVER3, ~0x2);
 		udelay(10);
 		b43_radio_set(dev, R2057_IQTEST_SEL_PU, 0x1);
-		b43_radio_maskset(dev, 0x1ca, ~0x2, 0x1);
+		b43_radio_maskset(dev, R2057v7_IQTEST_SEL_PU2, ~0x2, 0x1);
+		break;
+	case 9:
+		b43_phy_set(dev, B43_NPHY_REV7_RF_CTL_OVER3, 0x2);
+		b43_phy_set(dev, B43_NPHY_REV7_RF_CTL_MISC_REG3, 0x2);
+		saved_regs_radio[0] = b43_radio_read(dev, R2057_IQTEST_SEL_PU);
+		b43_radio_write(dev, R2057_IQTEST_SEL_PU, 0x11);
+		break;
+	case 14:
+		saved_regs_radio[0] = b43_radio_read(dev, R2057_IQTEST_SEL_PU);
+		saved_regs_radio[1] = b43_radio_read(dev, R2057v7_IQTEST_SEL_PU2);
+		b43_phy_set(dev, B43_NPHY_REV7_RF_CTL_MISC_REG3, 0x2);
+		b43_phy_set(dev, B43_NPHY_REV7_RF_CTL_OVER3, 0x2);
+		b43_radio_write(dev, R2057v7_IQTEST_SEL_PU2, 0x2);
+		b43_radio_write(dev, R2057_IQTEST_SEL_PU, 0x1);
+		break;
 	}
 
 	/* Enable */
@@ -720,14 +900,30 @@
 	/* Disable */
 	b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x1);
 
-	if (phy->radio_rev == 5) {
-		b43_radio_mask(dev, R2057_IPA2G_CASCONV_CORE0, ~0x1);
-		b43_radio_mask(dev, 0x1ca, ~0x2);
-	}
-	if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
+	/* Restore */
+	for (i = 0; i < ARRAY_SIZE(phy_to_store_rf); i++)
+		b43_phy_write(dev, phy_to_store_rf[i], saved_regs_phy_rf[i]);
+	for (i = 0; i < ARRAY_SIZE(phy_to_store); i++)
+		b43_phy_write(dev, phy_to_store[i], saved_regs_phy[i]);
+
+	switch (phy->radio_rev) {
+	case 0 ... 4:
+	case 6:
 		b43_radio_maskset(dev, R2057_TEMPSENSE_CONFIG, ~0x3C, tmp);
 		b43_radio_maskset(dev, R2057_BANDGAP_RCAL_TRIM, ~0xF0,
 				  tmp << 2);
+		break;
+	case 5:
+		b43_radio_mask(dev, R2057_IPA2G_CASCONV_CORE0, ~0x1);
+		b43_radio_mask(dev, R2057v7_IQTEST_SEL_PU2, ~0x2);
+		break;
+	case 9:
+		b43_radio_write(dev, R2057_IQTEST_SEL_PU, saved_regs_radio[0]);
+		break;
+	case 14:
+		b43_radio_write(dev, R2057_IQTEST_SEL_PU, saved_regs_radio[0]);
+		b43_radio_write(dev, R2057v7_IQTEST_SEL_PU2, saved_regs_radio[1]);
+		break;
 	}
 
 	return tmp & 0x3e;
@@ -749,7 +945,7 @@
 		b43_radio_write(dev, R2057_RCCAL_TRC0, 0xC0);
 	} else {
 		b43_radio_write(dev, R2057v7_RCCAL_MASTER, 0x61);
-		b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE1);
+		b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE9);
 	}
 	b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
 
@@ -829,6 +1025,9 @@
 {
 	b43_radio_set(dev, R2057_XTALPUOVR_PINCTRL, 0x1);
 
+	if (0) /* FIXME: Is this BCM43217 specific? */
+		b43_radio_set(dev, R2057_XTALPUOVR_PINCTRL, 0x2);
+
 	b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
 	b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
 	mdelay(2);
@@ -1386,6 +1585,7 @@
 				 u16 wait, bool iqmode, bool dac_test,
 				 bool modify_bbmult)
 {
+	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = dev->phy.n;
 	int i;
 	u16 seq_mode;
@@ -1393,6 +1593,26 @@
 
 	b43_nphy_stay_in_carrier_search(dev, true);
 
+	if (phy->rev >= 7) {
+		bool lpf_bw3, lpf_bw4;
+
+		lpf_bw3 = b43_phy_read(dev, B43_NPHY_REV7_RF_CTL_OVER3) & 0x80;
+		lpf_bw4 = b43_phy_read(dev, B43_NPHY_REV7_RF_CTL_OVER4) & 0x80;
+
+		if (lpf_bw3 || lpf_bw4) {
+			/* TODO */
+		} else {
+			u16 value = b43_nphy_read_lpf_ctl(dev, 0);
+			if (phy->rev >= 19)
+				b43_nphy_rf_ctl_override_rev19(dev, 0x80, value,
+							       0, false, 1);
+			else
+				b43_nphy_rf_ctl_override_rev7(dev, 0x80, value,
+							      0, false, 1);
+			nphy->lpf_bw_overrode_for_sample_play = true;
+		}
+	}
+
 	if ((nphy->bb_mult_save & 0x80000000) == 0) {
 		tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
 		nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
@@ -1520,6 +1740,12 @@
 	}
 }
 
+static void b43_nphy_rssi_select_rev19(struct b43_wldev *dev, u8 code,
+				       enum n_rssi_type rssi_type)
+{
+	/* TODO */
+}
+
 static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code,
 				      enum n_rssi_type rssi_type)
 {
@@ -1589,13 +1815,15 @@
 					enum ieee80211_band band =
 						b43_current_band(dev->wl);
 
-					if (b43_nphy_ipa(dev))
-						val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
-					else
-						val = 0x11;
-					reg = (i == 0) ? 0x2000 : 0x3000;
-					reg |= B2055_PADDRV;
-					b43_radio_write(dev, reg, val);
+					if (dev->phy.rev < 7) {
+						if (b43_nphy_ipa(dev))
+							val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+						else
+							val = 0x11;
+						reg = (i == 0) ? B2056_TX0 : B2056_TX1;
+						reg |= B2056_TX_TX_SSI_MUX;
+						b43_radio_write(dev, reg, val);
+					}
 
 					reg = (i == 0) ?
 						B43_NPHY_AFECTL_OVER1 :
@@ -1682,7 +1910,9 @@
 static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code,
 				 enum n_rssi_type type)
 {
-	if (dev->phy.rev >= 3)
+	if (dev->phy.rev >= 19)
+		b43_nphy_rssi_select_rev19(dev, code, type);
+	else if (dev->phy.rev >= 3)
 		b43_nphy_rev3_rssi_select(dev, code, type);
 	else
 		b43_nphy_rev2_rssi_select(dev, code, type);
@@ -1726,6 +1956,8 @@
 	u16 save_regs_phy[9];
 	u16 s[2];
 
+	/* TODO: rev7+ is treated like rev3+, what about rev19+? */
+
 	if (dev->phy.rev >= 3) {
 		save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
 		save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
@@ -1825,12 +2057,14 @@
 		B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
 		B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
 		B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
-		0x342, 0x343, 0x346, 0x347,
+		B43_NPHY_REV7_RF_CTL_OVER3, B43_NPHY_REV7_RF_CTL_OVER4,
+		B43_NPHY_REV7_RF_CTL_OVER5, B43_NPHY_REV7_RF_CTL_OVER6,
 		0x2ff,
 		B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1,
 		B43_NPHY_RFCTL_CMD,
 		B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
-		0x340, 0x341, 0x344, 0x345,
+		B43_NPHY_REV7_RF_CTL_MISC_REG3, B43_NPHY_REV7_RF_CTL_MISC_REG4,
+		B43_NPHY_REV7_RF_CTL_MISC_REG5, B43_NPHY_REV7_RF_CTL_MISC_REG6,
 		B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
 	};
 	u16 *regs_to_store;
@@ -1877,9 +2111,24 @@
 	b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7);
 
 	if (dev->phy.rev >= 7) {
-		/* TODO */
+		b43_nphy_rf_ctl_override_one_to_many(dev,
+						     N_RF_CTL_OVER_CMD_RXRF_PU,
+						     0, 0, false);
+		b43_nphy_rf_ctl_override_one_to_many(dev,
+						     N_RF_CTL_OVER_CMD_RX_PU,
+						     1, 0, false);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x80, 1, 0, false, 0);
+		b43_nphy_rf_ctl_override_rev7(dev, 0x40, 1, 0, false, 0);
 		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+			b43_nphy_rf_ctl_override_rev7(dev, 0x20, 0, 0, false,
+						      0);
+			b43_nphy_rf_ctl_override_rev7(dev, 0x10, 1, 0, false,
+						      0);
 		} else {
+			b43_nphy_rf_ctl_override_rev7(dev, 0x10, 0, 0, false,
+						      0);
+			b43_nphy_rf_ctl_override_rev7(dev, 0x20, 1, 0, false,
+						      0);
 		}
 	} else {
 		b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
@@ -1908,7 +2157,10 @@
 		/* Grab RSSI results for every possible VCM */
 		for (vcm = 0; vcm < 8; vcm++) {
 			if (dev->phy.rev >= 7)
-				;
+				b43_radio_maskset(dev,
+						  core ? R2057_NB_MASTER_CORE1 :
+							 R2057_NB_MASTER_CORE0,
+						  ~R2057_VCM_MASK, vcm);
 			else
 				b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
 						  0xE3, vcm << 2);
@@ -1939,7 +2191,10 @@
 
 		/* Select the best VCM */
 		if (dev->phy.rev >= 7)
-			;
+			b43_radio_maskset(dev,
+					  core ? R2057_NB_MASTER_CORE1 :
+						 R2057_NB_MASTER_CORE0,
+					  ~R2057_VCM_MASK, vcm);
 		else
 			b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
 					  0xE3, vcm_final << 2);
@@ -2009,6 +2264,10 @@
 		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
 	}
 	if (dev->phy.rev >= 7) {
+		rssical_radio_regs[0] = b43_radio_read(dev,
+						       R2057_NB_MASTER_CORE0);
+		rssical_radio_regs[1] = b43_radio_read(dev,
+						       R2057_NB_MASTER_CORE1);
 	} else {
 		rssical_radio_regs[0] = b43_radio_read(dev, B2056_RX0 |
 						       B2056_RX_RSSI_MISC);
@@ -2209,7 +2468,9 @@
  */
 static void b43_nphy_rssi_cal(struct b43_wldev *dev)
 {
-	if (dev->phy.rev >= 3) {
+	if (dev->phy.rev >= 19) {
+		/* TODO */
+	} else if (dev->phy.rev >= 3) {
 		b43_nphy_rev3_rssi_cal(dev);
 	} else {
 		b43_nphy_rev2_rssi_cal(dev, N_RSSI_NB);
@@ -2222,7 +2483,21 @@
  * Workarounds
  **************************************************/
 
-static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev)
+static void b43_nphy_gain_ctl_workarounds_rev19(struct b43_wldev *dev)
+{
+	/* TODO */
+}
+
+static void b43_nphy_gain_ctl_workarounds_rev7(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+
+	switch (phy->rev) {
+	/* TODO */
+	}
+}
+
+static void b43_nphy_gain_ctl_workarounds_rev3(struct b43_wldev *dev)
 {
 	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
@@ -2419,46 +2694,54 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
 static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
 {
-	if (dev->phy.rev >= 7)
-		; /* TODO */
+	if (dev->phy.rev >= 19)
+		b43_nphy_gain_ctl_workarounds_rev19(dev);
+	else if (dev->phy.rev >= 7)
+		b43_nphy_gain_ctl_workarounds_rev7(dev);
 	else if (dev->phy.rev >= 3)
-		b43_nphy_gain_ctl_workarounds_rev3plus(dev);
+		b43_nphy_gain_ctl_workarounds_rev3(dev);
 	else
 		b43_nphy_gain_ctl_workarounds_rev1_2(dev);
 }
 
-/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
-static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
-{
-	if (!offset)
-		offset = b43_is_40mhz(dev) ? 0x159 : 0x154;
-	return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
-}
-
 static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
 {
 	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	struct b43_phy *phy = &dev->phy;
 
+	/* TX to RX */
+	u8 tx2rx_events[7] = { 4, 3, 5, 2, 1, 8, 31, };
+	u8 tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1, };
+	/* RX to TX */
 	u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
 					0x1F };
 	u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
 
-	u16 ntab7_15e_16e[] = { 0x10f, 0x10f };
+	static const u16 ntab7_15e_16e[] = { 0, 0x10f, 0x10f };
 	u8 ntab7_138_146[] = { 0x11, 0x11 };
 	u8 ntab7_133[] = { 0x77, 0x11, 0x11 };
 
-	u16 lpf_20, lpf_40, lpf_11b;
-	u16 bcap_val, bcap_val_11b, bcap_val_11n_20, bcap_val_11n_40;
-	u16 scap_val, scap_val_11b, scap_val_11n_20, scap_val_11n_40;
+	u16 lpf_ofdm_20mhz[2], lpf_ofdm_40mhz[2], lpf_11b[2];
+	u16 bcap_val;
+	s16 bcap_val_11b[2], bcap_val_11n_20[2], bcap_val_11n_40[2];
+	u16 scap_val;
+	s16 scap_val_11b[2], scap_val_11n_20[2], scap_val_11n_40[2];
 	bool rccal_ovrd = false;
 
-	u16 rx2tx_lut_20_11b, rx2tx_lut_20_11n, rx2tx_lut_40_11n;
 	u16 bias, conv, filt;
 
+	u32 noise_tbl[2];
+
 	u32 tmp32;
 	u8 core;
 
+	b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
+	b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01b3);
+	b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
+	b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016e);
+	b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00cd);
+	b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
+
 	if (phy->rev == 7) {
 		b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 0x10);
 		b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0xFF80, 0x0020);
@@ -2478,11 +2761,18 @@
 		b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0xFF80, 0x0040);
 		b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000);
 	}
-	if (phy->rev <= 8) {
+
+	if (phy->rev >= 16) {
+		b43_phy_write(dev, B43_NPHY_FORCEFRONT0, 0x7ff);
+		b43_phy_write(dev, B43_NPHY_FORCEFRONT1, 0x7ff);
+	} else if (phy->rev <= 8) {
 		b43_phy_write(dev, B43_NPHY_FORCEFRONT0, 0x1B0);
 		b43_phy_write(dev, B43_NPHY_FORCEFRONT1, 0x1B0);
 	}
-	if (phy->rev >= 8)
+
+	if (phy->rev >= 16)
+		b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0xa0);
+	else if (phy->rev >= 8)
 		b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72);
 
 	b43_ntab_write(dev, B43_NTAB16(8, 0x00), 2);
@@ -2490,9 +2780,11 @@
 	tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
 	tmp32 &= 0xffffff;
 	b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
-	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x15e), 2, ntab7_15e_16e);
-	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x16e), 2, ntab7_15e_16e);
+	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x15d), 3, ntab7_15e_16e);
+	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x16d), 3, ntab7_15e_16e);
 
+	b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
+				 ARRAY_SIZE(tx2rx_events));
 	if (b43_nphy_ipa(dev))
 		b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
 				rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
@@ -2500,84 +2792,176 @@
 	b43_phy_maskset(dev, B43_NPHY_EPS_OVERRIDEI_0, 0x3FFF, 0x4000);
 	b43_phy_maskset(dev, B43_NPHY_EPS_OVERRIDEI_1, 0x3FFF, 0x4000);
 
-	lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154);
-	lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159);
-	lpf_11b = b43_nphy_read_lpf_ctl(dev, 0x152);
+	for (core = 0; core < 2; core++) {
+		lpf_ofdm_20mhz[core] = b43_nphy_read_lpf_ctl(dev, 0x154 + core * 0x10);
+		lpf_ofdm_40mhz[core] = b43_nphy_read_lpf_ctl(dev, 0x159 + core * 0x10);
+		lpf_11b[core] = b43_nphy_read_lpf_ctl(dev, 0x152 + core * 0x10);
+	}
+
+	bcap_val = b43_radio_read(dev, R2057_RCCAL_BCAP_VAL);
+	scap_val = b43_radio_read(dev, R2057_RCCAL_SCAP_VAL);
+
 	if (b43_nphy_ipa(dev)) {
-		if ((phy->radio_rev == 5 && b43_is_40mhz(dev)) ||
-		    phy->radio_rev == 7 || phy->radio_rev == 8) {
-			bcap_val = b43_radio_read(dev, 0x16b);
-			scap_val = b43_radio_read(dev, 0x16a);
-			scap_val_11b = scap_val;
-			bcap_val_11b = bcap_val;
-			if (phy->radio_rev == 5 && b43_is_40mhz(dev)) {
-				scap_val_11n_20 = scap_val;
-				bcap_val_11n_20 = bcap_val;
-				scap_val_11n_40 = bcap_val_11n_40 = 0xc;
-				rccal_ovrd = true;
-			} else { /* Rev 7/8 */
-				lpf_20 = 4;
-				lpf_11b = 1;
-				if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
-					scap_val_11n_20 = 0xc;
-					bcap_val_11n_20 = 0xc;
-					scap_val_11n_40 = 0xa;
-					bcap_val_11n_40 = 0xa;
-				} else {
-					scap_val_11n_20 = 0x14;
-					bcap_val_11n_20 = 0x14;
-					scap_val_11n_40 = 0xf;
-					bcap_val_11n_40 = 0xf;
+		bool ghz2 = b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ;
+
+		switch (phy->radio_rev) {
+		case 5:
+			/* Check radio version (to be 0) by PHY rev for now */
+			if (phy->rev == 8 && b43_is_40mhz(dev)) {
+				for (core = 0; core < 2; core++) {
+					scap_val_11b[core] = scap_val;
+					bcap_val_11b[core] = bcap_val;
+					scap_val_11n_20[core] = scap_val;
+					bcap_val_11n_20[core] = bcap_val;
+					scap_val_11n_40[core] = 0xc;
+					bcap_val_11n_40[core] = 0xc;
 				}
+
 				rccal_ovrd = true;
 			}
+			if (phy->rev == 9) {
+				/* TODO: Radio version 1 (e.g. BCM5357B0) */
+			}
+			break;
+		case 7:
+		case 8:
+			for (core = 0; core < 2; core++) {
+				scap_val_11b[core] = scap_val;
+				bcap_val_11b[core] = bcap_val;
+				lpf_ofdm_20mhz[core] = 4;
+				lpf_11b[core] = 1;
+				if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+					scap_val_11n_20[core] = 0xc;
+					bcap_val_11n_20[core] = 0xc;
+					scap_val_11n_40[core] = 0xa;
+					bcap_val_11n_40[core] = 0xa;
+				} else {
+					scap_val_11n_20[core] = 0x14;
+					bcap_val_11n_20[core] = 0x14;
+					scap_val_11n_40[core] = 0xf;
+					bcap_val_11n_40[core] = 0xf;
+				}
+			}
+
+			rccal_ovrd = true;
+			break;
+		case 9:
+			for (core = 0; core < 2; core++) {
+				bcap_val_11b[core] = bcap_val;
+				scap_val_11b[core] = scap_val;
+				lpf_11b[core] = 1;
+
+				if (ghz2) {
+					bcap_val_11n_20[core] = bcap_val + 13;
+					scap_val_11n_20[core] = scap_val + 15;
+				} else {
+					bcap_val_11n_20[core] = bcap_val + 14;
+					scap_val_11n_20[core] = scap_val + 15;
+				}
+				lpf_ofdm_20mhz[core] = 4;
+
+				if (ghz2) {
+					bcap_val_11n_40[core] = bcap_val - 7;
+					scap_val_11n_40[core] = scap_val - 5;
+				} else {
+					bcap_val_11n_40[core] = bcap_val + 2;
+					scap_val_11n_40[core] = scap_val + 4;
+				}
+				lpf_ofdm_40mhz[core] = 4;
+			}
+
+			rccal_ovrd = true;
+			break;
+		case 14:
+			for (core = 0; core < 2; core++) {
+				bcap_val_11b[core] = bcap_val;
+				scap_val_11b[core] = scap_val;
+				lpf_11b[core] = 1;
+			}
+
+			bcap_val_11n_20[0] = bcap_val + 20;
+			scap_val_11n_20[0] = scap_val + 20;
+			lpf_ofdm_20mhz[0] = 3;
+
+			bcap_val_11n_20[1] = bcap_val + 16;
+			scap_val_11n_20[1] = scap_val + 16;
+			lpf_ofdm_20mhz[1] = 3;
+
+			bcap_val_11n_40[0] = bcap_val + 20;
+			scap_val_11n_40[0] = scap_val + 20;
+			lpf_ofdm_40mhz[0] = 4;
+
+			bcap_val_11n_40[1] = bcap_val + 10;
+			scap_val_11n_40[1] = scap_val + 10;
+			lpf_ofdm_40mhz[1] = 4;
+
+			rccal_ovrd = true;
+			break;
 		}
 	} else {
 		if (phy->radio_rev == 5) {
-			lpf_20 = 1;
-			lpf_40 = 3;
-			bcap_val = b43_radio_read(dev, 0x16b);
-			scap_val = b43_radio_read(dev, 0x16a);
-			scap_val_11b = scap_val;
-			bcap_val_11b = bcap_val;
-			scap_val_11n_20 = 0x11;
-			scap_val_11n_40 = 0x11;
-			bcap_val_11n_20 = 0x13;
-			bcap_val_11n_40 = 0x13;
+			for (core = 0; core < 2; core++) {
+				lpf_ofdm_20mhz[core] = 1;
+				lpf_ofdm_40mhz[core] = 3;
+				scap_val_11b[core] = scap_val;
+				bcap_val_11b[core] = bcap_val;
+				scap_val_11n_20[core] = 0x11;
+				scap_val_11n_40[core] = 0x11;
+				bcap_val_11n_20[core] = 0x13;
+				bcap_val_11n_40[core] = 0x13;
+			}
+
 			rccal_ovrd = true;
 		}
 	}
 	if (rccal_ovrd) {
-		rx2tx_lut_20_11b = (bcap_val_11b << 8) |
-				   (scap_val_11b << 3) |
-				   lpf_11b;
-		rx2tx_lut_20_11n = (bcap_val_11n_20 << 8) |
-				   (scap_val_11n_20 << 3) |
-				   lpf_20;
-		rx2tx_lut_40_11n = (bcap_val_11n_40 << 8) |
-				   (scap_val_11n_40 << 3) |
-				   lpf_40;
+		u16 rx2tx_lut_20_11b[2], rx2tx_lut_20_11n[2], rx2tx_lut_40_11n[2];
+		u8 rx2tx_lut_extra = 1;
+
+		for (core = 0; core < 2; core++) {
+			bcap_val_11b[core] = clamp_val(bcap_val_11b[core], 0, 0x1f);
+			scap_val_11b[core] = clamp_val(scap_val_11b[core], 0, 0x1f);
+			bcap_val_11n_20[core] = clamp_val(bcap_val_11n_20[core], 0, 0x1f);
+			scap_val_11n_20[core] = clamp_val(scap_val_11n_20[core], 0, 0x1f);
+			bcap_val_11n_40[core] = clamp_val(bcap_val_11n_40[core], 0, 0x1f);
+			scap_val_11n_40[core] = clamp_val(scap_val_11n_40[core], 0, 0x1f);
+
+			rx2tx_lut_20_11b[core] = (rx2tx_lut_extra << 13) |
+						 (bcap_val_11b[core] << 8) |
+						 (scap_val_11b[core] << 3) |
+						 lpf_11b[core];
+			rx2tx_lut_20_11n[core] = (rx2tx_lut_extra << 13) |
+						 (bcap_val_11n_20[core] << 8) |
+						 (scap_val_11n_20[core] << 3) |
+						 lpf_ofdm_20mhz[core];
+			rx2tx_lut_40_11n[core] = (rx2tx_lut_extra << 13) |
+						 (bcap_val_11n_40[core] << 8) |
+						 (scap_val_11n_40[core] << 3) |
+						 lpf_ofdm_40mhz[core];
+		}
+
 		for (core = 0; core < 2; core++) {
 			b43_ntab_write(dev, B43_NTAB16(7, 0x152 + core * 16),
-				       rx2tx_lut_20_11b);
+				       rx2tx_lut_20_11b[core]);
 			b43_ntab_write(dev, B43_NTAB16(7, 0x153 + core * 16),
-				       rx2tx_lut_20_11n);
+				       rx2tx_lut_20_11n[core]);
 			b43_ntab_write(dev, B43_NTAB16(7, 0x154 + core * 16),
-				       rx2tx_lut_20_11n);
+				       rx2tx_lut_20_11n[core]);
 			b43_ntab_write(dev, B43_NTAB16(7, 0x155 + core * 16),
-				       rx2tx_lut_40_11n);
+				       rx2tx_lut_40_11n[core]);
 			b43_ntab_write(dev, B43_NTAB16(7, 0x156 + core * 16),
-				       rx2tx_lut_40_11n);
+				       rx2tx_lut_40_11n[core]);
 			b43_ntab_write(dev, B43_NTAB16(7, 0x157 + core * 16),
-				       rx2tx_lut_40_11n);
+				       rx2tx_lut_40_11n[core]);
 			b43_ntab_write(dev, B43_NTAB16(7, 0x158 + core * 16),
-				       rx2tx_lut_40_11n);
+				       rx2tx_lut_40_11n[core]);
 			b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16),
-				       rx2tx_lut_40_11n);
+				       rx2tx_lut_40_11n[core]);
 		}
-		b43_nphy_rf_ctl_override_rev7(dev, 16, 1, 3, false, 2);
 	}
+
 	b43_phy_write(dev, 0x32F, 0x3);
+
 	if (phy->radio_rev == 4 || phy->radio_rev == 6)
 		b43_nphy_rf_ctl_override_rev7(dev, 4, 1, 3, false, 0);
 
@@ -2625,7 +3009,8 @@
 								0x7f);
 				}
 			}
-			if (phy->radio_rev == 3) {
+			switch (phy->radio_rev) {
+			case 3:
 				for (core = 0; core < 2; core++) {
 					if (core == 0) {
 						b43_radio_write(dev, 0x64,
@@ -2651,7 +3036,9 @@
 								0x3E);
 					}
 				}
-			} else if (phy->radio_rev == 7 || phy->radio_rev == 8) {
+				break;
+			case 7:
+			case 8:
 				if (!b43_is_40mhz(dev)) {
 					b43_radio_write(dev, 0x5F, 0x14);
 					b43_radio_write(dev, 0xE8, 0x12);
@@ -2659,6 +3046,21 @@
 					b43_radio_write(dev, 0x5F, 0x16);
 					b43_radio_write(dev, 0xE8, 0x16);
 				}
+				break;
+			case 14:
+				for (core = 0; core < 2; core++) {
+					int o = core ? 0x85 : 0;
+
+					b43_radio_write(dev, o + R2057_IPA2G_CASCONV_CORE0, 0x13);
+					b43_radio_write(dev, o + R2057_TXMIX2G_TUNE_BOOST_PU_CORE0, 0x21);
+					b43_radio_write(dev, o + R2057_IPA2G_BIAS_FILTER_CORE0, 0xff);
+					b43_radio_write(dev, o + R2057_PAD2G_IDACS_CORE0, 0x88);
+					b43_radio_write(dev, o + R2057_PAD2G_TUNE_PUS_CORE0, 0x23);
+					b43_radio_write(dev, o + R2057_IPA2G_IMAIN_CORE0, 0x16);
+					b43_radio_write(dev, o + R2057_PAD_BIAS_FILTER_BWS_CORE0, 0x3e);
+					b43_radio_write(dev, o + R2057_BACKUP1_CORE0, 0x10);
+				}
+				break;
 			}
 		} else {
 			u16 freq = phy->chandef->chan->center_freq;
@@ -2706,8 +3108,8 @@
 		b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x1);
 		b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x1);
 		b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x1);
-		b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
-		b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0);
 
 		b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x4);
 		b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x4);
@@ -2718,20 +3120,20 @@
 	b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, 0x2);
 
 	b43_ntab_write(dev, B43_NTAB32(16, 0x100), 20);
-	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x138), 2, ntab7_138_146);
+	b43_ntab_write_bulk(dev, B43_NTAB8(7, 0x138), 2, ntab7_138_146);
 	b43_ntab_write(dev, B43_NTAB16(7, 0x141), 0x77);
-	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x133), 3, ntab7_133);
-	b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x146), 2, ntab7_138_146);
+	b43_ntab_write_bulk(dev, B43_NTAB8(7, 0x133), 3, ntab7_133);
+	b43_ntab_write_bulk(dev, B43_NTAB8(7, 0x146), 2, ntab7_138_146);
 	b43_ntab_write(dev, B43_NTAB16(7, 0x123), 0x77);
 	b43_ntab_write(dev, B43_NTAB16(7, 0x12A), 0x77);
 
-	if (!b43_is_40mhz(dev)) {
-		b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x18D);
-		b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x18D);
-	} else {
-		b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x14D);
-		b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x14D);
-	}
+	b43_ntab_read_bulk(dev, B43_NTAB32(16, 0x02), 1, noise_tbl);
+	noise_tbl[1] = b43_is_40mhz(dev) ? 0x14D : 0x18D;
+	b43_ntab_write_bulk(dev, B43_NTAB32(16, 0x02), 2, noise_tbl);
+
+	b43_ntab_read_bulk(dev, B43_NTAB32(16, 0x7E), 1, noise_tbl);
+	noise_tbl[1] = b43_is_40mhz(dev) ? 0x14D : 0x18D;
+	b43_ntab_write_bulk(dev, B43_NTAB32(16, 0x7E), 2, noise_tbl);
 
 	b43_nphy_gain_ctl_workarounds(dev);
 
@@ -3059,6 +3461,7 @@
 	b43_phy_set(dev, B43_NPHY_IQFLIP,
 		    B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
 
+	/* TODO: rev19+ */
 	if (dev->phy.rev >= 7)
 		b43_nphy_workarounds_rev7plus(dev);
 	else if (dev->phy.rev >= 3)
@@ -3120,6 +3523,7 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
 static void b43_nphy_stop_playback(struct b43_wldev *dev)
 {
+	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = dev->phy.n;
 	u16 tmp;
 
@@ -3140,6 +3544,15 @@
 		nphy->bb_mult_save = 0;
 	}
 
+	if (phy->rev >= 7 && nphy->lpf_bw_overrode_for_sample_play) {
+		if (phy->rev >= 19)
+			b43_nphy_rf_ctl_override_rev19(dev, 0x80, 0, 0, true,
+						       1);
+		else
+			b43_nphy_rf_ctl_override_rev7(dev, 0x80, 0, 0, true, 1);
+		nphy->lpf_bw_overrode_for_sample_play = false;
+	}
+
 	if (nphy->hang_avoid)
 		b43_nphy_stay_in_carrier_search(dev, 0);
 }
@@ -3149,16 +3562,23 @@
 					struct nphy_txgains target,
 					struct nphy_iqcal_params *params)
 {
+	struct b43_phy *phy = &dev->phy;
 	int i, j, indx;
 	u16 gain;
 
 	if (dev->phy.rev >= 3) {
+		params->tx_lpf = target.tx_lpf[core]; /* Rev 7+ */
 		params->txgm = target.txgm[core];
 		params->pga = target.pga[core];
 		params->pad = target.pad[core];
 		params->ipa = target.ipa[core];
-		params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
-					(params->pad << 4) | (params->ipa);
+		if (phy->rev >= 19) {
+			/* TODO */
+		} else if (phy->rev >= 7) {
+			params->cal_gain = (params->txgm << 12) | (params->pga << 8) | (params->pad << 3) | (params->ipa) | (params->tx_lpf << 15);
+		} else {
+			params->cal_gain = (params->txgm << 12) | (params->pga << 8) | (params->pad << 4) | (params->ipa);
+		}
 		for (j = 0; j < 5; j++)
 			params->ncorr[j] = 0x79;
 	} else {
@@ -3199,6 +3619,7 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
 static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
 {
+	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = dev->phy.n;
 	u8 i;
 	u16 bmask, val, tmp;
@@ -3268,12 +3689,25 @@
 		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
 
 		if (band == IEEE80211_BAND_5GHZ) {
-			b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
-					~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
-			if (dev->phy.rev > 1)
+			if (phy->rev >= 19) {
+				/* TODO */
+			} else if (phy->rev >= 7) {
+				b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+						~B43_NPHY_TXPCTL_CMD_INIT,
+						0x32);
 				b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
 						~B43_NPHY_TXPCTL_INIT_PIDXI1,
+						0x32);
+			} else {
+				b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+						~B43_NPHY_TXPCTL_CMD_INIT,
 						0x64);
+				if (phy->rev > 1)
+					b43_phy_maskset(dev,
+							B43_NPHY_TXPCTL_INIT,
+							~B43_NPHY_TXPCTL_INIT_PIDXI1,
+							0x64);
+			}
 		}
 
 		if (dev->phy.rev >= 3) {
@@ -3290,6 +3724,10 @@
 			}
 		}
 
+		if (phy->rev >= 7) {
+			/* TODO */
+		}
+
 		if (dev->phy.rev >= 3) {
 			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
 			b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
@@ -3331,6 +3769,7 @@
 	if (nphy->hang_avoid)
 		b43_nphy_stay_in_carrier_search(dev, 1);
 
+	/* TODO: rev19+ */
 	if (dev->phy.rev >= 7) {
 		txpi[0] = txpi[1] = 30;
 	} else if (dev->phy.rev >= 3) {
@@ -3433,7 +3872,9 @@
 	u8 core;
 	u16 r; /* routing */
 
-	if (phy->rev >= 7) {
+	if (phy->rev >= 19) {
+		/* TODO */
+	} else if (phy->rev >= 7) {
 		for (core = 0; core < 2; core++) {
 			r = core ? 0x190 : 0x170;
 			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
@@ -3516,13 +3957,16 @@
 	u32 tmp;
 	s32 rssi[4] = { };
 
-	/* TODO: check if we can transmit */
+	if (phy->chandef->chan->flags & IEEE80211_CHAN_NO_IR)
+		return;
 
 	if (b43_nphy_ipa(dev))
 		b43_nphy_ipa_internal_tssi_setup(dev);
 
-	if (phy->rev >= 7)
-		b43_nphy_rf_ctl_override_rev7(dev, 0x2000, 0, 3, false, 0);
+	if (phy->rev >= 19)
+		b43_nphy_rf_ctl_override_rev19(dev, 0x1000, 0, 3, false, 0);
+	else if (phy->rev >= 7)
+		b43_nphy_rf_ctl_override_rev7(dev, 0x1000, 0, 3, false, 0);
 	else if (phy->rev >= 3)
 		b43_nphy_rf_ctl_override(dev, 0x2000, 0, 3, false);
 
@@ -3531,14 +3975,20 @@
 	udelay(20);
 	tmp = b43_nphy_poll_rssi(dev, N_RSSI_TSSI_2G, rssi, 1);
 	b43_nphy_stop_playback(dev);
+
 	b43_nphy_rssi_select(dev, 0, N_RSSI_W1);
 
-	if (phy->rev >= 7)
-		b43_nphy_rf_ctl_override_rev7(dev, 0x2000, 0, 3, true, 0);
+	if (phy->rev >= 19)
+		b43_nphy_rf_ctl_override_rev19(dev, 0x1000, 0, 3, true, 0);
+	else if (phy->rev >= 7)
+		b43_nphy_rf_ctl_override_rev7(dev, 0x1000, 0, 3, true, 0);
 	else if (phy->rev >= 3)
 		b43_nphy_rf_ctl_override(dev, 0x2000, 0, 3, true);
 
-	if (phy->rev >= 3) {
+	if (phy->rev >= 19) {
+		/* TODO */
+		return;
+	} else if (phy->rev >= 3) {
 		nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF;
 		nphy->pwr_ctl_info[1].idle_tssi_5g = (tmp >> 8) & 0xFF;
 	} else {
@@ -3730,7 +4180,9 @@
 		udelay(1);
 	}
 
-	if (dev->phy.rev >= 7) {
+	if (phy->rev >= 19) {
+		/* TODO */
+	} else if (phy->rev >= 7) {
 		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
 				~B43_NPHY_TXPCTL_CMD_INIT, 0x19);
 		b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
@@ -3793,24 +4245,30 @@
 	b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
 	b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
 
-	if (phy->rev >= 3) {
+	if (phy->rev < 3)
+		return;
+
 #if 0
-		nphy->gmval = (table[0] >> 16) & 0x7000;
+	nphy->gmval = (table[0] >> 16) & 0x7000;
 #endif
 
-		for (i = 0; i < 128; i++) {
+	for (i = 0; i < 128; i++) {
+		if (phy->rev >= 19) {
+			/* TODO */
+			return;
+		} else if (phy->rev >= 7) {
+			/* TODO */
+			return;
+		} else {
 			pga_gain = (table[i] >> 24) & 0xF;
 			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
-				rfpwr_offset =
-				 b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
+				rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
 			else
-				rfpwr_offset =
-				 0; /* FIXME */
-			b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
-				       rfpwr_offset);
-			b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
-				       rfpwr_offset);
+				rfpwr_offset = 0; /* FIXME */
 		}
+
+		b43_ntab_write(dev, B43_NTAB32(26, 576 + i), rfpwr_offset);
+		b43_ntab_write(dev, B43_NTAB32(27, 576 + i), rfpwr_offset);
 	}
 }
 
@@ -3827,7 +4285,9 @@
 		nphy->rfctrl_intc2_save = b43_phy_read(dev,
 						       B43_NPHY_RFCTL_INTC2);
 		band = b43_current_band(dev->wl);
-		if (dev->phy.rev >= 3) {
+		if (dev->phy.rev >= 7) {
+			tmp = 0x1480;
+		} else if (dev->phy.rev >= 3) {
 			if (band == IEEE80211_BAND_5GHZ)
 				tmp = 0x600;
 			else
@@ -4274,7 +4734,13 @@
 		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
 	}
 
-	if (dev->phy.rev >= 7) {
+	if (dev->phy.rev >= 19) {
+		/* TODO */
+	} else if (dev->phy.rev >= 7) {
+		b43_radio_maskset(dev, R2057_NB_MASTER_CORE0, ~R2057_VCM_MASK,
+				  rssical_radio_regs[0]);
+		b43_radio_maskset(dev, R2057_NB_MASTER_CORE1, ~R2057_VCM_MASK,
+				  rssical_radio_regs[1]);
 	} else {
 		b43_radio_maskset(dev, B2056_RX0 | B2056_RX_RSSI_MISC, 0xE3,
 				  rssical_radio_regs[0]);
@@ -4298,15 +4764,78 @@
 	b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
 }
 
+static void b43_nphy_tx_cal_radio_setup_rev19(struct b43_wldev *dev)
+{
+	/* TODO */
+}
+
+static void b43_nphy_tx_cal_radio_setup_rev7(struct b43_wldev *dev)
+{
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_n *nphy = dev->phy.n;
+	u16 *save = nphy->tx_rx_cal_radio_saveregs;
+	int core, off;
+	u16 r, tmp;
+
+	for (core = 0; core < 2; core++) {
+		r = core ? 0x20 : 0;
+		off = core * 11;
+
+		save[off + 0] = b43_radio_read(dev, r + R2057_TX0_TX_SSI_MASTER);
+		save[off + 1] = b43_radio_read(dev, r + R2057_TX0_IQCAL_VCM_HG);
+		save[off + 2] = b43_radio_read(dev, r + R2057_TX0_IQCAL_IDAC);
+		save[off + 3] = b43_radio_read(dev, r + R2057_TX0_TSSI_VCM);
+		save[off + 4] = 0;
+		save[off + 5] = b43_radio_read(dev, r + R2057_TX0_TX_SSI_MUX);
+		if (phy->radio_rev != 5)
+			save[off + 6] = b43_radio_read(dev, r + R2057_TX0_TSSIA);
+		save[off + 7] = b43_radio_read(dev, r + R2057_TX0_TSSIG);
+		save[off + 8] = b43_radio_read(dev, r + R2057_TX0_TSSI_MISC1);
+
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+			b43_radio_write(dev, r + R2057_TX0_TX_SSI_MASTER, 0xA);
+			b43_radio_write(dev, r + R2057_TX0_IQCAL_VCM_HG, 0x43);
+			b43_radio_write(dev, r + R2057_TX0_IQCAL_IDAC, 0x55);
+			b43_radio_write(dev, r + R2057_TX0_TSSI_VCM, 0);
+			b43_radio_write(dev, r + R2057_TX0_TSSIG, 0);
+			if (nphy->use_int_tx_iq_lo_cal) {
+				b43_radio_write(dev, r + R2057_TX0_TX_SSI_MUX, 0x4);
+				tmp = true ? 0x31 : 0x21; /* TODO */
+				b43_radio_write(dev, r + R2057_TX0_TSSIA, tmp);
+			}
+			b43_radio_write(dev, r + R2057_TX0_TSSI_MISC1, 0x00);
+		} else {
+			b43_radio_write(dev, r + R2057_TX0_TX_SSI_MASTER, 0x6);
+			b43_radio_write(dev, r + R2057_TX0_IQCAL_VCM_HG, 0x43);
+			b43_radio_write(dev, r + R2057_TX0_IQCAL_IDAC, 0x55);
+			b43_radio_write(dev, r + R2057_TX0_TSSI_VCM, 0);
+
+			if (phy->radio_rev != 5)
+				b43_radio_write(dev, r + R2057_TX0_TSSIA, 0);
+			if (nphy->use_int_tx_iq_lo_cal) {
+				b43_radio_write(dev, r + R2057_TX0_TX_SSI_MUX, 0x6);
+				tmp = true ? 0x31 : 0x21; /* TODO */
+				b43_radio_write(dev, r + R2057_TX0_TSSIG, tmp);
+			}
+			b43_radio_write(dev, r + R2057_TX0_TSSI_MISC1, 0);
+		}
+	}
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
 static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
 {
+	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = dev->phy.n;
 	u16 *save = nphy->tx_rx_cal_radio_saveregs;
 	u16 tmp;
 	u8 offset, i;
 
-	if (dev->phy.rev >= 3) {
+	if (phy->rev >= 19) {
+		b43_nphy_tx_cal_radio_setup_rev19(dev);
+	} else if (phy->rev >= 7) {
+		b43_nphy_tx_cal_radio_setup_rev7(dev);
+	} else if (phy->rev >= 3) {
 	    for (i = 0; i < 2; i++) {
 		tmp = (i == 0) ? 0x2000 : 0x3000;
 		offset = i * 11;
@@ -4415,41 +4944,61 @@
 	}
 }
 
+static void b43_nphy_pa_set_tx_dig_filter(struct b43_wldev *dev, u16 offset,
+					  const s16 *filter)
+{
+	int i;
+
+	offset = B43_PHY_N(offset);
+
+	for (i = 0; i < 15; i++, offset++)
+		b43_phy_write(dev, offset, filter[i]);
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
 static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev)
 {
-	int i;
-	for (i = 0; i < 15; i++)
-		b43_phy_write(dev, B43_PHY_N(0x2C5 + i),
-				tbl_tx_filter_coef_rev4[2][i]);
+	b43_nphy_pa_set_tx_dig_filter(dev, 0x2C5,
+				      tbl_tx_filter_coef_rev4[2]);
 }
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
 static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
 {
-	int i, j;
 	/* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
 	static const u16 offset[] = { 0x186, 0x195, 0x2C5 };
+	static const s16 dig_filter_phy_rev16[] = {
+		-375, 136, -407, 208, -1527,
+		956, 93, 186, 93, 230,
+		-44, 230, 201, -191, 201,
+	};
+	int i;
 
 	for (i = 0; i < 3; i++)
-		for (j = 0; j < 15; j++)
-			b43_phy_write(dev, B43_PHY_N(offset[i] + j),
-					tbl_tx_filter_coef_rev4[i][j]);
+		b43_nphy_pa_set_tx_dig_filter(dev, offset[i],
+					      tbl_tx_filter_coef_rev4[i]);
 
-	if (b43_is_40mhz(dev)) {
-		for (j = 0; j < 15; j++)
-			b43_phy_write(dev, B43_PHY_N(offset[0] + j),
-					tbl_tx_filter_coef_rev4[3][j]);
-	} else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
-		for (j = 0; j < 15; j++)
-			b43_phy_write(dev, B43_PHY_N(offset[0] + j),
-					tbl_tx_filter_coef_rev4[5][j]);
+	/* Verified with BCM43227 and BCM43228 */
+	if (dev->phy.rev == 16)
+		b43_nphy_pa_set_tx_dig_filter(dev, 0x186, dig_filter_phy_rev16);
+
+	if (dev->dev->chip_id == BCMA_CHIP_ID_BCM43217) {
+		b43_nphy_pa_set_tx_dig_filter(dev, 0x186, dig_filter_phy_rev16);
+		b43_nphy_pa_set_tx_dig_filter(dev, 0x195,
+					      tbl_tx_filter_coef_rev4[1]);
 	}
 
-	if (dev->phy.channel == 14)
-		for (j = 0; j < 15; j++)
-			b43_phy_write(dev, B43_PHY_N(offset[0] + j),
-					tbl_tx_filter_coef_rev4[6][j]);
+	if (b43_is_40mhz(dev)) {
+		b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
+					      tbl_tx_filter_coef_rev4[3]);
+	} else {
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+			b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
+						      tbl_tx_filter_coef_rev4[5]);
+		if (dev->phy.channel == 14)
+			b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
+						      tbl_tx_filter_coef_rev4[6]);
+	}
 }
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
@@ -4471,7 +5020,13 @@
 			b43_nphy_stay_in_carrier_search(dev, false);
 
 		for (i = 0; i < 2; ++i) {
-			if (dev->phy.rev >= 3) {
+			if (dev->phy.rev >= 7) {
+				target.ipa[i] = curr_gain[i] & 0x0007;
+				target.pad[i] = (curr_gain[i] & 0x00F8) >> 3;
+				target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
+				target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
+				target.tx_lpf[i] = (curr_gain[i] & 0x8000) >> 15;
+			} else if (dev->phy.rev >= 3) {
 				target.ipa[i] = curr_gain[i] & 0x000F;
 				target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
 				target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
@@ -4498,7 +5053,13 @@
 			if (!table)
 				break;
 
-			if (dev->phy.rev >= 3) {
+			if (dev->phy.rev >= 7) {
+				target.ipa[i] = (table[index[i]] >> 16) & 0x7;
+				target.pad[i] = (table[index[i]] >> 19) & 0x1F;
+				target.pga[i] = (table[index[i]] >> 24) & 0xF;
+				target.txgm[i] = (table[index[i]] >> 28) & 0x7;
+				target.tx_lpf[i] = (table[index[i]] >> 31) & 0x1;
+			} else if (dev->phy.rev >= 3) {
 				target.ipa[i] = (table[index[i]] >> 16) & 0xF;
 				target.pad[i] = (table[index[i]] >> 20) & 0xF;
 				target.pga[i] = (table[index[i]] >> 24) & 0xF;
@@ -4547,6 +5108,8 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
 static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
 {
+	struct b43_phy *phy = &dev->phy;
+	struct b43_phy_n *nphy = dev->phy.n;
 	u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
 	u16 tmp;
 
@@ -4578,7 +5141,12 @@
 		regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
 		regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
 
-		b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_PA, 1, 3);
+		if (!nphy->use_int_tx_iq_lo_cal)
+			b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_PA,
+						      1, 3);
+		else
+			b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_PA,
+						      0, 3);
 		b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 2, 1);
 		b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 8, 2);
 
@@ -4586,6 +5154,33 @@
 		regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
 		b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
 		b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+
+		tmp = b43_nphy_read_lpf_ctl(dev, 0);
+		if (phy->rev >= 19)
+			b43_nphy_rf_ctl_override_rev19(dev, 0x80, tmp, 0, false,
+						       1);
+		else if (phy->rev >= 7)
+			b43_nphy_rf_ctl_override_rev7(dev, 0x80, tmp, 0, false,
+						      1);
+
+		if (nphy->use_int_tx_iq_lo_cal && true /* FIXME */) {
+			if (phy->rev >= 19) {
+				b43_nphy_rf_ctl_override_rev19(dev, 0x8, 0, 0x3,
+							       false, 0);
+			} else if (phy->rev >= 8) {
+				b43_nphy_rf_ctl_override_rev7(dev, 0x8, 0, 0x3,
+							      false, 0);
+			} else if (phy->rev == 7) {
+				b43_radio_maskset(dev, R2057_OVR_REG0, 1 << 4, 1 << 4);
+				if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+					b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE0, ~1, 0);
+					b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE1, ~1, 0);
+				} else {
+					b43_radio_maskset(dev, R2057_IPA5G_CASCOFFV_PU_CORE0, ~1, 0);
+					b43_radio_maskset(dev, R2057_IPA5G_CASCOFFV_PU_CORE1, ~1, 0);
+				}
+			}
+		}
 	} else {
 		b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, 0xA000);
 		b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, 0xA000);
@@ -4614,6 +5209,7 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
 static void b43_nphy_save_cal(struct b43_wldev *dev)
 {
+	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = dev->phy.n;
 
 	struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
@@ -4638,7 +5234,26 @@
 
 	b43_nphy_rx_iq_coeffs(dev, false, rxcal_coeffs);
 	/* TODO use some definitions */
-	if (dev->phy.rev >= 3) {
+	if (phy->rev >= 19) {
+		/* TODO */
+	} else if (phy->rev >= 7) {
+		txcal_radio_regs[0] = b43_radio_read(dev,
+						     R2057_TX0_LOFT_FINE_I);
+		txcal_radio_regs[1] = b43_radio_read(dev,
+						     R2057_TX0_LOFT_FINE_Q);
+		txcal_radio_regs[4] = b43_radio_read(dev,
+						     R2057_TX0_LOFT_COARSE_I);
+		txcal_radio_regs[5] = b43_radio_read(dev,
+						     R2057_TX0_LOFT_COARSE_Q);
+		txcal_radio_regs[2] = b43_radio_read(dev,
+						     R2057_TX1_LOFT_FINE_I);
+		txcal_radio_regs[3] = b43_radio_read(dev,
+						     R2057_TX1_LOFT_FINE_Q);
+		txcal_radio_regs[6] = b43_radio_read(dev,
+						     R2057_TX1_LOFT_COARSE_I);
+		txcal_radio_regs[7] = b43_radio_read(dev,
+						     R2057_TX1_LOFT_COARSE_Q);
+	} else if (phy->rev >= 3) {
 		txcal_radio_regs[0] = b43_radio_read(dev, 0x2021);
 		txcal_radio_regs[1] = b43_radio_read(dev, 0x2022);
 		txcal_radio_regs[2] = b43_radio_read(dev, 0x3021);
@@ -4665,6 +5280,7 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
 static void b43_nphy_restore_cal(struct b43_wldev *dev)
 {
+	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = dev->phy.n;
 
 	u16 coef[4];
@@ -4712,7 +5328,26 @@
 	}
 
 	/* TODO use some definitions */
-	if (dev->phy.rev >= 3) {
+	if (phy->rev >= 19) {
+		/* TODO */
+	} else if (phy->rev >= 7) {
+		b43_radio_write(dev, R2057_TX0_LOFT_FINE_I,
+				txcal_radio_regs[0]);
+		b43_radio_write(dev, R2057_TX0_LOFT_FINE_Q,
+				txcal_radio_regs[1]);
+		b43_radio_write(dev, R2057_TX0_LOFT_COARSE_I,
+				txcal_radio_regs[4]);
+		b43_radio_write(dev, R2057_TX0_LOFT_COARSE_Q,
+				txcal_radio_regs[5]);
+		b43_radio_write(dev, R2057_TX1_LOFT_FINE_I,
+				txcal_radio_regs[2]);
+		b43_radio_write(dev, R2057_TX1_LOFT_FINE_Q,
+				txcal_radio_regs[3]);
+		b43_radio_write(dev, R2057_TX1_LOFT_COARSE_I,
+				txcal_radio_regs[6]);
+		b43_radio_write(dev, R2057_TX1_LOFT_COARSE_Q,
+				txcal_radio_regs[7]);
+	} else if (phy->rev >= 3) {
 		b43_radio_write(dev, 0x2021, txcal_radio_regs[0]);
 		b43_radio_write(dev, 0x2022, txcal_radio_regs[1]);
 		b43_radio_write(dev, 0x3021, txcal_radio_regs[2]);
@@ -4789,7 +5424,13 @@
 		}
 	}
 
-	b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
+	if (phy->rev >= 19) {
+		/* TODO */
+	} else if (phy->rev >= 7) {
+		b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AD9);
+	} else {
+		b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
+	}
 
 	if (!b43_is_40mhz(dev))
 		freq = 2500;
@@ -5183,6 +5824,9 @@
 static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
 			struct nphy_txgains target, u8 type, bool debug)
 {
+	if (dev->phy.rev >= 7)
+		type = 0;
+
 	if (dev->phy.rev >= 3)
 		return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug);
 	else
@@ -5269,6 +5913,9 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
 static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
 {
+	if (dev->phy.rev >= 7)
+		return;
+
 	if (dev->phy.rev >= 3) {
 		if (!init)
 			return;
@@ -5344,6 +5991,10 @@
 #endif
 		}
 	}
+	nphy->use_int_tx_iq_lo_cal = b43_nphy_ipa(dev) ||
+		phy->rev >= 7 ||
+		(phy->rev >= 5 &&
+		 sprom->boardflags2_hi & B43_BFH2_INTERNDET_TXIQCAL);
 	nphy->deaf_count = 0;
 	b43_nphy_tables_init(dev);
 	nphy->crsminpwr_adjusted = false;
@@ -5353,6 +6004,16 @@
 	if (dev->phy.rev >= 3) {
 		b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0);
 		b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+		if (phy->rev >= 7) {
+			b43_phy_write(dev, B43_NPHY_REV7_RF_CTL_OVER3, 0);
+			b43_phy_write(dev, B43_NPHY_REV7_RF_CTL_OVER4, 0);
+			b43_phy_write(dev, B43_NPHY_REV7_RF_CTL_OVER5, 0);
+			b43_phy_write(dev, B43_NPHY_REV7_RF_CTL_OVER6, 0);
+		}
+		if (phy->rev >= 19) {
+			/* TODO */
+		}
+
 		b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0);
 		b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0);
 	} else {
@@ -5390,7 +6051,9 @@
 	b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50);
 	b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30);
 
-	b43_nphy_update_mimo_config(dev, nphy->preamble_override);
+	if (phy->rev < 8)
+		b43_nphy_update_mimo_config(dev, nphy->preamble_override);
+
 	b43_nphy_update_txrx_chain(dev);
 
 	if (phy->rev < 2) {
@@ -5422,10 +6085,12 @@
 
 	b43_mac_phy_clock_set(dev, true);
 
-	b43_nphy_pa_override(dev, false);
-	b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
-	b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
-	b43_nphy_pa_override(dev, true);
+	if (phy->rev < 7) {
+		b43_nphy_pa_override(dev, false);
+		b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
+		b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+		b43_nphy_pa_override(dev, true);
+	}
 
 	b43_nphy_classifier(dev, 0, 0);
 	b43_nphy_read_clip_detection(dev, clip);
@@ -5548,23 +6213,23 @@
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = dev->phy.n;
 	int ch = new_channel->hw_value;
-
-	u16 old_band_5ghz;
 	u16 tmp16;
 
-	old_band_5ghz =
-		b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
-	if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
+	if (new_channel->band == IEEE80211_BAND_5GHZ) {
 		tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
 		b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
-		b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
+		/* Put the BPHY in reset */
+		b43_phy_set(dev, B43_PHY_B_BBCFG,
+			    B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX);
 		b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
 		b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
-	} else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) {
+	} else if (new_channel->band == IEEE80211_BAND_2GHZ) {
 		b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
 		tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
 		b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
-		b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF);
+		/* Take the BPHY out of reset */
+		b43_phy_mask(dev, B43_PHY_B_BBCFG,
+			     (u16)~(B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX));
 		b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
 	}
 
@@ -5589,31 +6254,45 @@
 
 	if (dev->phy.rev >= 3 &&
 	    dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) {
-		bool avoid = false;
+		u8 spuravoid = 0;
+
 		if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) {
-			avoid = true;
-		} else if (!b43_is_40mhz(dev)) {
-			if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
-				avoid = true;
-		} else { /* 40MHz */
-			if (nphy->aband_spurwar_en &&
-			    (ch == 38 || ch == 102 || ch == 118))
-				avoid = dev->dev->chip_id == 0x4716;
+			spuravoid = 1;
+		} else if (phy->rev >= 19) {
+			/* TODO */
+		} else if (phy->rev >= 18) {
+			/* TODO */
+		} else if (phy->rev >= 17) {
+			/* TODO: Off for channels 1-11, but check 12-14! */
+		} else if (phy->rev >= 16) {
+			/* TODO: Off for 2 GHz, but check 5 GHz! */
+		} else if (phy->rev >= 7) {
+			if (!b43_is_40mhz(dev)) { /* 20MHz */
+				if (ch == 13 || ch == 14 || ch == 153)
+					spuravoid = 1;
+			} else { /* 40 MHz */
+				if (ch == 54)
+					spuravoid = 1;
+			}
+		} else {
+			if (!b43_is_40mhz(dev)) { /* 20MHz */
+				if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
+					spuravoid = 1;
+			} else { /* 40MHz */
+				if (nphy->aband_spurwar_en &&
+				    (ch == 38 || ch == 102 || ch == 118))
+					spuravoid = dev->dev->chip_id == 0x4716;
+			}
 		}
 
-		b43_nphy_pmu_spur_avoid(dev, avoid);
+		b43_nphy_pmu_spur_avoid(dev, spuravoid);
 
-		if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 ||
-		    dev->dev->chip_id == 43225) {
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW,
-				    avoid ? 0x5341 : 0x8889);
-			b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
-		}
+		b43_mac_switch_freq(dev, spuravoid);
 
 		if (dev->phy.rev == 3 || dev->phy.rev == 4)
 			; /* TODO: reset PLL */
 
-		if (avoid)
+		if (spuravoid)
 			b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX);
 		else
 			b43_phy_mask(dev, B43_NPHY_BBCFG,
@@ -5644,7 +6323,10 @@
 
 	u8 tmp;
 
-	if (phy->rev >= 7) {
+	if (phy->rev >= 19) {
+		return -ESRCH;
+		/* TODO */
+	} else if (phy->rev >= 7) {
 		r2057_get_chantabent_rev7(dev, channel->center_freq,
 					  &tabent_r7, &tabent_r7_2g);
 		if (!tabent_r7 && !tabent_r7_2g)
@@ -5681,7 +6363,9 @@
 			b43_phy_mask(dev, 0x310, (u16)~0x8000);
 	}
 
-	if (phy->rev >= 7) {
+	if (phy->rev >= 19) {
+		/* TODO */
+	} else if (phy->rev >= 7) {
 		const struct b43_phy_n_sfo_cfg *phy_regs = tabent_r7 ?
 			&(tabent_r7->phy_regs) : &(tabent_r7_2g->phy_regs);
 
@@ -5834,7 +6518,7 @@
 static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
 {
 	/* Register 1 is a 32-bit register. */
-	B43_WARN_ON(reg == 1);
+	B43_WARN_ON(dev->phy.rev < 7 && reg == 1);
 
 	if (dev->phy.rev >= 7)
 		reg |= 0x200; /* Radio 0x2057 */
@@ -5848,7 +6532,7 @@
 static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
 {
 	/* Register 1 is a 32-bit register. */
-	B43_WARN_ON(reg == 1);
+	B43_WARN_ON(dev->phy.rev < 7 && reg == 1);
 
 	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
 	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
@@ -5858,15 +6542,23 @@
 static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
 					bool blocked)
 {
+	struct b43_phy *phy = &dev->phy;
+
 	if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
 		b43err(dev->wl, "MAC not suspended\n");
 
 	if (blocked) {
-		b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
-				~B43_NPHY_RFCTL_CMD_CHIP0PU);
-		if (dev->phy.rev >= 7) {
+		if (phy->rev >= 19) {
 			/* TODO */
-		} else if (dev->phy.rev >= 3) {
+		} else if (phy->rev >= 8) {
+			b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+				     ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+		} else if (phy->rev >= 7) {
+			/* Nothing needed */
+		} else if (phy->rev >= 3) {
+			b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+				     ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+
 			b43_radio_mask(dev, 0x09, ~0x2);
 
 			b43_radio_write(dev, 0x204D, 0);
@@ -5884,11 +6576,13 @@
 			b43_radio_write(dev, 0x3064, 0);
 		}
 	} else {
-		if (dev->phy.rev >= 7) {
+		if (phy->rev >= 19) {
+			/* TODO */
+		} else if (phy->rev >= 7) {
 			if (!dev->phy.radio_on)
 				b43_radio_2057_init(dev);
 			b43_switch_channel(dev, dev->phy.channel);
-		} else if (dev->phy.rev >= 3) {
+		} else if (phy->rev >= 3) {
 			if (!dev->phy.radio_on)
 				b43_radio_init2056(dev);
 			b43_switch_channel(dev, dev->phy.channel);
@@ -5901,10 +6595,13 @@
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
 static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
 {
+	struct b43_phy *phy = &dev->phy;
 	u16 override = on ? 0x0 : 0x7FFF;
 	u16 core = on ? 0xD : 0x00FD;
 
-	if (dev->phy.rev >= 3) {
+	if (phy->rev >= 19) {
+		/* TODO */
+	} else if (phy->rev >= 3) {
 		if (on) {
 			b43_phy_write(dev, B43_NPHY_AFECTL_C1, core);
 			b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, override);
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index ecfbf66..30bec81 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -366,11 +366,13 @@
 #define B43_NPHY_TXF_40CO_B1S0			B43_PHY_N(0x0E5) /* TX filter 40 coeff B1 stage 0 */
 #define B43_NPHY_TXF_40CO_B32S1			B43_PHY_N(0x0E6) /* TX filter 40 coeff B32 stage 1 */
 #define B43_NPHY_TXF_40CO_B1S1			B43_PHY_N(0x0E7) /* TX filter 40 coeff B1 stage 1 */
+#define B43_NPHY_REV3_RFCTL_OVER0		B43_PHY_N(0x0E7)
 #define B43_NPHY_TXF_40CO_B32S2			B43_PHY_N(0x0E8) /* TX filter 40 coeff B32 stage 2 */
 #define B43_NPHY_TXF_40CO_B1S2			B43_PHY_N(0x0E9) /* TX filter 40 coeff B1 stage 2 */
 #define B43_NPHY_BIST_STAT2			B43_PHY_N(0x0EA) /* BIST status 2 */
 #define B43_NPHY_BIST_STAT3			B43_PHY_N(0x0EB) /* BIST status 3 */
 #define B43_NPHY_RFCTL_OVER			B43_PHY_N(0x0EC) /* RF control override */
+#define B43_NPHY_REV3_RFCTL_OVER1		B43_PHY_N(0x0EC)
 #define B43_NPHY_MIMOCFG			B43_PHY_N(0x0ED) /* MIMO config */
 #define  B43_NPHY_MIMOCFG_GFMIX			0x0004 /* Greenfield or mixed mode */
 #define  B43_NPHY_MIMOCFG_AUTO			0x0100 /* Greenfield/mixed mode auto */
@@ -857,7 +859,18 @@
 #define B43_NPHY_REV3_C2_CLIP2_GAIN_A		B43_PHY_N(0x2AF)
 #define B43_NPHY_REV3_C2_CLIP2_GAIN_B		B43_PHY_N(0x2B0)
 
+#define B43_NPHY_REV7_RF_CTL_MISC_REG3		B43_PHY_N(0x340)
+#define B43_NPHY_REV7_RF_CTL_MISC_REG4		B43_PHY_N(0x341)
+#define B43_NPHY_REV7_RF_CTL_OVER3		B43_PHY_N(0x342)
+#define B43_NPHY_REV7_RF_CTL_OVER4		B43_PHY_N(0x343)
+#define B43_NPHY_REV7_RF_CTL_MISC_REG5		B43_PHY_N(0x344)
+#define B43_NPHY_REV7_RF_CTL_MISC_REG6		B43_PHY_N(0x345)
+#define B43_NPHY_REV7_RF_CTL_OVER5		B43_PHY_N(0x346)
+#define B43_NPHY_REV7_RF_CTL_OVER6		B43_PHY_N(0x347)
+
 #define B43_PHY_B_BBCFG				B43_PHY_N_BMODE(0x001) /* BB config */
+#define  B43_PHY_B_BBCFG_RSTCCA			0x4000 /* Reset CCA */
+#define  B43_PHY_B_BBCFG_RSTRX			0x8000 /* Reset RX */
 #define B43_PHY_B_TEST				B43_PHY_N_BMODE(0x00A)
 
 struct b43_wldev;
@@ -935,6 +948,8 @@
 	bool gain_boost;
 	bool elna_gain_config;
 	bool band5g_pwrgain;
+	bool use_int_tx_iq_lo_cal;
+	bool lpf_bw_overrode_for_sample_play;
 
 	u8 mphase_cal_phase_id;
 	u16 mphase_txcal_cmdidx;
diff --git a/drivers/net/wireless/b43/radio_2057.c b/drivers/net/wireless/b43/radio_2057.c
index df35745..ff1e026 100644
--- a/drivers/net/wireless/b43/radio_2057.c
+++ b/drivers/net/wireless/b43/radio_2057.c
@@ -105,6 +105,27 @@
 };
 */
 
+/* Extracted from MMIO dump of 6.30.223.141 */
+static u16 r2057_rev9_init[][2] = {
+	{ 0x27, 0x1f }, { 0x28, 0x0a }, { 0x29, 0x2f }, { 0x42, 0x1f },
+	{ 0x48, 0x3f }, { 0x5c, 0x41 }, { 0x63, 0x14 }, { 0x64, 0x12 },
+	{ 0x66, 0xff }, { 0x74, 0xa3 }, { 0x7b, 0x14 }, { 0x7c, 0x14 },
+	{ 0x7d, 0xee }, { 0x86, 0xc0 }, { 0xc4, 0x10 }, { 0xc9, 0x01 },
+	{ 0xe1, 0x41 }, { 0xe8, 0x14 }, { 0xe9, 0x12 }, { 0xeb, 0xff },
+	{ 0xf5, 0x0a }, { 0xf8, 0x09 }, { 0xf9, 0xa3 }, { 0x100, 0x14 },
+	{ 0x101, 0x10 }, { 0x102, 0xee }, { 0x10b, 0xc0 }, { 0x149, 0x10 },
+	{ 0x14e, 0x01 }, { 0x1b7, 0x05 }, { 0x1c2, 0xa0 },
+};
+
+/* Extracted from MMIO dump of 6.30.223.248 */
+static u16 r2057_rev14_init[][2] = {
+	{ 0x011, 0xfc }, { 0x030, 0x24 }, { 0x040, 0x1c }, { 0x082, 0x08 },
+	{ 0x0b4, 0x44 }, { 0x0c8, 0x01 }, { 0x0c9, 0x01 }, { 0x107, 0x08 },
+	{ 0x14d, 0x01 }, { 0x14e, 0x01 }, { 0x1af, 0x40 }, { 0x1b0, 0x40 },
+	{ 0x1cc, 0x01 }, { 0x1cf, 0x10 }, { 0x1d0, 0x0f }, { 0x1d3, 0x10 },
+	{ 0x1d4, 0x0f },
+};
+
 #define RADIOREGS7(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
 		   r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
 		   r20, r21, r22, r23, r24, r25, r26, r27) \
@@ -137,6 +158,27 @@
 	.radio_lna2g_tune_core1			= r26,	\
 	.radio_lna5g_tune_core1			= r27
 
+#define RADIOREGS7_2G(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
+		      r10, r11, r12, r13, r14, r15, r16, r17) \
+	.radio_vcocal_countval0			= r00,	\
+	.radio_vcocal_countval1			= r01,	\
+	.radio_rfpll_refmaster_sparextalsize	= r02,	\
+	.radio_rfpll_loopfilter_r1		= r03,	\
+	.radio_rfpll_loopfilter_c2		= r04,	\
+	.radio_rfpll_loopfilter_c1		= r05,	\
+	.radio_cp_kpd_idac			= r06,	\
+	.radio_rfpll_mmd0			= r07,	\
+	.radio_rfpll_mmd1			= r08,	\
+	.radio_vcobuf_tune			= r09,	\
+	.radio_logen_mx2g_tune			= r10,	\
+	.radio_logen_indbuf2g_tune		= r11,	\
+	.radio_txmix2g_tune_boost_pu_core0	= r12,	\
+	.radio_pad2g_tune_pus_core0		= r13,	\
+	.radio_lna2g_tune_core0			= r14,	\
+	.radio_txmix2g_tune_boost_pu_core1	= r15,	\
+	.radio_pad2g_tune_pus_core1		= r16,	\
+	.radio_lna2g_tune_core1			= r17
+
 #define PHYREGS(r0, r1, r2, r3, r4, r5)	\
 	.phy_regs.phy_bw1a	= r0,	\
 	.phy_regs.phy_bw2	= r1,	\
@@ -145,6 +187,353 @@
 	.phy_regs.phy_bw5	= r4,	\
 	.phy_regs.phy_bw6	= r5
 
+/* Copied from brcmsmac (5.75.11): chan_info_nphyrev8_2057_rev5 */
+static const struct b43_nphy_chantabent_rev7_2g b43_nphy_chantab_phy_rev8_radio_rev5[] = {
+	{
+		.freq			= 2412,
+		RADIOREGS7_2G(0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c,
+			      0x09, 0x0d, 0x08, 0x0e, 0x61, 0x03, 0xff, 0x61,
+			      0x03, 0xff),
+		PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+	},
+	{
+		.freq			= 2417,
+		RADIOREGS7_2G(0x4b, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x71,
+			      0x09, 0x0d, 0x08, 0x0e, 0x61, 0x03, 0xff, 0x61,
+			      0x03, 0xff),
+		PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+	},
+	{
+		.freq			= 2422,
+		RADIOREGS7_2G(0x4e, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x76,
+			      0x09, 0x0d, 0x08, 0x0e, 0x61, 0x03, 0xef, 0x61,
+			      0x03, 0xef),
+		PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+	},
+	{
+		.freq			= 2427,
+		RADIOREGS7_2G(0x52, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x7b,
+			      0x09, 0x0c, 0x08, 0x0e, 0x61, 0x03, 0xdf, 0x61,
+			      0x03, 0xdf),
+		PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+	},
+	{
+		.freq			= 2432,
+		RADIOREGS7_2G(0x55, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x80,
+			      0x09, 0x0c, 0x07, 0x0d, 0x61, 0x03, 0xcf, 0x61,
+			      0x03, 0xcf),
+		PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+	},
+	{
+		.freq			= 2437,
+		RADIOREGS7_2G(0x58, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x85,
+			      0x09, 0x0c, 0x07, 0x0d, 0x61, 0x03, 0xbf, 0x61,
+			      0x03, 0xbf),
+		PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+	},
+	{
+		.freq			= 2442,
+		RADIOREGS7_2G(0x5c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8a,
+			      0x09, 0x0b, 0x07, 0x0d, 0x61, 0x03, 0xaf, 0x61,
+			      0x03, 0xaf),
+		PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+	},
+	{
+		.freq			= 2447,
+		RADIOREGS7_2G(0x5f, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8f,
+			      0x09, 0x0b, 0x07, 0x0d, 0x61, 0x03, 0x9f, 0x61,
+			      0x03, 0x9f),
+		PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+	},
+	{
+		.freq			= 2452,
+		RADIOREGS7_2G(0x62, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x94,
+			      0x09, 0x0b, 0x07, 0x0d, 0x61, 0x03, 0x8f, 0x61,
+			      0x03, 0x8f),
+		PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+	},
+	{
+		.freq			= 2457,
+		RADIOREGS7_2G(0x66, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x99,
+			      0x09, 0x0b, 0x07, 0x0c, 0x61, 0x03, 0x7f, 0x61,
+			      0x03, 0x7f),
+		PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+	},
+	{
+		.freq			= 2462,
+		RADIOREGS7_2G(0x69, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x9e,
+			      0x09, 0x0b, 0x07, 0x0c, 0x61, 0x03, 0x6f, 0x61,
+			      0x03, 0x6f),
+		PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+	},
+	{
+		.freq			= 2467,
+		RADIOREGS7_2G(0x6c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0xa3,
+			      0x09, 0x0b, 0x06, 0x0c, 0x61, 0x03, 0x5f, 0x61,
+			      0x03, 0x5f),
+		PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+	},
+	{
+		.freq			= 2472,
+		RADIOREGS7_2G(0x70, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0xa8,
+			      0x09, 0x0a, 0x06, 0x0b, 0x61, 0x03, 0x4f, 0x61,
+			      0x03, 0x4f),
+		PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+	},
+	{
+		.freq			= 2484,
+		RADIOREGS7_2G(0x78, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0xb4,
+			      0x09, 0x0a, 0x06, 0x0b, 0x61, 0x03, 0x3f, 0x61,
+			      0x03, 0x3f),
+		PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+	}
+};
+
+/* Extracted from MMIO dump of 6.30.223.248 */
+static const struct b43_nphy_chantabent_rev7_2g b43_nphy_chantab_phy_rev17_radio_rev14[] = {
+	{
+		.freq			= 2412,
+		RADIOREGS7_2G(0x48, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x6c,
+			      0x09, 0x0d, 0x09, 0x03, 0x21, 0x53, 0xff, 0x21,
+			      0x53, 0xff),
+		PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+	},
+	{
+		.freq			= 2417,
+		RADIOREGS7_2G(0x4b, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x71,
+			      0x09, 0x0d, 0x08, 0x03, 0x21, 0x53, 0xff, 0x21,
+			      0x53, 0xff),
+		PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+	},
+	{
+		.freq			= 2422,
+		RADIOREGS7_2G(0x4e, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x76,
+			      0x09, 0x0d, 0x08, 0x03, 0x21, 0x53, 0xff, 0x21,
+			      0x53, 0xff),
+		PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+	},
+	{
+		.freq			= 2427,
+		RADIOREGS7_2G(0x52, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x7b,
+			      0x09, 0x0c, 0x08, 0x03, 0x21, 0x53, 0xff, 0x21,
+			      0x53, 0xff),
+		PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+	},
+	{
+		.freq			= 2432,
+		RADIOREGS7_2G(0x55, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x80,
+			      0x09, 0x0c, 0x08, 0x03, 0x21, 0x53, 0xff, 0x21,
+			      0x53, 0xff),
+		PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+	},
+	{
+		.freq			= 2437,
+		RADIOREGS7_2G(0x58, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x85,
+			      0x09, 0x0c, 0x08, 0x03, 0x21, 0x53, 0xff, 0x21,
+			      0x53, 0xff),
+		PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+	},
+	{
+		.freq			= 2442,
+		RADIOREGS7_2G(0x5c, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x8a,
+			      0x09, 0x0c, 0x08, 0x03, 0x21, 0x43, 0xff, 0x21,
+			      0x43, 0xff),
+		PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+	},
+	{
+		.freq			= 2447,
+		RADIOREGS7_2G(0x5f, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x8f,
+			      0x09, 0x0c, 0x08, 0x03, 0x21, 0x43, 0xff, 0x21,
+			      0x43, 0xff),
+		PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+	},
+	{
+		.freq			= 2452,
+		RADIOREGS7_2G(0x62, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x94,
+			      0x09, 0x0c, 0x08, 0x03, 0x21, 0x43, 0xff, 0x21,
+			      0x43, 0xff),
+		PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+	},
+	{
+		.freq			= 2457,
+		RADIOREGS7_2G(0x66, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x99,
+			      0x09, 0x0b, 0x07, 0x03, 0x21, 0x43, 0xff, 0x21,
+			      0x43, 0xff),
+		PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+	},
+	{
+		.freq			= 2462,
+		RADIOREGS7_2G(0x69, 0x16, 0x30, 0x2b, 0x1f, 0x1f, 0x30, 0x9e,
+			      0x09, 0x0b, 0x07, 0x03, 0x01, 0x43, 0xff, 0x01,
+			      0x43, 0xff),
+		PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+	},
+};
+
+/* Extracted from MMIO dump of 6.30.223.141 */
+static const struct b43_nphy_chantabent_rev7 b43_nphy_chantab_phy_rev16_radio_rev9[] = {
+	{
+		.freq			= 2412,
+		RADIOREGS7(0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c,
+			   0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+	},
+	{
+		.freq			= 2417,
+		RADIOREGS7(0x4b, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x71,
+			   0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+	},
+	{
+		.freq			= 2422,
+		RADIOREGS7(0x4e, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x76,
+			   0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+	},
+	{
+		.freq			= 2427,
+		RADIOREGS7(0x52, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x7b,
+			   0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+	},
+	{
+		.freq			= 2432,
+		RADIOREGS7(0x55, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x80,
+			   0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+	},
+	{
+		.freq			= 2437,
+		RADIOREGS7(0x58, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x85,
+			   0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+	},
+	{
+		.freq			= 2442,
+		RADIOREGS7(0x5c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8a,
+			   0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+	},
+	{
+		.freq			= 2447,
+		RADIOREGS7(0x5f, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8f,
+			   0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+	},
+	{
+		.freq			= 2452,
+		RADIOREGS7(0x62, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x94,
+			   0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+	},
+	{
+		.freq			= 2457,
+		RADIOREGS7(0x66, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x99,
+			   0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+	},
+	{
+		.freq			= 2462,
+		RADIOREGS7(0x69, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x9e,
+			   0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x41, 0x63,
+			   0x00, 0x00, 0x00, 0xf0, 0x00, 0x41, 0x63, 0x00,
+			   0x00, 0x00, 0xf0, 0x00),
+		PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+	},
+	{
+		.freq			= 5180,
+		RADIOREGS7(0xbe, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x06,
+			   0x02, 0x0e, 0x00, 0x0e, 0x00, 0x9e, 0x00, 0x00,
+			   0x9f, 0x2f, 0xa3, 0x00, 0xfc, 0x00, 0x00, 0x4f,
+			   0x3a, 0x83, 0x00, 0xfc),
+		PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+	},
+	{
+		.freq			= 5200,
+		RADIOREGS7(0xc5, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x08,
+			   0x02, 0x0e, 0x00, 0x0e, 0x00, 0x9e, 0x00, 0x00,
+			   0x7f, 0x2f, 0x83, 0x00, 0xf8, 0x00, 0x00, 0x4c,
+			   0x4a, 0x83, 0x00, 0xf8),
+		PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+	},
+	{
+		.freq			= 5220,
+		RADIOREGS7(0xcc, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x0a,
+			   0x02, 0x0e, 0x00, 0x0e, 0x00, 0x9e, 0x00, 0x00,
+			   0x6d, 0x3d, 0x83, 0x00, 0xf8, 0x00, 0x00, 0x2d,
+			   0x2a, 0x73, 0x00, 0xf8),
+		PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+	},
+	{
+		.freq			= 5240,
+		RADIOREGS7(0xd2, 0x16, 0x10, 0x1f, 0x08, 0x08, 0x3f, 0x0c,
+			   0x02, 0x0d, 0x00, 0x0d, 0x00, 0x8d, 0x00, 0x00,
+			   0x4d, 0x1c, 0x73, 0x00, 0xf8, 0x00, 0x00, 0x4d,
+			   0x2b, 0x73, 0x00, 0xf8),
+		PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+	},
+	{
+		.freq			= 5745,
+		RADIOREGS7(0x7b, 0x17, 0x20, 0x1f, 0x08, 0x08, 0x3f, 0x7d,
+			   0x04, 0x08, 0x00, 0x06, 0x00, 0x15, 0x00, 0x00,
+			   0x08, 0x03, 0x03, 0x00, 0x30, 0x00, 0x00, 0x06,
+			   0x02, 0x03, 0x00, 0x30),
+		PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+	},
+	{
+		.freq			= 5765,
+		RADIOREGS7(0x81, 0x17, 0x20, 0x1f, 0x08, 0x08, 0x3f, 0x81,
+			   0x04, 0x08, 0x00, 0x06, 0x00, 0x15, 0x00, 0x00,
+			   0x06, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x05,
+			   0x02, 0x03, 0x00, 0x00),
+		PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+	},
+	{
+		.freq			= 5785,
+		RADIOREGS7(0x88, 0x17, 0x20, 0x1f, 0x08, 0x08, 0x3f, 0x85,
+			   0x04, 0x08, 0x00, 0x06, 0x00, 0x15, 0x00, 0x00,
+			   0x08, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x05,
+			   0x21, 0x03, 0x00, 0x00),
+		PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+	},
+	{
+		.freq			= 5805,
+		RADIOREGS7(0x8f, 0x17, 0x20, 0x1f, 0x08, 0x08, 0x3f, 0x89,
+			   0x04, 0x07, 0x00, 0x06, 0x00, 0x04, 0x00, 0x00,
+			   0x06, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x03,
+			   0x00, 0x03, 0x00, 0x00),
+		PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+	},
+	{
+		.freq			= 5825,
+		RADIOREGS7(0x95, 0x17, 0x20, 0x1f, 0x08, 0x08, 0x3f, 0x8d,
+			   0x04, 0x07, 0x00, 0x05, 0x00, 0x03, 0x00, 0x00,
+			   0x05, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x03,
+			   0x00, 0x03, 0x00, 0x00),
+		PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+	},
+};
+
 void r2057_upload_inittabs(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
@@ -171,6 +560,18 @@
 			size = ARRAY_SIZE(r2057_rev5a_init);
 		}
 		break;
+	case 16:
+		if (phy->radio_rev == 9) {
+			table = r2057_rev9_init[0];
+			size = ARRAY_SIZE(r2057_rev9_init);
+		}
+		break;
+	case 17:
+		if (phy->radio_rev == 14) {
+			table = r2057_rev14_init[0];
+			size = ARRAY_SIZE(r2057_rev14_init);
+		}
+		break;
 	}
 
 	B43_WARN_ON(!table);
@@ -193,8 +594,25 @@
 	*tabent_r7 = NULL;
 	*tabent_r7_2g = NULL;
 
-	/* TODO */
 	switch (phy->rev) {
+	case 8:
+		if (phy->radio_rev == 5) {
+			e_r7_2g = b43_nphy_chantab_phy_rev8_radio_rev5;
+			len = ARRAY_SIZE(b43_nphy_chantab_phy_rev8_radio_rev5);
+		}
+		break;
+	case 16:
+		if (phy->radio_rev == 9) {
+			e_r7 = b43_nphy_chantab_phy_rev16_radio_rev9;
+			len = ARRAY_SIZE(b43_nphy_chantab_phy_rev16_radio_rev9);
+		}
+		break;
+	case 17:
+		if (phy->radio_rev == 14) {
+			e_r7_2g = b43_nphy_chantab_phy_rev17_radio_rev14;
+			len = ARRAY_SIZE(b43_nphy_chantab_phy_rev17_radio_rev14);
+		}
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/net/wireless/b43/radio_2057.h b/drivers/net/wireless/b43/radio_2057.h
index 675d1bb..220d080 100644
--- a/drivers/net/wireless/b43/radio_2057.h
+++ b/drivers/net/wireless/b43/radio_2057.h
@@ -84,6 +84,8 @@
 #define R2057_CMOSBUF_RX_RCCR			0x04c
 #define R2057_LOGEN_SEL_PKDET			0x04d
 #define R2057_CMOSBUF_SHAREIQ_PTAT		0x04e
+
+/* MISC core 0 */
 #define R2057_RXTXBIAS_CONFIG_CORE0		0x04f
 #define R2057_TXGM_TXRF_PUS_CORE0		0x050
 #define R2057_TXGM_IDAC_BLEED_CORE0		0x051
@@ -204,6 +206,8 @@
 #define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE0	0x0d1
 #define R2057_LPF_GAIN_CORE0			0x0d2
 #define R2057_DACBUF_IDACS_BW_CORE0		0x0d3
+
+/* MISC core 1 */
 #define R2057_RXTXBIAS_CONFIG_CORE1		0x0d4
 #define R2057_TXGM_TXRF_PUS_CORE1		0x0d5
 #define R2057_TXGM_IDAC_BLEED_CORE1		0x0d6
@@ -324,6 +328,7 @@
 #define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE1	0x156
 #define R2057_LPF_GAIN_CORE1			0x157
 #define R2057_DACBUF_IDACS_BW_CORE1		0x158
+
 #define R2057_DACBUF_VINCM_CORE1		0x159
 #define R2057_RCCAL_START_R1_Q1_P1		0x15a
 #define R2057_RCCAL_X1				0x15b
@@ -345,6 +350,8 @@
 #define R2057_RCCAL_BCAP_VAL			0x16b
 #define R2057_RCCAL_HPC_VAL			0x16c
 #define R2057_RCCAL_OVERRIDES			0x16d
+
+/* TX core 0 */
 #define R2057_TX0_IQCAL_GAIN_BW			0x170
 #define R2057_TX0_LOFT_FINE_I			0x171
 #define R2057_TX0_LOFT_FINE_Q			0x172
@@ -362,6 +369,8 @@
 #define R2057_TX0_TXRXCOUPLE_2G_PWRUP		0x17e
 #define R2057_TX0_TXRXCOUPLE_5G_ATTEN		0x17f
 #define R2057_TX0_TXRXCOUPLE_5G_PWRUP		0x180
+
+/* TX core 1 */
 #define R2057_TX1_IQCAL_GAIN_BW			0x190
 #define R2057_TX1_LOFT_FINE_I			0x191
 #define R2057_TX1_LOFT_FINE_Q			0x192
@@ -379,6 +388,7 @@
 #define R2057_TX1_TXRXCOUPLE_2G_PWRUP		0x19e
 #define R2057_TX1_TXRXCOUPLE_5G_ATTEN		0x19f
 #define R2057_TX1_TXRXCOUPLE_5G_PWRUP		0x1a0
+
 #define R2057_AFE_VCM_CAL_MASTER_CORE0		0x1a1
 #define R2057_AFE_SET_VCM_I_CORE0		0x1a2
 #define R2057_AFE_SET_VCM_Q_CORE0		0x1a3
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index b28dce9..4b58850 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2408,6 +2408,41 @@
 	0x1041003c, 0x1041003b, 0x10410039, 0x10410037,
 };
 
+static const u32 b43_ntab_tx_gain_epa_rev3_hi_pwr_2g[] = {
+	0x0f410044, 0x0f410042, 0x0f410040, 0x0f41003e,
+	0x0f41003c, 0x0f41003b, 0x0f410039, 0x0f410037,
+	0x0e410044, 0x0e410042, 0x0e410040, 0x0e41003e,
+	0x0e41003c, 0x0e41003b, 0x0e410039, 0x0e410037,
+	0x0d410044, 0x0d410042, 0x0d410040, 0x0d41003e,
+	0x0d41003c, 0x0d41003b, 0x0d410039, 0x0d410037,
+	0x0c410044, 0x0c410042, 0x0c410040, 0x0c41003e,
+	0x0c41003c, 0x0c41003b, 0x0c410039, 0x0c410037,
+	0x0b410044, 0x0b410042, 0x0b410040, 0x0b41003e,
+	0x0b41003c, 0x0b41003b, 0x0b410039, 0x0b410037,
+	0x0a410044, 0x0a410042, 0x0a410040, 0x0a41003e,
+	0x0a41003c, 0x0a41003b, 0x0a410039, 0x0a410037,
+	0x09410044, 0x09410042, 0x09410040, 0x0941003e,
+	0x0941003c, 0x0941003b, 0x09410039, 0x09410037,
+	0x08410044, 0x08410042, 0x08410040, 0x0841003e,
+	0x0841003c, 0x0841003b, 0x08410039, 0x08410037,
+	0x07410044, 0x07410042, 0x07410040, 0x0741003e,
+	0x0741003c, 0x0741003b, 0x07410039, 0x07410037,
+	0x06410044, 0x06410042, 0x06410040, 0x0641003e,
+	0x0641003c, 0x0641003b, 0x06410039, 0x06410037,
+	0x05410044, 0x05410042, 0x05410040, 0x0541003e,
+	0x0541003c, 0x0541003b, 0x05410039, 0x05410037,
+	0x04410044, 0x04410042, 0x04410040, 0x0441003e,
+	0x0441003c, 0x0441003b, 0x04410039, 0x04410037,
+	0x03410044, 0x03410042, 0x03410040, 0x0341003e,
+	0x0341003c, 0x0341003b, 0x03410039, 0x03410037,
+	0x02410044, 0x02410042, 0x02410040, 0x0241003e,
+	0x0241003c, 0x0241003b, 0x02410039, 0x02410037,
+	0x01410044, 0x01410042, 0x01410040, 0x0141003e,
+	0x0141003c, 0x0141003b, 0x01410039, 0x01410037,
+	0x00410044, 0x00410042, 0x00410040, 0x0041003e,
+	0x0041003c, 0x0041003b, 0x00410039, 0x00410037
+};
+
 /* EPA 5 GHz */
 
 static const u32 b43_ntab_tx_gain_epa_rev3_5g[] = {
@@ -2480,6 +2515,41 @@
 	0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034,
 };
 
+static const u32 b43_ntab_tx_gain_epa_rev4_hi_pwr_5g[] = {
+	0x2ff10044, 0x2ff10042, 0x2ff10040, 0x2ff1003e,
+	0x2ff1003c, 0x2ff1003b, 0x2ff10039, 0x2ff10037,
+	0x2ef10044, 0x2ef10042, 0x2ef10040, 0x2ef1003e,
+	0x2ef1003c, 0x2ef1003b, 0x2ef10039, 0x2ef10037,
+	0x2df10044, 0x2df10042, 0x2df10040, 0x2df1003e,
+	0x2df1003c, 0x2df1003b, 0x2df10039, 0x2df10037,
+	0x2cf10044, 0x2cf10042, 0x2cf10040, 0x2cf1003e,
+	0x2cf1003c, 0x2cf1003b, 0x2cf10039, 0x2cf10037,
+	0x2bf10044, 0x2bf10042, 0x2bf10040, 0x2bf1003e,
+	0x2bf1003c, 0x2bf1003b, 0x2bf10039, 0x2bf10037,
+	0x2af10044, 0x2af10042, 0x2af10040, 0x2af1003e,
+	0x2af1003c, 0x2af1003b, 0x2af10039, 0x2af10037,
+	0x29f10044, 0x29f10042, 0x29f10040, 0x29f1003e,
+	0x29f1003c, 0x29f1003b, 0x29f10039, 0x29f10037,
+	0x28f10044, 0x28f10042, 0x28f10040, 0x28f1003e,
+	0x28f1003c, 0x28f1003b, 0x28f10039, 0x28f10037,
+	0x27f10044, 0x27f10042, 0x27f10040, 0x27f1003e,
+	0x27f1003c, 0x27f1003b, 0x27f10039, 0x27f10037,
+	0x26f10044, 0x26f10042, 0x26f10040, 0x26f1003e,
+	0x26f1003c, 0x26f1003b, 0x26f10039, 0x26f10037,
+	0x25f10044, 0x25f10042, 0x25f10040, 0x25f1003e,
+	0x25f1003c, 0x25f1003b, 0x25f10039, 0x25f10037,
+	0x24f10044, 0x24f10042, 0x24f10040, 0x24f1003e,
+	0x24f1003c, 0x24f1003b, 0x24f10039, 0x24f10038,
+	0x23f10041, 0x23f10040, 0x23f1003f, 0x23f1003e,
+	0x23f1003c, 0x23f1003b, 0x23f10039, 0x23f10037,
+	0x22f10044, 0x22f10042, 0x22f10040, 0x22f1003e,
+	0x22f1003c, 0x22f1003b, 0x22f10039, 0x22f10037,
+	0x21f10044, 0x21f10042, 0x21f10040, 0x21f1003e,
+	0x21f1003c, 0x21f1003b, 0x21f10039, 0x21f10037,
+	0x20d10043, 0x20d10041, 0x20d1003e, 0x20d1003c,
+	0x20d1003a, 0x20d10038, 0x20d10036, 0x20d10034
+};
+
 static const u32 b43_ntab_tx_gain_epa_rev5_5g[] = {
 	0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044,
 	0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c,
@@ -2622,6 +2692,42 @@
 	0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025,
 };
 
+/* Copied from brcmsmac (5.75.11): nphy_tpc_txgain_ipa_2g_2057rev5 */
+static const u32 b43_ntab_tx_gain_ipa_2057_rev5_2g[] = {
+	0x30ff0031, 0x30e70031, 0x30e7002e, 0x30cf002e,
+	0x30bf002e, 0x30af002e, 0x309f002f, 0x307f0033,
+	0x307f0031, 0x307f002e, 0x3077002e, 0x306f002e,
+	0x3067002e, 0x305f002f, 0x30570030, 0x3057002d,
+	0x304f002e, 0x30470031, 0x3047002e, 0x3047002c,
+	0x30470029, 0x303f002c, 0x303f0029, 0x3037002d,
+	0x3037002a, 0x30370028, 0x302f002c, 0x302f002a,
+	0x302f0028, 0x302f0026, 0x3027002c, 0x30270029,
+	0x30270027, 0x30270025, 0x30270023, 0x301f002c,
+	0x301f002a, 0x301f0028, 0x301f0025, 0x301f0024,
+	0x301f0022, 0x301f001f, 0x3017002d, 0x3017002b,
+	0x30170028, 0x30170026, 0x30170024, 0x30170022,
+	0x30170020, 0x3017001e, 0x3017001d, 0x3017001b,
+	0x3017001a, 0x30170018, 0x30170017, 0x30170015,
+	0x300f002c, 0x300f0029, 0x300f0027, 0x300f0024,
+	0x300f0022, 0x300f0021, 0x300f001f, 0x300f001d,
+	0x300f001b, 0x300f001a, 0x300f0018, 0x300f0017,
+	0x300f0016, 0x300f0015, 0x300f0115, 0x300f0215,
+	0x300f0315, 0x300f0415, 0x300f0515, 0x300f0615,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+	0x300f0715, 0x300f0715, 0x300f0715, 0x300f0715,
+};
+
 /* Extracted from MMIO dump of 6.30.223.141 */
 static const u32 b43_ntab_tx_gain_ipa_2057_rev9_2g[] = {
 	0x60ff0031, 0x60e7002c, 0x60cf002a, 0x60c70029,
@@ -2658,6 +2764,42 @@
 	0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
 };
 
+/* Extracted from MMIO dump of 6.30.223.248 */
+static const u32 b43_ntab_tx_gain_ipa_2057_rev14_2g[] = {
+	0x50df002e, 0x50cf002d, 0x50bf002c, 0x50b7002b,
+	0x50af002a, 0x50a70029, 0x509f0029, 0x50970028,
+	0x508f0027, 0x50870027, 0x507f0027, 0x50770027,
+	0x506f0027, 0x50670027, 0x505f0028, 0x50570029,
+	0x504f002b, 0x5047002e, 0x5047002b, 0x50470029,
+	0x503f002c, 0x503f0029, 0x5037002c, 0x5037002a,
+	0x50370028, 0x502f002d, 0x502f002b, 0x502f0028,
+	0x502f0026, 0x5027002d, 0x5027002a, 0x50270028,
+	0x50270026, 0x50270024, 0x501f002e, 0x501f002b,
+	0x501f0029, 0x501f0027, 0x501f0024, 0x501f0022,
+	0x501f0020, 0x501f001f, 0x5017002c, 0x50170029,
+	0x50170027, 0x50170024, 0x50170022, 0x50170021,
+	0x5017001f, 0x5017001d, 0x5017001b, 0x5017001a,
+	0x50170018, 0x50170017, 0x50170015, 0x500f002c,
+	0x500f002a, 0x500f0027, 0x500f0025, 0x500f0023,
+	0x500f0022, 0x500f001f, 0x500f001e, 0x500f001c,
+	0x500f001a, 0x500f0019, 0x500f0018, 0x500f0016,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+	0x500f0015, 0x500f0015, 0x500f0015, 0x500f0015,
+};
+
 /* IPA 5 GHz */
 
 static const u32 b43_ntab_tx_gain_ipa_rev3_5g[] = {
@@ -2967,11 +3109,11 @@
 	{ 0x0010, 0x07A, 0x07D, 0x0010, 4 },
 	{ 0x0020, 0x07A, 0x07D, 0x0020, 5 },
 	{ 0x0040, 0x07A, 0x07D, 0x0040, 6 },
-	{ 0x0080, 0x0F8, 0x0FA, 0x0080, 7 },
+	{ 0x0080, 0x07A, 0x07D, 0x0080, 7 },
 	{ 0x0400, 0x0F8, 0x0FA, 0x0070, 4 },
 	{ 0x0800, 0x07B, 0x07E, 0xFFFF, 0 },
 	{ 0x1000, 0x07C, 0x07F, 0xFFFF, 0 },
-	{ 0x6000, 0x348, 0x349, 0xFFFF, 0 },
+	{ 0x6000, 0x348, 0x349, 0x00FF, 0 },
 	{ 0x2000, 0x348, 0x349, 0x000F, 0 },
 };
 
@@ -3477,9 +3619,18 @@
 
 	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
 		switch (phy->rev) {
+		case 17:
+			if (phy->radio_rev == 14)
+				return b43_ntab_tx_gain_ipa_2057_rev14_2g;
+			break;
 		case 16:
 			if (phy->radio_rev == 9)
 				return b43_ntab_tx_gain_ipa_2057_rev9_2g;
+			break;
+		case 8:
+			if (phy->radio_rev == 5)
+				return b43_ntab_tx_gain_ipa_2057_rev5_2g;
+			break;
 		case 6:
 			if (dev->dev->chip_id == BCMA_CHIP_ID_BCM47162)
 				return b43_ntab_tx_gain_ipa_rev5_2g;
@@ -3489,23 +3640,24 @@
 		case 4:
 		case 3:
 			return b43_ntab_tx_gain_ipa_rev3_2g;
-		default:
-			b43err(dev->wl,
-			       "No 2GHz IPA gain table available for this device\n");
-			return NULL;
 		}
+
+		b43err(dev->wl,
+		       "No 2GHz IPA gain table available for this device\n");
+		return NULL;
 	} else {
 		switch (phy->rev) {
 		case 16:
 			if (phy->radio_rev == 9)
 				return b43_ntab_tx_gain_ipa_2057_rev9_5g;
+			break;
 		case 3 ... 6:
 			return b43_ntab_tx_gain_ipa_rev3_5g;
-		default:
-			b43err(dev->wl,
-			       "No 5GHz IPA gain table available for this device\n");
-			return NULL;
 		}
+
+		b43err(dev->wl,
+		       "No 5GHz IPA gain table available for this device\n");
+		return NULL;
 	}
 }
 
@@ -3530,7 +3682,7 @@
 		case 4:
 			return sprom->fem.ghz5.extpa_gain == 3 ?
 				b43_ntab_tx_gain_epa_rev4_5g :
-				b43_ntab_tx_gain_epa_rev4_5g; /* FIXME */
+				b43_ntab_tx_gain_epa_rev4_hi_pwr_5g;
 		case 3:
 			return b43_ntab_tx_gain_epa_rev3_5g;
 		default:
@@ -3543,7 +3695,7 @@
 		case 6:
 		case 5:
 			if (sprom->fem.ghz5.extpa_gain == 3)
-				return b43_ntab_tx_gain_epa_rev3_2g; /* FIXME */
+				return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
 			/* fall through */
 		case 4:
 		case 3:
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 6e6ef3f..426dc13 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -80,9 +80,10 @@
 }
 
 /* Extract the bitrate index out of an OFDM PLCP header. */
-static int b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
+static int b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool ghz5)
 {
-	int base = aphy ? 0 : 4;
+	/* For 2 GHz band first OFDM rate is at index 4, see main.c */
+	int base = ghz5 ? 0 : 4;
 
 	switch (plcp->raw[0] & 0xF) {
 	case 0xB:
@@ -767,7 +768,7 @@
 
 	if (phystat0 & B43_RX_PHYST0_OFDM)
 		rate_idx = b43_plcp_get_bitrate_idx_ofdm(plcp,
-						phytype == B43_PHYTYPE_A);
+					!!(chanstat & B43_RX_CHAN_5GHZ));
 	else
 		rate_idx = b43_plcp_get_bitrate_idx_cck(plcp);
 	if (unlikely(rate_idx == -1)) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 4cffb2e..de0cff3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -34,6 +34,7 @@
 		dhd_common.o \
 		dhd_linux.o \
 		firmware.o \
+		feature.o \
 		btcoex.o \
 		vendor.o
 brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index a16e644..f467caf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -25,7 +25,6 @@
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/core.h>
 #include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
 #include <linux/platform_device.h>
@@ -979,18 +978,20 @@
 	return ret;
 }
 
+#define BRCMF_SDIO_DEVICE(dev_id)	\
+	{SDIO_DEVICE(BRCM_SDIO_VENDOR_ID_BROADCOM, dev_id)}
+
 /* devices we support, null terminated */
 static const struct sdio_device_id brcmf_sdmmc_ids[] = {
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
-		     SDIO_DEVICE_ID_BROADCOM_4335_4339)},
-	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354)},
-	{ /* end: all zeroes */ },
+	BRCMF_SDIO_DEVICE(BRCM_SDIO_43143_DEVICE_ID),
+	BRCMF_SDIO_DEVICE(BRCM_SDIO_43241_DEVICE_ID),
+	BRCMF_SDIO_DEVICE(BRCM_SDIO_4329_DEVICE_ID),
+	BRCMF_SDIO_DEVICE(BRCM_SDIO_4330_DEVICE_ID),
+	BRCMF_SDIO_DEVICE(BRCM_SDIO_4334_DEVICE_ID),
+	BRCMF_SDIO_DEVICE(BRCM_SDIO_43362_DEVICE_ID),
+	BRCMF_SDIO_DEVICE(BRCM_SDIO_4335_4339_DEVICE_ID),
+	BRCMF_SDIO_DEVICE(BRCM_SDIO_4354_DEVICE_ID),
+	{ /* end: all zeroes */ }
 };
 MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index c7c9f15..96800db 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -482,30 +482,30 @@
 static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
 {
 	switch (ci->pub.chip) {
-	case BCM4329_CHIP_ID:
+	case BRCM_CC_4329_CHIP_ID:
 		ci->pub.ramsize = BCM4329_RAMSIZE;
 		break;
-	case BCM43143_CHIP_ID:
+	case BRCM_CC_43143_CHIP_ID:
 		ci->pub.ramsize = BCM43143_RAMSIZE;
 		break;
-	case BCM43241_CHIP_ID:
+	case BRCM_CC_43241_CHIP_ID:
 		ci->pub.ramsize = 0x90000;
 		break;
-	case BCM4330_CHIP_ID:
+	case BRCM_CC_4330_CHIP_ID:
 		ci->pub.ramsize = 0x48000;
 		break;
-	case BCM4334_CHIP_ID:
+	case BRCM_CC_4334_CHIP_ID:
 		ci->pub.ramsize = 0x80000;
 		break;
-	case BCM4335_CHIP_ID:
+	case BRCM_CC_4335_CHIP_ID:
 		ci->pub.ramsize = 0xc0000;
 		ci->pub.rambase = 0x180000;
 		break;
-	case BCM43362_CHIP_ID:
+	case BRCM_CC_43362_CHIP_ID:
 		ci->pub.ramsize = 0x3c000;
 		break;
-	case BCM4339_CHIP_ID:
-	case BCM4354_CHIP_ID:
+	case BRCM_CC_4339_CHIP_ID:
+	case BRCM_CC_4354_CHIP_ID:
 		ci->pub.ramsize = 0xc0000;
 		ci->pub.rambase = 0x180000;
 		break;
@@ -682,7 +682,7 @@
 		  ci->pub.chiprev);
 
 	if (socitype == SOCI_SB) {
-		if (ci->pub.chip != BCM4329_CHIP_ID) {
+		if (ci->pub.chip != BRCM_CC_4329_CHIP_ID) {
 			brcmf_err("SB chip is not supported\n");
 			return -ENODEV;
 		}
@@ -1008,13 +1008,13 @@
 	chip = container_of(pub, struct brcmf_chip_priv, pub);
 
 	switch (pub->chip) {
-	case BCM4354_CHIP_ID:
+	case BRCM_CC_4354_CHIP_ID:
 		/* explicitly check SR engine enable bit */
 		pmu_cc3_mask = BIT(2);
 		/* fall-through */
-	case BCM43241_CHIP_ID:
-	case BCM4335_CHIP_ID:
-	case BCM4339_CHIP_ID:
+	case BRCM_CC_43241_CHIP_ID:
+	case BRCM_CC_4335_CHIP_ID:
+	case BRCM_CC_4339_CHIP_ID:
 		/* read PMU chipcontrol register 3 */
 		addr = CORE_CC_REG(base, chipcontrol_addr);
 		chip->ops->write32(chip->ctx, addr, 3);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index a8998eb..7da6441 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -103,6 +103,10 @@
 
 	struct brcmf_ampdu_rx_reorder
 		*reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
+
+	u32 feat_flags;
+	u32 chip_quirks;
+
 #ifdef DEBUG
 	struct dentry *dbgfs_dir;
 #endif
@@ -175,7 +179,6 @@
 void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
 			  enum brcmf_netif_stop_reason reason, bool state);
-u32 brcmf_get_chip_info(struct brcmf_if *ifp);
 void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
 		      bool success);
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 03fe8ac..be9f4f8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -41,37 +41,12 @@
 	root_folder = NULL;
 }
 
-static
-ssize_t brcmf_debugfs_chipinfo_read(struct file *f, char __user *data,
-				   size_t count, loff_t *ppos)
+static int brcmf_debugfs_chipinfo_read(struct seq_file *seq, void *data)
 {
-	struct brcmf_pub *drvr = f->private_data;
-	struct brcmf_bus *bus = drvr->bus_if;
-	char buf[40];
-	int res;
+	struct brcmf_bus *bus = dev_get_drvdata(seq->private);
 
-	/* only allow read from start */
-	if (*ppos > 0)
-		return 0;
-
-	res = scnprintf(buf, sizeof(buf), "chip: %x(%u) rev %u\n",
-			bus->chip, bus->chip, bus->chiprev);
-	return simple_read_from_buffer(data, count, ppos, buf, res);
-}
-
-static const struct file_operations brcmf_debugfs_chipinfo_ops = {
-	.owner = THIS_MODULE,
-	.open = simple_open,
-	.read = brcmf_debugfs_chipinfo_read
-};
-
-static int brcmf_debugfs_create_chipinfo(struct brcmf_pub *drvr)
-{
-	struct dentry *dentry =  drvr->dbgfs_dir;
-
-	if (!IS_ERR_OR_NULL(dentry))
-		debugfs_create_file("chipinfo", S_IRUGO, dentry, drvr,
-				    &brcmf_debugfs_chipinfo_ops);
+	seq_printf(seq, "chip: %x(%u) rev %u\n",
+		   bus->chip, bus->chip, bus->chiprev);
 	return 0;
 }
 
@@ -83,7 +58,8 @@
 		return -ENODEV;
 
 	drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
-	brcmf_debugfs_create_chipinfo(drvr);
+	brcmf_debugfs_add_entry(drvr, "chipinfo", brcmf_debugfs_chipinfo_read);
+
 	return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
 }
 
@@ -98,148 +74,44 @@
 	return drvr->dbgfs_dir;
 }
 
-static
-ssize_t brcmf_debugfs_sdio_counter_read(struct file *f, char __user *data,
-					size_t count, loff_t *ppos)
-{
-	struct brcmf_sdio_count *sdcnt = f->private_data;
-	char buf[750];
-	int res;
-
-	/* only allow read from start */
-	if (*ppos > 0)
-		return 0;
-
-	res = scnprintf(buf, sizeof(buf),
-			"intrcount:    %u\nlastintrs:    %u\n"
-			"pollcnt:      %u\nregfails:     %u\n"
-			"tx_sderrs:    %u\nfcqueued:     %u\n"
-			"rxrtx:        %u\nrx_toolong:   %u\n"
-			"rxc_errors:   %u\nrx_hdrfail:   %u\n"
-			"rx_badhdr:    %u\nrx_badseq:    %u\n"
-			"fc_rcvd:      %u\nfc_xoff:      %u\n"
-			"fc_xon:       %u\nrxglomfail:   %u\n"
-			"rxglomframes: %u\nrxglompkts:   %u\n"
-			"f2rxhdrs:     %u\nf2rxdata:     %u\n"
-			"f2txdata:     %u\nf1regdata:    %u\n"
-			"tickcnt:      %u\ntx_ctlerrs:   %lu\n"
-			"tx_ctlpkts:   %lu\nrx_ctlerrs:   %lu\n"
-			"rx_ctlpkts:   %lu\nrx_readahead: %lu\n",
-			sdcnt->intrcount, sdcnt->lastintrs,
-			sdcnt->pollcnt, sdcnt->regfails,
-			sdcnt->tx_sderrs, sdcnt->fcqueued,
-			sdcnt->rxrtx, sdcnt->rx_toolong,
-			sdcnt->rxc_errors, sdcnt->rx_hdrfail,
-			sdcnt->rx_badhdr, sdcnt->rx_badseq,
-			sdcnt->fc_rcvd, sdcnt->fc_xoff,
-			sdcnt->fc_xon, sdcnt->rxglomfail,
-			sdcnt->rxglomframes, sdcnt->rxglompkts,
-			sdcnt->f2rxhdrs, sdcnt->f2rxdata,
-			sdcnt->f2txdata, sdcnt->f1regdata,
-			sdcnt->tickcnt, sdcnt->tx_ctlerrs,
-			sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
-			sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
-
-	return simple_read_from_buffer(data, count, ppos, buf, res);
-}
-
-static const struct file_operations brcmf_debugfs_sdio_counter_ops = {
-	.owner = THIS_MODULE,
-	.open = simple_open,
-	.read = brcmf_debugfs_sdio_counter_read
+struct brcmf_debugfs_entry {
+	int (*read)(struct seq_file *seq, void *data);
+	struct brcmf_pub *drvr;
 };
 
-void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
-				     struct brcmf_sdio_count *sdcnt)
+static int brcmf_debugfs_entry_open(struct inode *inode, struct file *f)
 {
-	struct dentry *dentry = drvr->dbgfs_dir;
+	struct brcmf_debugfs_entry *entry = inode->i_private;
 
-	if (!IS_ERR_OR_NULL(dentry))
-		debugfs_create_file("counters", S_IRUGO, dentry,
-				    sdcnt, &brcmf_debugfs_sdio_counter_ops);
+	return single_open(f, entry->read, entry->drvr->bus_if->dev);
 }
 
-static
-ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
-				     size_t count, loff_t *ppos)
-{
-	struct brcmf_fws_stats *fwstats = f->private_data;
-	char buf[650];
-	int res;
-
-	/* only allow read from start */
-	if (*ppos > 0)
-		return 0;
-
-	res = scnprintf(buf, sizeof(buf),
-			"header_pulls:      %u\n"
-			"header_only_pkt:   %u\n"
-			"tlv_parse_failed:  %u\n"
-			"tlv_invalid_type:  %u\n"
-			"mac_update_fails:  %u\n"
-			"ps_update_fails:   %u\n"
-			"if_update_fails:   %u\n"
-			"pkt2bus:           %u\n"
-			"generic_error:     %u\n"
-			"rollback_success:  %u\n"
-			"rollback_failed:   %u\n"
-			"delayq_full:       %u\n"
-			"supprq_full:       %u\n"
-			"txs_indicate:      %u\n"
-			"txs_discard:       %u\n"
-			"txs_suppr_core:    %u\n"
-			"txs_suppr_ps:      %u\n"
-			"txs_tossed:        %u\n"
-			"txs_host_tossed:   %u\n"
-			"bus_flow_block:    %u\n"
-			"fws_flow_block:    %u\n"
-			"send_pkts:         BK:%u BE:%u VO:%u VI:%u BCMC:%u\n"
-			"requested_sent:    BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
-			fwstats->header_pulls,
-			fwstats->header_only_pkt,
-			fwstats->tlv_parse_failed,
-			fwstats->tlv_invalid_type,
-			fwstats->mac_update_failed,
-			fwstats->mac_ps_update_failed,
-			fwstats->if_update_failed,
-			fwstats->pkt2bus,
-			fwstats->generic_error,
-			fwstats->rollback_success,
-			fwstats->rollback_failed,
-			fwstats->delayq_full_error,
-			fwstats->supprq_full_error,
-			fwstats->txs_indicate,
-			fwstats->txs_discard,
-			fwstats->txs_supp_core,
-			fwstats->txs_supp_ps,
-			fwstats->txs_tossed,
-			fwstats->txs_host_tossed,
-			fwstats->bus_flow_block,
-			fwstats->fws_flow_block,
-			fwstats->send_pkts[0], fwstats->send_pkts[1],
-			fwstats->send_pkts[2], fwstats->send_pkts[3],
-			fwstats->send_pkts[4],
-			fwstats->requested_sent[0],
-			fwstats->requested_sent[1],
-			fwstats->requested_sent[2],
-			fwstats->requested_sent[3],
-			fwstats->requested_sent[4]);
-
-	return simple_read_from_buffer(data, count, ppos, buf, res);
-}
-
-static const struct file_operations brcmf_debugfs_fws_stats_ops = {
+static const struct file_operations brcmf_debugfs_def_ops = {
 	.owner = THIS_MODULE,
-	.open = simple_open,
-	.read = brcmf_debugfs_fws_stats_read
+	.open = brcmf_debugfs_entry_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek
 };
 
-void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
-				    struct brcmf_fws_stats *stats)
+int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+			    int (*read_fn)(struct seq_file *seq, void *data))
 {
 	struct dentry *dentry =  drvr->dbgfs_dir;
+	struct brcmf_debugfs_entry *entry;
 
-	if (!IS_ERR_OR_NULL(dentry))
-		debugfs_create_file("fws_stats", S_IRUGO, dentry,
-				    stats, &brcmf_debugfs_fws_stats_ops);
+	if (IS_ERR_OR_NULL(dentry))
+		return -ENOENT;
+
+	entry = devm_kzalloc(drvr->bus_if->dev, sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->read = read_fn;
+	entry->drvr = drvr;
+
+	dentry = debugfs_create_file(fn, S_IRUGO, dentry, entry,
+				     &brcmf_debugfs_def_ops);
+
+	return PTR_ERR_OR_ZERO(dentry);
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index ef52ed7..6eade7c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -100,68 +100,6 @@
 
 extern int brcmf_msg_level;
 
-/*
- * hold counter variables used in brcmfmac sdio driver.
- */
-struct brcmf_sdio_count {
-	uint intrcount;		/* Count of device interrupt callbacks */
-	uint lastintrs;		/* Count as of last watchdog timer */
-	uint pollcnt;		/* Count of active polls */
-	uint regfails;		/* Count of R_REG failures */
-	uint tx_sderrs;		/* Count of tx attempts with sd errors */
-	uint fcqueued;		/* Tx packets that got queued */
-	uint rxrtx;		/* Count of rtx requests (NAK to dongle) */
-	uint rx_toolong;	/* Receive frames too long to receive */
-	uint rxc_errors;	/* SDIO errors when reading control frames */
-	uint rx_hdrfail;	/* SDIO errors on header reads */
-	uint rx_badhdr;		/* Bad received headers (roosync?) */
-	uint rx_badseq;		/* Mismatched rx sequence number */
-	uint fc_rcvd;		/* Number of flow-control events received */
-	uint fc_xoff;		/* Number which turned on flow-control */
-	uint fc_xon;		/* Number which turned off flow-control */
-	uint rxglomfail;	/* Failed deglom attempts */
-	uint rxglomframes;	/* Number of glom frames (superframes) */
-	uint rxglompkts;	/* Number of packets from glom frames */
-	uint f2rxhdrs;		/* Number of header reads */
-	uint f2rxdata;		/* Number of frame data reads */
-	uint f2txdata;		/* Number of f2 frame writes */
-	uint f1regdata;		/* Number of f1 register accesses */
-	uint tickcnt;		/* Number of watchdog been schedule */
-	ulong tx_ctlerrs;	/* Err of sending ctrl frames */
-	ulong tx_ctlpkts;	/* Ctrl frames sent to dongle */
-	ulong rx_ctlerrs;	/* Err of processing rx ctrl frames */
-	ulong rx_ctlpkts;	/* Ctrl frames processed from dongle */
-	ulong rx_readahead_cnt;	/* packets where header read-ahead was used */
-};
-
-struct brcmf_fws_stats {
-	u32 tlv_parse_failed;
-	u32 tlv_invalid_type;
-	u32 header_only_pkt;
-	u32 header_pulls;
-	u32 pkt2bus;
-	u32 send_pkts[5];
-	u32 requested_sent[5];
-	u32 generic_error;
-	u32 mac_update_failed;
-	u32 mac_ps_update_failed;
-	u32 if_update_failed;
-	u32 packet_request_failed;
-	u32 credit_request_failed;
-	u32 rollback_success;
-	u32 rollback_failed;
-	u32 delayq_full_error;
-	u32 supprq_full_error;
-	u32 txs_indicate;
-	u32 txs_discard;
-	u32 txs_supp_core;
-	u32 txs_supp_ps;
-	u32 txs_tossed;
-	u32 txs_host_tossed;
-	u32 bus_flow_block;
-	u32 fws_flow_block;
-};
-
 struct brcmf_pub;
 #ifdef DEBUG
 void brcmf_debugfs_init(void);
@@ -169,10 +107,8 @@
 int brcmf_debugfs_attach(struct brcmf_pub *drvr);
 void brcmf_debugfs_detach(struct brcmf_pub *drvr);
 struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
-void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
-				     struct brcmf_sdio_count *sdcnt);
-void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
-				    struct brcmf_fws_stats *stats);
+int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+			    int (*read_fn)(struct seq_file *seq, void *data));
 #else
 static inline void brcmf_debugfs_init(void)
 {
@@ -187,9 +123,11 @@
 static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
 {
 }
-static inline void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
-						  struct brcmf_fws_stats *stats)
+static inline
+int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
+			    int (*read_fn)(struct seq_file *seq, void *data))
 {
+	return 0;
 }
 #endif
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 2699441..347b426 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -30,6 +30,7 @@
 #include "wl_cfg80211.h"
 #include "fwil.h"
 #include "fwsignal.h"
+#include "feature.h"
 #include "proto.h"
 
 MODULE_AUTHOR("Broadcom Corporation");
@@ -937,6 +938,8 @@
 	if (ret < 0)
 		goto fail;
 
+	brcmf_feat_attach(drvr);
+
 	ret = brcmf_fws_init(drvr);
 	if (ret < 0)
 		goto fail;
@@ -1074,16 +1077,6 @@
 	return !err;
 }
 
-/*
- * return chip id and rev of the device encoded in u32.
- */
-u32 brcmf_get_chip_info(struct brcmf_if *ifp)
-{
-	struct brcmf_bus *bus = ifp->drvr->bus_if;
-
-	return bus->chip << 4 | bus->chiprev;
-}
-
 static void brcmf_driver_register(struct work_struct *work)
 {
 #ifdef CONFIG_BRCMFMAC_SDIO
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 8fa0dbb..67d91d5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -391,6 +391,40 @@
 	u16 tail_pad;
 };
 
+/*
+ * hold counter variables
+ */
+struct brcmf_sdio_count {
+	uint intrcount;		/* Count of device interrupt callbacks */
+	uint lastintrs;		/* Count as of last watchdog timer */
+	uint pollcnt;		/* Count of active polls */
+	uint regfails;		/* Count of R_REG failures */
+	uint tx_sderrs;		/* Count of tx attempts with sd errors */
+	uint fcqueued;		/* Tx packets that got queued */
+	uint rxrtx;		/* Count of rtx requests (NAK to dongle) */
+	uint rx_toolong;	/* Receive frames too long to receive */
+	uint rxc_errors;	/* SDIO errors when reading control frames */
+	uint rx_hdrfail;	/* SDIO errors on header reads */
+	uint rx_badhdr;		/* Bad received headers (roosync?) */
+	uint rx_badseq;		/* Mismatched rx sequence number */
+	uint fc_rcvd;		/* Number of flow-control events received */
+	uint fc_xoff;		/* Number which turned on flow-control */
+	uint fc_xon;		/* Number which turned off flow-control */
+	uint rxglomfail;	/* Failed deglom attempts */
+	uint rxglomframes;	/* Number of glom frames (superframes) */
+	uint rxglompkts;	/* Number of packets from glom frames */
+	uint f2rxhdrs;		/* Number of header reads */
+	uint f2rxdata;		/* Number of frame data reads */
+	uint f2txdata;		/* Number of f2 frame writes */
+	uint f1regdata;		/* Number of f1 register accesses */
+	uint tickcnt;		/* Number of watchdog been schedule */
+	ulong tx_ctlerrs;	/* Err of sending ctrl frames */
+	ulong tx_ctlpkts;	/* Ctrl frames sent to dongle */
+	ulong rx_ctlerrs;	/* Err of processing rx ctrl frames */
+	ulong rx_ctlpkts;	/* Ctrl frames processed from dongle */
+	ulong rx_readahead_cnt;	/* packets where header read-ahead was used */
+};
+
 /* misc chip info needed by some of the routines */
 /* Private data for SDIO bus interaction */
 struct brcmf_sdio {
@@ -620,40 +654,46 @@
 	name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
 
 static const struct brcmf_firmware_names brcmf_fwname_data[] = {
-	{ BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
-	{ BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
-	{ BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
-	{ BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
-	{ BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
-	{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
-	{ BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
-	{ BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
-	{ BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
-	{ BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
+	{ BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
+	{ BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
+	{ BRCM_CC_43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+	{ BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
+	{ BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
+	{ BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+	{ BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
+	{ BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
+	{ BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
+	{ BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
 };
 
-static const char *brcmf_sdio_get_fwname(struct brcmf_chip *ci,
-					 enum brcmf_firmware_type type)
+static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
+				  struct brcmf_sdio_dev *sdiodev)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
 		if (brcmf_fwname_data[i].chipid == ci->chip &&
-		    brcmf_fwname_data[i].revmsk & BIT(ci->chiprev)) {
-			switch (type) {
-			case BRCMF_FIRMWARE_BIN:
-				return brcmf_fwname_data[i].bin;
-			case BRCMF_FIRMWARE_NVRAM:
-				return brcmf_fwname_data[i].nv;
-			default:
-				brcmf_err("invalid firmware type (%d)\n", type);
-				return NULL;
-			}
-		}
+		    brcmf_fwname_data[i].revmsk & BIT(ci->chiprev))
+			break;
 	}
-	brcmf_err("Unknown chipid %d [%d]\n",
-		  ci->chip, ci->chiprev);
-	return NULL;
+
+	if (i == ARRAY_SIZE(brcmf_fwname_data)) {
+		brcmf_err("Unknown chipid %d [%d]\n", ci->chip, ci->chiprev);
+		return -ENODEV;
+	}
+
+	/* check if firmware path is provided by module parameter */
+	if (brcmf_firmware_path[0] != '\0') {
+		if (brcmf_firmware_path[strlen(brcmf_firmware_path) - 1] != '/')
+			strcat(brcmf_firmware_path, "/");
+
+		strcpy(sdiodev->fw_name, brcmf_firmware_path);
+		strcpy(sdiodev->nvram_name, brcmf_firmware_path);
+	}
+	strcat(sdiodev->fw_name, brcmf_fwname_data[i].bin);
+	strcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv);
+
+	return 0;
 }
 
 static void pkt_align(struct sk_buff *p, int len, int align)
@@ -2898,16 +2938,13 @@
 }
 
 #ifdef DEBUG
-static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
-				   struct sdpcm_shared *sh, char __user *data,
-				   size_t count)
+static int brcmf_sdio_dump_console(struct seq_file *seq, struct brcmf_sdio *bus,
+				   struct sdpcm_shared *sh)
 {
 	u32 addr, console_ptr, console_size, console_index;
 	char *conbuf = NULL;
 	__le32 sh_val;
 	int rv;
-	loff_t pos = 0;
-	int nbytes = 0;
 
 	/* obtain console information from device memory */
 	addr = sh->console_addr + offsetof(struct rte_console, log_le);
@@ -2945,33 +2982,24 @@
 	if (rv < 0)
 		goto done;
 
-	rv = simple_read_from_buffer(data, count, &pos,
-				     conbuf + console_index,
-				     console_size - console_index);
+	rv = seq_write(seq, conbuf + console_index,
+		       console_size - console_index);
 	if (rv < 0)
 		goto done;
 
-	nbytes = rv;
-	if (console_index > 0) {
-		pos = 0;
-		rv = simple_read_from_buffer(data+nbytes, count, &pos,
-					     conbuf, console_index - 1);
-		if (rv < 0)
-			goto done;
-		rv += nbytes;
-	}
+	if (console_index > 0)
+		rv = seq_write(seq, conbuf, console_index - 1);
+
 done:
 	vfree(conbuf);
 	return rv;
 }
 
-static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
-				char __user *data, size_t count)
+static int brcmf_sdio_trap_info(struct seq_file *seq, struct brcmf_sdio *bus,
+				struct sdpcm_shared *sh)
 {
-	int error, res;
-	char buf[350];
+	int error;
 	struct brcmf_trap_info tr;
-	loff_t pos = 0;
 
 	if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
 		brcmf_dbg(INFO, "no trap in firmware\n");
@@ -2983,34 +3011,30 @@
 	if (error < 0)
 		return error;
 
-	res = scnprintf(buf, sizeof(buf),
-			"dongle trap info: type 0x%x @ epc 0x%08x\n"
-			"  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
-			"  lr   0x%08x pc   0x%08x offset 0x%x\n"
-			"  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
-			"  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
-			le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
-			le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
-			le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
-			le32_to_cpu(tr.pc), sh->trap_addr,
-			le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
-			le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
-			le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
-			le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
+	seq_printf(seq,
+		   "dongle trap info: type 0x%x @ epc 0x%08x\n"
+		   "  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
+		   "  lr   0x%08x pc   0x%08x offset 0x%x\n"
+		   "  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
+		   "  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
+		   le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
+		   le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
+		   le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
+		   le32_to_cpu(tr.pc), sh->trap_addr,
+		   le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
+		   le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
+		   le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
+		   le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
 
-	return simple_read_from_buffer(data, count, &pos, buf, res);
+	return 0;
 }
 
-static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
-				  struct sdpcm_shared *sh, char __user *data,
-				  size_t count)
+static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
+				  struct sdpcm_shared *sh)
 {
 	int error = 0;
-	char buf[200];
 	char file[80] = "?";
 	char expr[80] = "<???>";
-	int res;
-	loff_t pos = 0;
 
 	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
 		brcmf_dbg(INFO, "firmware not built with -assert\n");
@@ -3035,10 +3059,9 @@
 	}
 	sdio_release_host(bus->sdiodev->func[1]);
 
-	res = scnprintf(buf, sizeof(buf),
-			"dongle assert: %s:%d: assert(%s)\n",
-			file, sh->assert_line, expr);
-	return simple_read_from_buffer(data, count, &pos, buf, res);
+	seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n",
+		   file, sh->assert_line, expr);
+	return 0;
 }
 
 static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
@@ -3062,59 +3085,75 @@
 	return 0;
 }
 
-static int brcmf_sdio_died_dump(struct brcmf_sdio *bus, char __user *data,
-				size_t count, loff_t *ppos)
+static int brcmf_sdio_died_dump(struct seq_file *seq, struct brcmf_sdio *bus)
 {
 	int error = 0;
 	struct sdpcm_shared sh;
-	int nbytes = 0;
-	loff_t pos = *ppos;
-
-	if (pos != 0)
-		return 0;
 
 	error = brcmf_sdio_readshared(bus, &sh);
 	if (error < 0)
 		goto done;
 
-	error = brcmf_sdio_assert_info(bus, &sh, data, count);
+	error = brcmf_sdio_assert_info(seq, bus, &sh);
 	if (error < 0)
 		goto done;
-	nbytes = error;
 
-	error = brcmf_sdio_trap_info(bus, &sh, data+nbytes, count);
+	error = brcmf_sdio_trap_info(seq, bus, &sh);
 	if (error < 0)
 		goto done;
-	nbytes += error;
 
-	error = brcmf_sdio_dump_console(bus, &sh, data+nbytes, count);
-	if (error < 0)
-		goto done;
-	nbytes += error;
+	error = brcmf_sdio_dump_console(seq, bus, &sh);
 
-	error = nbytes;
-	*ppos += nbytes;
 done:
 	return error;
 }
 
-static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
-					size_t count, loff_t *ppos)
+static int brcmf_sdio_forensic_read(struct seq_file *seq, void *data)
 {
-	struct brcmf_sdio *bus = f->private_data;
-	int res;
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	struct brcmf_sdio *bus = bus_if->bus_priv.sdio->bus;
 
-	res = brcmf_sdio_died_dump(bus, data, count, ppos);
-	if (res > 0)
-		*ppos += res;
-	return (ssize_t)res;
+	return brcmf_sdio_died_dump(seq, bus);
 }
 
-static const struct file_operations brcmf_sdio_forensic_ops = {
-	.owner = THIS_MODULE,
-	.open = simple_open,
-	.read = brcmf_sdio_forensic_read
-};
+static int brcmf_debugfs_sdio_count_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio_count *sdcnt = &sdiodev->bus->sdcnt;
+
+	seq_printf(seq,
+		   "intrcount:    %u\nlastintrs:    %u\n"
+		   "pollcnt:      %u\nregfails:     %u\n"
+		   "tx_sderrs:    %u\nfcqueued:     %u\n"
+		   "rxrtx:        %u\nrx_toolong:   %u\n"
+		   "rxc_errors:   %u\nrx_hdrfail:   %u\n"
+		   "rx_badhdr:    %u\nrx_badseq:    %u\n"
+		   "fc_rcvd:      %u\nfc_xoff:      %u\n"
+		   "fc_xon:       %u\nrxglomfail:   %u\n"
+		   "rxglomframes: %u\nrxglompkts:   %u\n"
+		   "f2rxhdrs:     %u\nf2rxdata:     %u\n"
+		   "f2txdata:     %u\nf1regdata:    %u\n"
+		   "tickcnt:      %u\ntx_ctlerrs:   %lu\n"
+		   "tx_ctlpkts:   %lu\nrx_ctlerrs:   %lu\n"
+		   "rx_ctlpkts:   %lu\nrx_readahead: %lu\n",
+		   sdcnt->intrcount, sdcnt->lastintrs,
+		   sdcnt->pollcnt, sdcnt->regfails,
+		   sdcnt->tx_sderrs, sdcnt->fcqueued,
+		   sdcnt->rxrtx, sdcnt->rx_toolong,
+		   sdcnt->rxc_errors, sdcnt->rx_hdrfail,
+		   sdcnt->rx_badhdr, sdcnt->rx_badseq,
+		   sdcnt->fc_rcvd, sdcnt->fc_xoff,
+		   sdcnt->fc_xon, sdcnt->rxglomfail,
+		   sdcnt->rxglomframes, sdcnt->rxglompkts,
+		   sdcnt->f2rxhdrs, sdcnt->f2rxdata,
+		   sdcnt->f2txdata, sdcnt->f1regdata,
+		   sdcnt->tickcnt, sdcnt->tx_ctlerrs,
+		   sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
+		   sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
+
+	return 0;
+}
 
 static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
 {
@@ -3124,9 +3163,9 @@
 	if (IS_ERR_OR_NULL(dentry))
 		return;
 
-	debugfs_create_file("forensics", S_IRUGO, dentry, bus,
-			    &brcmf_sdio_forensic_ops);
-	brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
+	brcmf_debugfs_add_entry(drvr, "forensics", brcmf_sdio_forensic_read);
+	brcmf_debugfs_add_entry(drvr, "counters",
+				brcmf_debugfs_sdio_count_read);
 	debugfs_create_u32("console_interval", 0644, dentry,
 			   &bus->console_interval);
 }
@@ -3598,17 +3637,17 @@
 		return;
 
 	switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
-	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+	case SDIOD_DRVSTR_KEY(BRCM_CC_4330_CHIP_ID, 12):
 		str_tab = sdiod_drvstr_tab1_1v8;
 		str_mask = 0x00003800;
 		str_shift = 11;
 		break;
-	case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+	case SDIOD_DRVSTR_KEY(BRCM_CC_4334_CHIP_ID, 17):
 		str_tab = sdiod_drvstr_tab6_1v8;
 		str_mask = 0x00001800;
 		str_shift = 11;
 		break;
-	case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+	case SDIOD_DRVSTR_KEY(BRCM_CC_43143_CHIP_ID, 17):
 		/* note: 43143 does not support tristate */
 		i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
 		if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
@@ -3619,7 +3658,7 @@
 			brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
 				  ci->name, drivestrength);
 		break;
-	case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+	case SDIOD_DRVSTR_KEY(BRCM_CC_43362_CHIP_ID, 13):
 		str_tab = sdiod_drive_strength_tab5_1v8;
 		str_mask = 0x00003800;
 		str_shift = 11;
@@ -3720,12 +3759,12 @@
 	u32 val, rev;
 
 	val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
-	if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+	if (sdiodev->func[0]->device == BRCM_SDIO_4335_4339_DEVICE_ID &&
 	    addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
 		rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
 		if (rev >= 2) {
 			val &= ~CID_ID_MASK;
-			val |= BCM4339_CHIP_ID;
+			val |= BRCM_CC_4339_CHIP_ID;
 		}
 	}
 	return val;
@@ -4127,11 +4166,12 @@
 	brcmf_sdio_debugfs_create(bus);
 	brcmf_dbg(INFO, "completed!!\n");
 
+	ret = brcmf_sdio_get_fwnames(bus->ci, sdiodev);
+	if (ret)
+		goto fail;
+
 	ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM,
-				     brcmf_sdio_get_fwname(bus->ci,
-							   BRCMF_FIRMWARE_BIN),
-				     brcmf_sdio_get_fwname(bus->ci,
-							   BRCMF_FIRMWARE_NVRAM),
+				     sdiodev->fw_name, sdiodev->nvram_name,
 				     brcmf_sdio_firmware_callback);
 	if (ret != 0) {
 		brcmf_err("async firmware request failed: %d\n", ret);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
new file mode 100644
index 0000000..50877e3
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+
+#include <brcm_hw_ids.h>
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_dbg.h"
+#include "fwil.h"
+#include "feature.h"
+
+/*
+ * firmware error code received if iovar is unsupported.
+ */
+#define EBRCMF_FEAT_UNSUPPORTED		23
+
+/*
+ * expand feature list to array of feature strings.
+ */
+#define BRCMF_FEAT_DEF(_f) \
+	#_f,
+static const char *brcmf_feat_names[] = {
+	BRCMF_FEAT_LIST
+};
+#undef BRCMF_FEAT_DEF
+
+#ifdef DEBUG
+/*
+ * expand quirk list to array of quirk strings.
+ */
+#define BRCMF_QUIRK_DEF(_q) \
+	#_q,
+static const char * const brcmf_quirk_names[] = {
+	BRCMF_QUIRK_LIST
+};
+#undef BRCMF_QUIRK_DEF
+
+/**
+ * brcmf_feat_debugfs_read() - expose feature info to debugfs.
+ *
+ * @seq: sequence for debugfs entry.
+ * @data: raw data pointer.
+ */
+static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	u32 feats = bus_if->drvr->feat_flags;
+	u32 quirks = bus_if->drvr->chip_quirks;
+	int id;
+
+	seq_printf(seq, "Features: %08x\n", feats);
+	for (id = 0; id < BRCMF_FEAT_LAST; id++)
+		if (feats & BIT(id))
+			seq_printf(seq, "\t%s\n", brcmf_feat_names[id]);
+	seq_printf(seq, "\nQuirks:   %08x\n", quirks);
+	for (id = 0; id < BRCMF_FEAT_QUIRK_LAST; id++)
+		if (quirks & BIT(id))
+			seq_printf(seq, "\t%s\n", brcmf_quirk_names[id]);
+	return 0;
+}
+#else
+static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data)
+{
+	return 0;
+}
+#endif /* DEBUG */
+
+/**
+ * brcmf_feat_iovar_int_get() - determine feature through iovar query.
+ *
+ * @ifp: interface to query.
+ * @id: feature id.
+ * @name: iovar name.
+ */
+static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
+				     enum brcmf_feat_id id, char *name)
+{
+	u32 data;
+	int err;
+
+	err = brcmf_fil_iovar_int_get(ifp, name, &data);
+	if (err == 0) {
+		brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
+		ifp->drvr->feat_flags |= BIT(id);
+	} else {
+		brcmf_dbg(TRACE, "%s feature check failed: %d\n",
+			  brcmf_feat_names[id], err);
+	}
+}
+
+void brcmf_feat_attach(struct brcmf_pub *drvr)
+{
+	struct brcmf_if *ifp = drvr->iflist[0];
+
+	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
+
+	/* set chip related quirks */
+	switch (drvr->bus_if->chip) {
+	case BRCM_CC_43236_CHIP_ID:
+		drvr->chip_quirks |= BIT(BRCMF_FEAT_QUIRK_AUTO_AUTH);
+		break;
+	case BRCM_CC_4329_CHIP_ID:
+		drvr->chip_quirks |= BIT(BRCMF_FEAT_QUIRK_NEED_MPC);
+		break;
+	default:
+		/* no quirks */
+		break;
+	}
+
+	brcmf_debugfs_add_entry(drvr, "features", brcmf_feat_debugfs_read);
+}
+
+bool brcmf_feat_is_enabled(struct brcmf_if *ifp, enum brcmf_feat_id id)
+{
+	return (ifp->drvr->feat_flags & BIT(id));
+}
+
+bool brcmf_feat_is_quirk_enabled(struct brcmf_if *ifp,
+				 enum brcmf_feat_quirk quirk)
+{
+	return (ifp->drvr->chip_quirks & BIT(quirk));
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
new file mode 100644
index 0000000..961d175f
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _BRCMF_FEATURE_H
+#define _BRCMF_FEATURE_H
+
+/*
+ * Features:
+ *
+ * MCHAN: multi-channel for concurrent P2P.
+ */
+#define BRCMF_FEAT_LIST \
+	BRCMF_FEAT_DEF(MCHAN)
+/*
+ * Quirks:
+ *
+ * AUTO_AUTH: workaround needed for automatic authentication type.
+ * NEED_MPC: driver needs to disable MPC during scanning operation.
+ */
+#define BRCMF_QUIRK_LIST \
+	BRCMF_QUIRK_DEF(AUTO_AUTH) \
+	BRCMF_QUIRK_DEF(NEED_MPC)
+
+#define BRCMF_FEAT_DEF(_f) \
+	BRCMF_FEAT_ ## _f,
+/*
+ * expand feature list to enumeration.
+ */
+enum brcmf_feat_id {
+	BRCMF_FEAT_LIST
+	BRCMF_FEAT_LAST
+};
+#undef BRCMF_FEAT_DEF
+
+#define BRCMF_QUIRK_DEF(_q) \
+	BRCMF_FEAT_QUIRK_ ## _q,
+/*
+ * expand quirk list to enumeration.
+ */
+enum brcmf_feat_quirk {
+	BRCMF_QUIRK_LIST
+	BRCMF_FEAT_QUIRK_LAST
+};
+#undef BRCMF_QUIRK_DEF
+
+/**
+ * brcmf_feat_attach() - determine features and quirks.
+ *
+ * @drvr: driver instance.
+ */
+void brcmf_feat_attach(struct brcmf_pub *drvr);
+
+/**
+ * brcmf_feat_is_enabled() - query feature.
+ *
+ * @ifp: interface instance.
+ * @id: feature id to check.
+ *
+ * Return: true if feature is enabled; otherwise false.
+ */
+bool brcmf_feat_is_enabled(struct brcmf_if *ifp, enum brcmf_feat_id id);
+
+/**
+ * brcmf_feat_is_quirk_enabled() - query chip quirk.
+ *
+ * @ifp: interface instance.
+ * @quirk: quirk id to check.
+ *
+ * Return: true if quirk is enabled; otherwise false.
+ */
+bool brcmf_feat_is_quirk_enabled(struct brcmf_if *ifp,
+				 enum brcmf_feat_quirk quirk);
+
+#endif /* _BRCMF_FEATURE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 7b7d237..8ea9f28 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -18,10 +18,15 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/firmware.h>
+#include <linux/module.h>
 
 #include "dhd_dbg.h"
 #include "firmware.h"
 
+char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
+module_param_string(firmware_path, brcmf_firmware_path,
+		    BRCMF_FW_PATH_LEN, 0440);
+
 enum nvram_parser_state {
 	IDLE,
 	KEY,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
index 6431bfd..4d34823 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
@@ -21,6 +21,11 @@
 #define BRCMF_FW_REQ_FLAGS		0x00F0
 #define  BRCMF_FW_REQ_NV_OPTIONAL	0x0010
 
+#define	BRCMF_FW_PATH_LEN	256
+#define	BRCMF_FW_NAME_LEN	32
+
+extern char brcmf_firmware_path[];
+
 void brcmf_fw_nvram_free(void *nvram);
 /*
  * Request firmware(s) asynchronously. When the asynchronous request
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 699908d..d42f7d0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -454,6 +454,34 @@
 	struct brcmf_fws_mac_descriptor other;
 };
 
+struct brcmf_fws_stats {
+	u32 tlv_parse_failed;
+	u32 tlv_invalid_type;
+	u32 header_only_pkt;
+	u32 header_pulls;
+	u32 pkt2bus;
+	u32 send_pkts[5];
+	u32 requested_sent[5];
+	u32 generic_error;
+	u32 mac_update_failed;
+	u32 mac_ps_update_failed;
+	u32 if_update_failed;
+	u32 packet_request_failed;
+	u32 credit_request_failed;
+	u32 rollback_success;
+	u32 rollback_failed;
+	u32 delayq_full_error;
+	u32 supprq_full_error;
+	u32 txs_indicate;
+	u32 txs_discard;
+	u32 txs_supp_core;
+	u32 txs_supp_ps;
+	u32 txs_tossed;
+	u32 txs_host_tossed;
+	u32 bus_flow_block;
+	u32 fws_flow_block;
+};
+
 struct brcmf_fws_info {
 	struct brcmf_pub *drvr;
 	spinlock_t spinlock;
@@ -2017,6 +2045,75 @@
 	brcmf_fws_unlock(fws);
 }
 
+#ifdef DEBUG
+static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	struct brcmf_fws_stats *fwstats = &bus_if->drvr->fws->stats;
+
+	seq_printf(seq,
+		   "header_pulls:      %u\n"
+		   "header_only_pkt:   %u\n"
+		   "tlv_parse_failed:  %u\n"
+		   "tlv_invalid_type:  %u\n"
+		   "mac_update_fails:  %u\n"
+		   "ps_update_fails:   %u\n"
+		   "if_update_fails:   %u\n"
+		   "pkt2bus:           %u\n"
+		   "generic_error:     %u\n"
+		   "rollback_success:  %u\n"
+		   "rollback_failed:   %u\n"
+		   "delayq_full:       %u\n"
+		   "supprq_full:       %u\n"
+		   "txs_indicate:      %u\n"
+		   "txs_discard:       %u\n"
+		   "txs_suppr_core:    %u\n"
+		   "txs_suppr_ps:      %u\n"
+		   "txs_tossed:        %u\n"
+		   "txs_host_tossed:   %u\n"
+		   "bus_flow_block:    %u\n"
+		   "fws_flow_block:    %u\n"
+		   "send_pkts:         BK:%u BE:%u VO:%u VI:%u BCMC:%u\n"
+		   "requested_sent:    BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
+		   fwstats->header_pulls,
+		   fwstats->header_only_pkt,
+		   fwstats->tlv_parse_failed,
+		   fwstats->tlv_invalid_type,
+		   fwstats->mac_update_failed,
+		   fwstats->mac_ps_update_failed,
+		   fwstats->if_update_failed,
+		   fwstats->pkt2bus,
+		   fwstats->generic_error,
+		   fwstats->rollback_success,
+		   fwstats->rollback_failed,
+		   fwstats->delayq_full_error,
+		   fwstats->supprq_full_error,
+		   fwstats->txs_indicate,
+		   fwstats->txs_discard,
+		   fwstats->txs_supp_core,
+		   fwstats->txs_supp_ps,
+		   fwstats->txs_tossed,
+		   fwstats->txs_host_tossed,
+		   fwstats->bus_flow_block,
+		   fwstats->fws_flow_block,
+		   fwstats->send_pkts[0], fwstats->send_pkts[1],
+		   fwstats->send_pkts[2], fwstats->send_pkts[3],
+		   fwstats->send_pkts[4],
+		   fwstats->requested_sent[0],
+		   fwstats->requested_sent[1],
+		   fwstats->requested_sent[2],
+		   fwstats->requested_sent[3],
+		   fwstats->requested_sent[4]);
+
+	return 0;
+}
+#else
+static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
+{
+	return 0;
+}
+#endif
+
 int brcmf_fws_init(struct brcmf_pub *drvr)
 {
 	struct brcmf_fws_info *fws;
@@ -2107,7 +2204,8 @@
 			BRCMF_FWS_PSQ_LEN);
 
 	/* create debugfs file for statistics */
-	brcmf_debugfs_create_fws_stats(drvr, &fws->stats);
+	brcmf_debugfs_add_entry(drvr, "fws_stats",
+				brcmf_debugfs_fws_stats_read);
 
 	brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
 		  fws->fw_signals ? "enabled" : "disabled", tlv);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 3deab79..6c5e585 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -18,6 +18,8 @@
 #define	_BRCM_SDH_H_
 
 #include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include "firmware.h"
 
 #define SDIO_FUNC_0		0
 #define SDIO_FUNC_1		1
@@ -182,6 +184,8 @@
 	uint max_segment_size;
 	uint txglomsz;
 	struct sg_table sgtable;
+	char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
+	char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
 };
 
 /* sdio core registers */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index b732a99..dc13591 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -21,6 +21,7 @@
 #include <linux/vmalloc.h>
 
 #include <brcmu_utils.h>
+#include <brcm_hw_ids.h>
 #include <brcmu_wifi.h>
 #include <dhd_bus.h>
 #include <dhd_dbg.h>
@@ -913,16 +914,16 @@
 static bool brcmf_usb_chip_support(int chipid, int chiprev)
 {
 	switch(chipid) {
-	case 43143:
+	case BRCM_CC_43143_CHIP_ID:
 		return true;
-	case 43235:
-	case 43236:
-	case 43238:
+	case BRCM_CC_43235_CHIP_ID:
+	case BRCM_CC_43236_CHIP_ID:
+	case BRCM_CC_43238_CHIP_ID:
 		return (chiprev == 3);
-	case 43242:
+	case BRCM_CC_43242_CHIP_ID:
 		return true;
-	case 43566:
-	case 43569:
+	case BRCM_CC_43566_CHIP_ID:
+	case BRCM_CC_43569_CHIP_ID:
 		return true;
 	default:
 		break;
@@ -1016,16 +1017,16 @@
 static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
 {
 	switch (devinfo->bus_pub.devid) {
-	case 43143:
+	case BRCM_CC_43143_CHIP_ID:
 		return BRCMF_USB_43143_FW_NAME;
-	case 43235:
-	case 43236:
-	case 43238:
+	case BRCM_CC_43235_CHIP_ID:
+	case BRCM_CC_43236_CHIP_ID:
+	case BRCM_CC_43238_CHIP_ID:
 		return BRCMF_USB_43236_FW_NAME;
-	case 43242:
+	case BRCM_CC_43242_CHIP_ID:
 		return BRCMF_USB_43242_FW_NAME;
-	case 43566:
-	case 43569:
+	case BRCM_CC_43566_CHIP_ID:
+	case BRCM_CC_43569_CHIP_ID:
 		return BRCMF_USB_43569_FW_NAME;
 	default:
 		return NULL;
@@ -1366,21 +1367,17 @@
 				      brcmf_usb_probe_phase2);
 }
 
-#define BRCMF_USB_VENDOR_ID_BROADCOM	0x0a5c
-#define BRCMF_USB_DEVICE_ID_43143	0xbd1e
-#define BRCMF_USB_DEVICE_ID_43236	0xbd17
-#define BRCMF_USB_DEVICE_ID_43242	0xbd1f
-#define BRCMF_USB_DEVICE_ID_43569	0xbd27
-#define BRCMF_USB_DEVICE_ID_BCMFW	0x0bdc
+#define BRCMF_USB_DEVICE(dev_id)	\
+	{ USB_DEVICE(BRCM_USB_VENDOR_ID_BROADCOM, dev_id) }
 
 static struct usb_device_id brcmf_usb_devid_table[] = {
-	{ USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43143) },
-	{ USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) },
-	{ USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43242) },
-	{ USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43569) },
+	BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID),
+	BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID),
+	BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID),
+	BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID),
 	/* special entry for device with firmware loaded and running */
-	{ USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
-	{ }
+	BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID),
+	{ /* end: all zeroes */ }
 };
 
 MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 9682cf2..48078a3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -33,6 +33,7 @@
 #include "p2p.h"
 #include "btcoex.h"
 #include "wl_cfg80211.h"
+#include "feature.h"
 #include "fwil.h"
 #include "vendor.h"
 
@@ -102,24 +103,6 @@
 	return true;
 }
 
-#define CHAN2G(_channel, _freq, _flags) {			\
-	.band			= IEEE80211_BAND_2GHZ,		\
-	.center_freq		= (_freq),			\
-	.hw_value		= (_channel),			\
-	.flags			= (_flags),			\
-	.max_antenna_gain	= 0,				\
-	.max_power		= 30,				\
-}
-
-#define CHAN5G(_channel, _flags) {				\
-	.band			= IEEE80211_BAND_5GHZ,		\
-	.center_freq		= 5000 + (5 * (_channel)),	\
-	.hw_value		= (_channel),			\
-	.flags			= (_flags),			\
-	.max_antenna_gain	= 0,				\
-	.max_power		= 30,				\
-}
-
 #define RATE_TO_BASE100KBPS(rate)   (((rate) * 10) / 2)
 #define RATETAB_ENT(_rateid, _flags) \
 	{                                                               \
@@ -148,58 +131,17 @@
 #define wl_g_rates		(__wl_rates + 0)
 #define wl_g_rates_size	12
 
-static struct ieee80211_channel __wl_2ghz_channels[] = {
-	CHAN2G(1, 2412, 0),
-	CHAN2G(2, 2417, 0),
-	CHAN2G(3, 2422, 0),
-	CHAN2G(4, 2427, 0),
-	CHAN2G(5, 2432, 0),
-	CHAN2G(6, 2437, 0),
-	CHAN2G(7, 2442, 0),
-	CHAN2G(8, 2447, 0),
-	CHAN2G(9, 2452, 0),
-	CHAN2G(10, 2457, 0),
-	CHAN2G(11, 2462, 0),
-	CHAN2G(12, 2467, 0),
-	CHAN2G(13, 2472, 0),
-	CHAN2G(14, 2484, 0),
-};
-
-static struct ieee80211_channel __wl_5ghz_a_channels[] = {
-	CHAN5G(34, 0), CHAN5G(36, 0),
-	CHAN5G(38, 0), CHAN5G(40, 0),
-	CHAN5G(42, 0), CHAN5G(44, 0),
-	CHAN5G(46, 0), CHAN5G(48, 0),
-	CHAN5G(52, 0), CHAN5G(56, 0),
-	CHAN5G(60, 0), CHAN5G(64, 0),
-	CHAN5G(100, 0), CHAN5G(104, 0),
-	CHAN5G(108, 0), CHAN5G(112, 0),
-	CHAN5G(116, 0), CHAN5G(120, 0),
-	CHAN5G(124, 0), CHAN5G(128, 0),
-	CHAN5G(132, 0), CHAN5G(136, 0),
-	CHAN5G(140, 0), CHAN5G(149, 0),
-	CHAN5G(153, 0), CHAN5G(157, 0),
-	CHAN5G(161, 0), CHAN5G(165, 0),
-	CHAN5G(184, 0), CHAN5G(188, 0),
-	CHAN5G(192, 0), CHAN5G(196, 0),
-	CHAN5G(200, 0), CHAN5G(204, 0),
-	CHAN5G(208, 0), CHAN5G(212, 0),
-	CHAN5G(216, 0),
-};
-
-static struct ieee80211_supported_band __wl_band_2ghz = {
+/* Band templates duplicated per wiphy. The channel info
+ * is filled in after querying the device.
+ */
+static const struct ieee80211_supported_band __wl_band_2ghz = {
 	.band = IEEE80211_BAND_2GHZ,
-	.channels = __wl_2ghz_channels,
-	.n_channels = ARRAY_SIZE(__wl_2ghz_channels),
 	.bitrates = wl_g_rates,
 	.n_bitrates = wl_g_rates_size,
-	.ht_cap = {IEEE80211_HT_CAP_SUP_WIDTH_20_40, true},
 };
 
-static struct ieee80211_supported_band __wl_band_5ghz_a = {
+static const struct ieee80211_supported_band __wl_band_5ghz_a = {
 	.band = IEEE80211_BAND_5GHZ,
-	.channels = __wl_5ghz_a_channels,
-	.n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
 	.bitrates = wl_a_rates,
 	.n_bitrates = wl_a_rates_size,
 };
@@ -592,7 +534,7 @@
 
 static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc)
 {
-	if ((brcmf_get_chip_info(ifp) >> 4) == 0x4329)
+	if (brcmf_feat_is_quirk_enabled(ifp, BRCMF_FEAT_QUIRK_NEED_MPC))
 		brcmf_set_mpc(ifp, mpc);
 }
 
@@ -1619,17 +1561,10 @@
 enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp,
 					   enum nl80211_auth_type type)
 {
-	u32 ci;
-	if (type == NL80211_AUTHTYPE_AUTOMATIC) {
-		/* shift to ignore chip revision */
-		ci = brcmf_get_chip_info(ifp) >> 4;
-		switch (ci) {
-		case 43236:
-			brcmf_dbg(CONN, "43236 WAR: use OPEN instead of AUTO\n");
-			return NL80211_AUTHTYPE_OPEN_SYSTEM;
-		default:
-			break;
-		}
+	if (type == NL80211_AUTHTYPE_AUTOMATIC &&
+	    brcmf_feat_is_quirk_enabled(ifp, BRCMF_FEAT_QUIRK_AUTO_AUTH)) {
+		brcmf_dbg(CONN, "WAR: use OPEN instead of AUTO\n");
+		type = NL80211_AUTHTYPE_OPEN_SYSTEM;
 	}
 	return type;
 }
@@ -4284,122 +4219,6 @@
 	.tdls_oper = brcmf_cfg80211_tdls_oper,
 };
 
-static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
-{
-	/* scheduled scan settings */
-	wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
-	wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
-	wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
-	wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-}
-
-static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
-	{
-		.max = 2,
-		.types = BIT(NL80211_IFTYPE_STATION) |
-			 BIT(NL80211_IFTYPE_ADHOC) |
-			 BIT(NL80211_IFTYPE_AP)
-	},
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
-			 BIT(NL80211_IFTYPE_P2P_GO)
-	},
-	{
-		.max = 1,
-		.types = BIT(NL80211_IFTYPE_P2P_DEVICE)
-	}
-};
-static const struct ieee80211_iface_combination brcmf_iface_combos[] = {
-	{
-		 .max_interfaces = BRCMF_IFACE_MAX_CNT,
-		 .num_different_channels = 2,
-		 .n_limits = ARRAY_SIZE(brcmf_iface_limits),
-		 .limits = brcmf_iface_limits
-	}
-};
-
-static const struct ieee80211_txrx_stypes
-brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
-	[NL80211_IFTYPE_STATION] = {
-		.tx = 0xffff,
-		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
-		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
-	},
-	[NL80211_IFTYPE_P2P_CLIENT] = {
-		.tx = 0xffff,
-		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
-		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
-	},
-	[NL80211_IFTYPE_P2P_GO] = {
-		.tx = 0xffff,
-		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
-		      BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
-		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
-		      BIT(IEEE80211_STYPE_DISASSOC >> 4) |
-		      BIT(IEEE80211_STYPE_AUTH >> 4) |
-		      BIT(IEEE80211_STYPE_DEAUTH >> 4) |
-		      BIT(IEEE80211_STYPE_ACTION >> 4)
-	},
-	[NL80211_IFTYPE_P2P_DEVICE] = {
-		.tx = 0xffff,
-		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
-		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
-	}
-};
-
-static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
-{
-	struct wiphy *wiphy;
-	s32 err = 0;
-
-	wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_info));
-	if (!wiphy) {
-		brcmf_err("Could not allocate wiphy device\n");
-		return ERR_PTR(-ENOMEM);
-	}
-	set_wiphy_dev(wiphy, phydev);
-	wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
-	wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
-	wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
-	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-				 BIT(NL80211_IFTYPE_ADHOC) |
-				 BIT(NL80211_IFTYPE_AP) |
-				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
-				 BIT(NL80211_IFTYPE_P2P_GO) |
-				 BIT(NL80211_IFTYPE_P2P_DEVICE);
-	wiphy->iface_combinations = brcmf_iface_combos;
-	wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
-	wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
-	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-	wiphy->cipher_suites = __wl_cipher_suites;
-	wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
-	wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
-			WIPHY_FLAG_OFFCHAN_TX |
-			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
-			WIPHY_FLAG_SUPPORTS_TDLS;
-	if (!brcmf_roamoff)
-		wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
-	wiphy->mgmt_stypes = brcmf_txrx_stypes;
-	wiphy->max_remain_on_channel_duration = 5000;
-	brcmf_wiphy_pno_params(wiphy);
-	brcmf_dbg(INFO, "Registering custom regulatory\n");
-	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
-	wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
-
-	/* vendor commands/events support */
-	wiphy->vendor_commands = brcmf_vendor_cmds;
-	wiphy->n_vendor_commands = BRCMF_VNDR_CMDS_LAST - 1;
-
-	err = wiphy_register(wiphy);
-	if (err < 0) {
-		brcmf_err("Could not register wiphy device (%d)\n", err);
-		wiphy_free(wiphy);
-		return ERR_PTR(err);
-	}
-	return wiphy;
-}
-
 struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
 					   enum nl80211_iftype type,
 					   bool pm_block)
@@ -4943,138 +4762,6 @@
 	mutex_init(&event->vif_event_lock);
 }
 
-static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
-{
-	struct brcmf_fil_bwcap_le band_bwcap;
-	u32 val;
-	int err;
-
-	/* verify support for bw_cap command */
-	val = WLC_BAND_5G;
-	err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
-
-	if (!err) {
-		/* only set 2G bandwidth using bw_cap command */
-		band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
-		err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
-					       sizeof(band_bwcap));
-	} else {
-		brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
-		val = WLC_N_BW_40ALL;
-		err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val);
-	}
-	return err;
-}
-
-struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
-						  struct device *busdev)
-{
-	struct net_device *ndev = drvr->iflist[0]->ndev;
-	struct brcmf_cfg80211_info *cfg;
-	struct wiphy *wiphy;
-	struct brcmf_cfg80211_vif *vif;
-	struct brcmf_if *ifp;
-	s32 err = 0;
-	s32 io_type;
-
-	if (!ndev) {
-		brcmf_err("ndev is invalid\n");
-		return NULL;
-	}
-
-	ifp = netdev_priv(ndev);
-	wiphy = brcmf_setup_wiphy(busdev);
-	if (IS_ERR(wiphy))
-		return NULL;
-
-	cfg = wiphy_priv(wiphy);
-	cfg->wiphy = wiphy;
-	cfg->pub = drvr;
-	init_vif_event(&cfg->vif_event);
-	INIT_LIST_HEAD(&cfg->vif_list);
-
-	vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false);
-	if (IS_ERR(vif)) {
-		wiphy_free(wiphy);
-		return NULL;
-	}
-
-	vif->ifp = ifp;
-	vif->wdev.netdev = ndev;
-	ndev->ieee80211_ptr = &vif->wdev;
-	SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
-
-	err = wl_init_priv(cfg);
-	if (err) {
-		brcmf_err("Failed to init iwm_priv (%d)\n", err);
-		goto cfg80211_attach_out;
-	}
-	ifp->vif = vif;
-
-	err = brcmf_p2p_attach(cfg);
-	if (err) {
-		brcmf_err("P2P initilisation failed (%d)\n", err);
-		goto cfg80211_p2p_attach_out;
-	}
-	err = brcmf_btcoex_attach(cfg);
-	if (err) {
-		brcmf_err("BT-coex initialisation failed (%d)\n", err);
-		brcmf_p2p_detach(&cfg->p2p);
-		goto cfg80211_p2p_attach_out;
-	}
-
-	/* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
-	 * setup 40MHz in 2GHz band and enable OBSS scanning.
-	 */
-	if (wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap &
-	    IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
-		err = brcmf_enable_bw40_2g(ifp);
-		if (!err)
-			err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
-						      BRCMF_OBSS_COEX_AUTO);
-	}
-	/* clear for now and rely on update later */
-	wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.ht_supported = false;
-	wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap = 0;
-
-	err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
-	if (err) {
-		brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
-		wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS;
-	}
-
-	err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION,
-				    &io_type);
-	if (err) {
-		brcmf_err("Failed to get D11 version (%d)\n", err);
-		goto cfg80211_p2p_attach_out;
-	}
-	cfg->d11inf.io_type = (u8)io_type;
-	brcmu_d11_attach(&cfg->d11inf);
-
-	return cfg;
-
-cfg80211_p2p_attach_out:
-	wl_deinit_priv(cfg);
-
-cfg80211_attach_out:
-	brcmf_free_vif(vif);
-	return NULL;
-}
-
-void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
-{
-	if (!cfg)
-		return;
-
-	WARN_ON(!list_empty(&cfg->vif_list));
-	wiphy_unregister(cfg->wiphy);
-	brcmf_btcoex_detach(cfg);
-	wl_deinit_priv(cfg);
-	wiphy_free(cfg->wiphy);
-}
-
 static s32
 brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout)
 {
@@ -5167,25 +4854,77 @@
 	return err;
 }
 
+/* Count only the 20MHz channels in the list of channels received from
+ * firmware. The wiphy band data only needs those; they get flagged to
+ * indicate whether they can take part in higher bandwidths.
+ */
+static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
+				       struct brcmf_chanspec_list *chlist,
+				       u32 chcnt[])
+{
+	u32 total = le32_to_cpu(chlist->count);
+	struct brcmu_chan ch;
+	int i;
 
-static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
-				   u32 bw_cap[])
+	for (i = 0; i < total; i++) {
+		ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
+		cfg->d11inf.decchspec(&ch);
+
+		/* Firmware gives an ordered list. We skip non-20MHz
+		 * channels in 2G. For 5G we can abort upon reaching
+		 * a non-20MHz channel in the list.
+		 */
+		if (ch.bw != BRCMU_CHAN_BW_20) {
+			if (ch.band == BRCMU_CHAN_BAND_5G)
+				break;
+			else
+				continue;
+		}
+
+		if (ch.band == BRCMU_CHAN_BAND_2G)
+			chcnt[0] += 1;
+		else if (ch.band == BRCMU_CHAN_BAND_5G)
+			chcnt[1] += 1;
+	}
+}
+
+static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
+					   struct brcmu_chan *ch)
+{
+	u32 ht40_flag;
+
+	ht40_flag = channel->flags & IEEE80211_CHAN_NO_HT40;
+	if (ch->sb == BRCMU_CHAN_SB_U) {
+		if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+			channel->flags &= ~IEEE80211_CHAN_NO_HT40;
+		channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
+	} else {
+		/* It should be one of
+		 * IEEE80211_CHAN_NO_HT40 or
+		 * IEEE80211_CHAN_NO_HT40PLUS
+		 */
+		channel->flags &= ~IEEE80211_CHAN_NO_HT40;
+		if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+			channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
+	}
+}
+
+static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
+				    u32 bw_cap[])
 {
 	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
-	struct ieee80211_channel *band_chan_arr;
+	struct ieee80211_supported_band *band;
+	struct ieee80211_channel *channel;
+	struct wiphy *wiphy;
 	struct brcmf_chanspec_list *list;
 	struct brcmu_chan ch;
-	s32 err;
+	int err;
 	u8 *pbuf;
 	u32 i, j;
 	u32 total;
-	enum ieee80211_band band;
-	u32 channel;
-	u32 *n_cnt;
+	u32 chaninfo;
+	u32 chcnt[2] = { 0, 0 };
 	u32 index;
-	u32 ht40_flag;
-	bool update;
-	u32 array_size;
 
 	pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
 
@@ -5198,11 +4937,45 @@
 				       BRCMF_DCMD_MEDLEN);
 	if (err) {
 		brcmf_err("get chanspecs error (%d)\n", err);
-		goto exit;
+		goto fail_pbuf;
 	}
 
-	__wl_band_2ghz.n_channels = 0;
-	__wl_band_5ghz_a.n_channels = 0;
+	brcmf_count_20mhz_channels(cfg, list, chcnt);
+	wiphy = cfg_to_wiphy(cfg);
+	if (chcnt[0]) {
+		band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
+			       GFP_KERNEL);
+		if (band == NULL) {
+			err = -ENOMEM;
+			goto fail_pbuf;
+		}
+		band->channels = kcalloc(chcnt[0], sizeof(*channel),
+					 GFP_KERNEL);
+		if (band->channels == NULL) {
+			kfree(band);
+			err = -ENOMEM;
+			goto fail_pbuf;
+		}
+		band->n_channels = 0;
+		wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+	}
+	if (chcnt[1]) {
+		band = kmemdup(&__wl_band_5ghz_a, sizeof(__wl_band_5ghz_a),
+			       GFP_KERNEL);
+		if (band == NULL) {
+			err = -ENOMEM;
+			goto fail_band2g;
+		}
+		band->channels = kcalloc(chcnt[1], sizeof(*channel),
+					 GFP_KERNEL);
+		if (band->channels == NULL) {
+			kfree(band);
+			err = -ENOMEM;
+			goto fail_band2g;
+		}
+		band->n_channels = 0;
+		wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+	}
 
 	total = le32_to_cpu(list->count);
 	for (i = 0; i < total; i++) {
@@ -5210,97 +4983,148 @@
 		cfg->d11inf.decchspec(&ch);
 
 		if (ch.band == BRCMU_CHAN_BAND_2G) {
-			band_chan_arr = __wl_2ghz_channels;
-			array_size = ARRAY_SIZE(__wl_2ghz_channels);
-			n_cnt = &__wl_band_2ghz.n_channels;
-			band = IEEE80211_BAND_2GHZ;
+			band = wiphy->bands[IEEE80211_BAND_2GHZ];
 		} else if (ch.band == BRCMU_CHAN_BAND_5G) {
-			band_chan_arr = __wl_5ghz_a_channels;
-			array_size = ARRAY_SIZE(__wl_5ghz_a_channels);
-			n_cnt = &__wl_band_5ghz_a.n_channels;
-			band = IEEE80211_BAND_5GHZ;
+			band = wiphy->bands[IEEE80211_BAND_5GHZ];
 		} else {
 			brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
 			continue;
 		}
-		if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
+		if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) &&
 		    ch.bw == BRCMU_CHAN_BW_40)
 			continue;
-		if (!(bw_cap[band] & WLC_BW_80MHZ_BIT) &&
+		if (!(bw_cap[band->band] & WLC_BW_80MHZ_BIT) &&
 		    ch.bw == BRCMU_CHAN_BW_80)
 			continue;
-		update = false;
-		for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
-			if (band_chan_arr[j].hw_value == ch.chnum) {
-				update = true;
+
+		channel = band->channels;
+		index = band->n_channels;
+		for (j = 0; j < band->n_channels; j++) {
+			if (channel[j].hw_value == ch.chnum) {
+				index = j;
 				break;
 			}
 		}
-		if (update)
-			index = j;
-		else
-			index = *n_cnt;
-		if (index <  array_size) {
-			band_chan_arr[index].center_freq =
-				ieee80211_channel_to_frequency(ch.chnum, band);
-			band_chan_arr[index].hw_value = ch.chnum;
+		channel[index].center_freq =
+			ieee80211_channel_to_frequency(ch.chnum, band->band);
+		channel[index].hw_value = ch.chnum;
 
-			/* assuming the chanspecs order is HT20,
-			 * HT40 upper, HT40 lower, and VHT80.
+		/* assuming the chanspecs order is HT20,
+		 * HT40 upper, HT40 lower, and VHT80.
+		 */
+		if (ch.bw == BRCMU_CHAN_BW_80) {
+			channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ;
+		} else if (ch.bw == BRCMU_CHAN_BW_40) {
+			brcmf_update_bw40_channel_flag(&channel[index], &ch);
+		} else {
+			/* disable other bandwidths for now as the mentioned
+			 * order assures they are enabled for subsequent
+			 * chanspecs.
 			 */
-			if (ch.bw == BRCMU_CHAN_BW_80) {
-				band_chan_arr[index].flags &=
-					~IEEE80211_CHAN_NO_80MHZ;
-			} else if (ch.bw == BRCMU_CHAN_BW_40) {
-				ht40_flag = band_chan_arr[index].flags &
-					    IEEE80211_CHAN_NO_HT40;
-				if (ch.sb == BRCMU_CHAN_SB_U) {
-					if (ht40_flag == IEEE80211_CHAN_NO_HT40)
-						band_chan_arr[index].flags &=
-							~IEEE80211_CHAN_NO_HT40;
-					band_chan_arr[index].flags |=
-						IEEE80211_CHAN_NO_HT40PLUS;
-				} else {
-					/* It should be one of
-					 * IEEE80211_CHAN_NO_HT40 or
-					 * IEEE80211_CHAN_NO_HT40PLUS
-					 */
-					band_chan_arr[index].flags &=
-							~IEEE80211_CHAN_NO_HT40;
-					if (ht40_flag == IEEE80211_CHAN_NO_HT40)
-						band_chan_arr[index].flags |=
-						    IEEE80211_CHAN_NO_HT40MINUS;
-				}
-			} else {
-				/* disable other bandwidths for now as mentioned
-				 * order assure they are enabled for subsequent
-				 * chanspecs.
-				 */
-				band_chan_arr[index].flags =
-						IEEE80211_CHAN_NO_HT40 |
-						IEEE80211_CHAN_NO_80MHZ;
-				ch.bw = BRCMU_CHAN_BW_20;
-				cfg->d11inf.encchspec(&ch);
-				channel = ch.chspec;
-				err = brcmf_fil_bsscfg_int_get(ifp,
-							       "per_chan_info",
-							       &channel);
-				if (!err) {
-					if (channel & WL_CHAN_RADAR)
-						band_chan_arr[index].flags |=
-							(IEEE80211_CHAN_RADAR |
-							IEEE80211_CHAN_NO_IR);
-					if (channel & WL_CHAN_PASSIVE)
-						band_chan_arr[index].flags |=
-						    IEEE80211_CHAN_NO_IR;
-				}
+			channel[index].flags = IEEE80211_CHAN_NO_HT40 |
+					       IEEE80211_CHAN_NO_80MHZ;
+			ch.bw = BRCMU_CHAN_BW_20;
+			cfg->d11inf.encchspec(&ch);
+			chaninfo = ch.chspec;
+			err = brcmf_fil_bsscfg_int_get(ifp, "per_chan_info",
+						       &chaninfo);
+			if (!err) {
+				if (chaninfo & WL_CHAN_RADAR)
+					channel[index].flags |=
+						(IEEE80211_CHAN_RADAR |
+						 IEEE80211_CHAN_NO_IR);
+				if (chaninfo & WL_CHAN_PASSIVE)
+					channel[index].flags |=
+						IEEE80211_CHAN_NO_IR;
 			}
-			if (!update)
-				(*n_cnt)++;
+		}
+		if (index == band->n_channels)
+			band->n_channels++;
+	}
+	kfree(pbuf);
+	return 0;
+
+fail_band2g:
+	kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
+	kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
+	wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+fail_pbuf:
+	kfree(pbuf);
+	return err;
+}
+
+static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
+{
+	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+	struct ieee80211_supported_band *band;
+	struct brcmf_fil_bwcap_le band_bwcap;
+	struct brcmf_chanspec_list *list;
+	u8 *pbuf;
+	u32 val;
+	int err;
+	struct brcmu_chan ch;
+	u32 num_chan;
+	int i, j;
+
+	/* verify support for bw_cap command */
+	val = WLC_BAND_5G;
+	err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
+
+	if (!err) {
+		/* only set 2G bandwidth using bw_cap command */
+		band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
+		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
+		err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
+					       sizeof(band_bwcap));
+	} else {
+		brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
+		val = WLC_N_BW_40ALL;
+		err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val);
+	}
+
+	if (!err) {
+		/* update channel info in 2G band */
+		pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
+
+		if (pbuf == NULL)
+			return -ENOMEM;
+
+		ch.band = BRCMU_CHAN_BAND_2G;
+		ch.bw = BRCMU_CHAN_BW_40;
+		ch.chnum = 0;
+		cfg->d11inf.encchspec(&ch);
+
+		/* pass encoded chanspec in query */
+		*(__le16 *)pbuf = cpu_to_le16(ch.chspec);
+
+		err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf,
+					       BRCMF_DCMD_MEDLEN);
+		if (err) {
+			brcmf_err("get chanspecs error (%d)\n", err);
+			kfree(pbuf);
+			return err;
+		}
+
+		band = cfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
+		list = (struct brcmf_chanspec_list *)pbuf;
+		num_chan = le32_to_cpu(list->count);
+		for (i = 0; i < num_chan; i++) {
+			ch.chspec = (u16)le32_to_cpu(list->element[i]);
+			cfg->d11inf.decchspec(&ch);
+			if (WARN_ON(ch.band != BRCMU_CHAN_BAND_2G))
+				continue;
+			if (WARN_ON(ch.bw != BRCMU_CHAN_BW_40))
+				continue;
+			for (j = 0; j < band->n_channels; j++) {
+				if (band->channels[j].hw_value == ch.chnum)
+					break;
+			}
+			if (WARN_ON(j == band->n_channels))
+				continue;
+
+			brcmf_update_bw40_channel_flag(&band->channels[j], &ch);
 		}
 	}
-exit:
-	kfree(pbuf);
 	return err;
 }
 
@@ -5394,44 +5218,19 @@
 	band->vht_cap.vht_mcs.tx_mcs_map = mcs_map;
 }
 
-static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
+static int brcmf_setup_wiphybands(struct wiphy *wiphy)
 {
+	struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
 	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
-	struct wiphy *wiphy;
-	s32 phy_list;
-	u32 band_list[3];
 	u32 nmode = 0;
 	u32 vhtmode = 0;
-	u32 bw_cap[2] = { 0, 0 };
+	u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
 	u32 rxchain;
 	u32 nchain;
-	s8 phy;
-	s32 err;
-	u32 nband;
+	int err;
 	s32 i;
-	struct ieee80211_supported_band *bands[2] = { NULL, NULL };
 	struct ieee80211_supported_band *band;
 
-	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST,
-				     &phy_list, sizeof(phy_list));
-	if (err) {
-		brcmf_err("BRCMF_C_GET_PHYLIST error (%d)\n", err);
-		return err;
-	}
-
-	phy = ((char *)&phy_list)[0];
-	brcmf_dbg(INFO, "BRCMF_C_GET_PHYLIST reported: %c phy\n", phy);
-
-
-	err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST,
-				     &band_list, sizeof(band_list));
-	if (err) {
-		brcmf_err("BRCMF_C_GET_BANDLIST error (%d)\n", err);
-		return err;
-	}
-	brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n",
-		  band_list[0], band_list[1], band_list[2]);
-
 	(void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
 	err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
 	if (err) {
@@ -5453,44 +5252,129 @@
 	}
 	brcmf_dbg(INFO, "nchain=%d\n", nchain);
 
-	err = brcmf_construct_reginfo(cfg, bw_cap);
+	err = brcmf_construct_chaninfo(cfg, bw_cap);
 	if (err) {
-		brcmf_err("brcmf_construct_reginfo failed (%d)\n", err);
+		brcmf_err("brcmf_construct_chaninfo failed (%d)\n", err);
 		return err;
 	}
 
-	nband = band_list[0];
-
-	for (i = 1; i <= nband && i < ARRAY_SIZE(band_list); i++) {
-		band = NULL;
-		if ((band_list[i] == WLC_BAND_5G) &&
-		    (__wl_band_5ghz_a.n_channels > 0))
-			band = &__wl_band_5ghz_a;
-		else if ((band_list[i] == WLC_BAND_2G) &&
-			 (__wl_band_2ghz.n_channels > 0))
-			band = &__wl_band_2ghz;
-		else
+	wiphy = cfg_to_wiphy(cfg);
+	for (i = 0; i < ARRAY_SIZE(wiphy->bands); i++) {
+		band = wiphy->bands[i];
+		if (band == NULL)
 			continue;
 
 		if (nmode)
 			brcmf_update_ht_cap(band, bw_cap, nchain);
 		if (vhtmode)
 			brcmf_update_vht_cap(band, bw_cap, nchain);
-		bands[band->band] = band;
 	}
 
-	wiphy = cfg_to_wiphy(cfg);
-	wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
-	wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ];
-	wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
-
-	return err;
+	return 0;
 }
 
+static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION) |
+			 BIT(NL80211_IFTYPE_ADHOC) |
+			 BIT(NL80211_IFTYPE_AP)
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+			 BIT(NL80211_IFTYPE_P2P_GO)
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+	}
+};
+static struct ieee80211_iface_combination brcmf_iface_combos[] = {
+	{
+		 .max_interfaces = BRCMF_IFACE_MAX_CNT,
+		 .num_different_channels = 1,
+		 .n_limits = ARRAY_SIZE(brcmf_iface_limits),
+		 .limits = brcmf_iface_limits
+	}
+};
 
-static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_info *cfg)
+static const struct ieee80211_txrx_stypes
+brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
+	[NL80211_IFTYPE_STATION] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_P2P_CLIENT] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_P2P_GO] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		      BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		      BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		      BIT(IEEE80211_STYPE_AUTH >> 4) |
+		      BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		      BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_P2P_DEVICE] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	}
+};
+
+static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
 {
-	return brcmf_update_wiphybands(cfg);
+	/* scheduled scan settings */
+	wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
+	wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
+	wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
+	wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+}
+
+static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
+{
+	struct ieee80211_iface_combination ifc_combo;
+	wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+	wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
+	wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+				 BIT(NL80211_IFTYPE_ADHOC) |
+				 BIT(NL80211_IFTYPE_AP) |
+				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+				 BIT(NL80211_IFTYPE_P2P_GO) |
+				 BIT(NL80211_IFTYPE_P2P_DEVICE);
+	/* need VSDB firmware feature for concurrent channels */
+	ifc_combo = brcmf_iface_combos[0];
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
+		ifc_combo.num_different_channels = 2;
+	wiphy->iface_combinations = kmemdup(&ifc_combo,
+					    sizeof(ifc_combo),
+					    GFP_KERNEL);
+	wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
+	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wiphy->cipher_suites = __wl_cipher_suites;
+	wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+	wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
+			WIPHY_FLAG_OFFCHAN_TX |
+			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+			WIPHY_FLAG_SUPPORTS_TDLS;
+	if (!brcmf_roamoff)
+		wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+	wiphy->mgmt_stypes = brcmf_txrx_stypes;
+	wiphy->max_remain_on_channel_duration = 5000;
+	brcmf_wiphy_pno_params(wiphy);
+
+	/* vendor commands/events support */
+	wiphy->vendor_commands = brcmf_vendor_cmds;
+	wiphy->n_vendor_commands = BRCMF_VNDR_CMDS_LAST - 1;
+
+	return brcmf_setup_wiphybands(wiphy);
 }
 
 static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -5528,9 +5412,6 @@
 					  NULL, NULL);
 	if (err)
 		goto default_conf_out;
-	err = brcmf_dongle_probecap(cfg);
-	if (err)
-		goto default_conf_out;
 
 	brcmf_configure_arp_offload(ifp, true);
 
@@ -5658,3 +5539,150 @@
 				  vif_event_equals(event, action), timeout);
 }
 
+static void brcmf_free_wiphy(struct wiphy *wiphy)
+{
+	kfree(wiphy->iface_combinations);
+	if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
+		kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
+		kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
+	}
+	if (wiphy->bands[IEEE80211_BAND_5GHZ]) {
+		kfree(wiphy->bands[IEEE80211_BAND_5GHZ]->channels);
+		kfree(wiphy->bands[IEEE80211_BAND_5GHZ]);
+	}
+	wiphy_free(wiphy);
+}
+
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+						  struct device *busdev)
+{
+	struct net_device *ndev = drvr->iflist[0]->ndev;
+	struct brcmf_cfg80211_info *cfg;
+	struct wiphy *wiphy;
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_if *ifp;
+	s32 err = 0;
+	s32 io_type;
+	u16 *cap = NULL;
+
+	if (!ndev) {
+		brcmf_err("ndev is invalid\n");
+		return NULL;
+	}
+
+	ifp = netdev_priv(ndev);
+	wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_info));
+	if (!wiphy) {
+		brcmf_err("Could not allocate wiphy device\n");
+		return NULL;
+	}
+	set_wiphy_dev(wiphy, busdev);
+
+	cfg = wiphy_priv(wiphy);
+	cfg->wiphy = wiphy;
+	cfg->pub = drvr;
+	init_vif_event(&cfg->vif_event);
+	INIT_LIST_HEAD(&cfg->vif_list);
+
+	vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false);
+	if (IS_ERR(vif))
+		goto wiphy_out;
+
+	vif->ifp = ifp;
+	vif->wdev.netdev = ndev;
+	ndev->ieee80211_ptr = &vif->wdev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
+
+	err = wl_init_priv(cfg);
+	if (err) {
+		brcmf_err("Failed to init iwm_priv (%d)\n", err);
+		brcmf_free_vif(vif);
+		goto wiphy_out;
+	}
+	ifp->vif = vif;
+
+	/* determine d11 io type before wiphy setup */
+	err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION, &io_type);
+	if (err) {
+		brcmf_err("Failed to get D11 version (%d)\n", err);
+		goto priv_out;
+	}
+	cfg->d11inf.io_type = (u8)io_type;
+	brcmu_d11_attach(&cfg->d11inf);
+
+	err = brcmf_setup_wiphy(wiphy, ifp);
+	if (err < 0)
+		goto priv_out;
+
+	brcmf_dbg(INFO, "Registering custom regulatory\n");
+	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+	wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
+
+	/* Firmware defaults to 40MHz disabled in the 2G band. We signal
+	 * cfg80211 here that we support it and let cfg80211 decide
+	 * whether it can be enabled. But first check that the device
+	 * supports 2G operation at all.
+	 */
+	if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
+		cap = &wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap;
+		*cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	}
+	err = wiphy_register(wiphy);
+	if (err < 0) {
+		brcmf_err("Could not register wiphy device (%d)\n", err);
+		goto priv_out;
+	}
+
+	/* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
+	 * set up 40MHz in the 2GHz band and enable OBSS scanning.
+	 */
+	if (cap && (*cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) {
+		err = brcmf_enable_bw40_2g(cfg);
+		if (!err)
+			err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
+						      BRCMF_OBSS_COEX_AUTO);
+		else
+			*cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	}
+
+	err = brcmf_p2p_attach(cfg);
+	if (err) {
+		brcmf_err("P2P initilisation failed (%d)\n", err);
+		goto wiphy_unreg_out;
+	}
+	err = brcmf_btcoex_attach(cfg);
+	if (err) {
+		brcmf_err("BT-coex initialisation failed (%d)\n", err);
+		brcmf_p2p_detach(&cfg->p2p);
+		goto wiphy_unreg_out;
+	}
+
+	err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
+	if (err) {
+		brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
+		wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS;
+	}
+
+	return cfg;
+
+wiphy_unreg_out:
+	wiphy_unregister(cfg->wiphy);
+priv_out:
+	wl_deinit_priv(cfg);
+	brcmf_free_vif(vif);
+wiphy_out:
+	brcmf_free_wiphy(wiphy);
+	return NULL;
+}
+
+void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
+{
+	if (!cfg)
+		return;
+
+	WARN_ON(!list_empty(&cfg->vif_list));
+	wiphy_unregister(cfg->wiphy);
+	brcmf_btcoex_detach(cfg);
+	brcmf_p2p_detach(&cfg->p2p);
+	wl_deinit_priv(cfg);
+	brcmf_free_wiphy(cfg->wiphy);
+}
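
A note on the new channel handling above: brcmf_construct_chaninfo() now builds the wiphy band data at runtime (a kmemdup'd band template plus a kcalloc'd array sized from brcmf_count_20mhz_channels()) and folds repeated chanspecs into a single entry per hw_value, only incrementing n_channels when a fresh slot is used. The standalone sketch below models just that allocate-and-dedup indexing; struct chan_entry and add_channel() are stand-ins for illustration, not driver code.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct ieee80211_channel. */
struct chan_entry {
	int hw_value;
	int center_freq;
};

struct band {
	struct chan_entry *channels;
	int n_channels;
};

/* Reuse the slot when the channel number was seen before, otherwise
 * append a new entry - same indexing as in brcmf_construct_chaninfo(). */
static void add_channel(struct band *band, int chnum, int freq)
{
	int index = band->n_channels;
	int j;

	for (j = 0; j < band->n_channels; j++) {
		if (band->channels[j].hw_value == chnum) {
			index = j;
			break;
		}
	}
	band->channels[index].hw_value = chnum;
	band->channels[index].center_freq = freq;
	if (index == band->n_channels)
		band->n_channels++;
}

int main(void)
{
	/* Three distinct 20MHz channels; the repeated chanspec for
	 * channel 6 (e.g. its HT40 variant) reuses the existing slot. */
	int chanspecs[][2] = { {1, 2412}, {6, 2437}, {6, 2437}, {11, 2462} };
	struct band b = { calloc(3, sizeof(struct chan_entry)), 0 };
	unsigned int i;

	if (!b.channels)
		return 1;
	for (i = 0; i < sizeof(chanspecs) / sizeof(chanspecs[0]); i++)
		add_channel(&b, chanspecs[i][0], chanspecs[i][1]);
	printf("n_channels = %d\n", b.n_channels);	/* prints 3 */
	free(b.channels);
	return 0;
}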
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index af8ba64..1b47482 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -4707,41 +4707,6 @@
 	return err;
 }
 
-static void brcms_c_attach_antgain_init(struct brcms_c_info *wlc)
-{
-	uint unit;
-	unit = wlc->pub->unit;
-
-	if ((wlc->band->antgain == -1) && (wlc->pub->sromrev == 1)) {
-		/* default antenna gain for srom rev 1 is 2 dBm (8 qdbm) */
-		wlc->band->antgain = 8;
-	} else if (wlc->band->antgain == -1) {
-		wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
-			  " srom, using 2dB\n", unit, __func__);
-		wlc->band->antgain = 8;
-	} else {
-		s8 gain, fract;
-		/* Older sroms specified gain in whole dbm only.  In order
-		 * be able to specify qdbm granularity and remain backward
-		 * compatible the whole dbms are now encoded in only
-		 * low 6 bits and remaining qdbms are encoded in the hi 2 bits.
-		 * 6 bit signed number ranges from -32 - 31.
-		 *
-		 * Examples:
-		 * 0x1 = 1 db,
-		 * 0xc1 = 1.75 db (1 + 3 quarters),
-		 * 0x3f = -1 (-1 + 0 quarters),
-		 * 0x7f = -.75 (-1 + 1 quarters) = -3 qdbm.
-		 * 0xbf = -.50 (-1 + 2 quarters) = -2 qdbm.
-		 */
-		gain = wlc->band->antgain & 0x3f;
-		gain <<= 2;	/* Sign extend */
-		gain >>= 2;
-		fract = (wlc->band->antgain & 0xc0) >> 6;
-		wlc->band->antgain = 4 * gain + fract;
-	}
-}
-
 static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc)
 {
 	int aa;
@@ -4780,8 +4745,6 @@
 	else
 		wlc->band->antgain = sprom->antenna_gain.a0;
 
-	brcms_c_attach_antgain_init(wlc);
-
 	return true;
 }
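
The brcms_c_attach_antgain_init() helper removed above was also the only place documenting the SROM antenna-gain encoding: a signed whole-dB value in bits 5:0 plus quarter-dB steps in bits 7:6, combined as 4 * gain + fract quarter-dBm (qdbm). The sketch below reproduces that decoding with the example values from the removed comment; it is an illustration of the encoding, not a substitute for the deleted code.

#include <stdio.h>

/* Decode the SROM antenna-gain byte described in the removed comment:
 * bits 5:0 hold a signed whole-dB value, bits 7:6 hold quarter-dB steps. */
static int antgain_to_qdbm(unsigned char raw)
{
	int gain = raw & 0x3f;		/* 6-bit whole-dB field */
	int fract = (raw & 0xc0) >> 6;	/* 2-bit quarter-dB field */

	if (gain & 0x20)		/* sign extend: valid range is -32..31 */
		gain -= 64;
	return 4 * gain + fract;	/* quarter dBm */
}

int main(void)
{
	/* Values from the removed comment: 0x01 = 1 dB, 0xc1 = 1.75 dB,
	 * 0x3f = -1 dB, 0x7f = -3 qdbm, 0xbf = -2 qdbm. */
	unsigned char samples[] = { 0x01, 0xc1, 0x3f, 0x7f, 0xbf };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++)
		printf("0x%02x -> %d qdbm\n", samples[i],
		       antgain_to_qdbm(samples[i]));
	return 0;
}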
 
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index d816270..64d1a7b 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -17,32 +17,56 @@
 #ifndef	_BRCM_HW_IDS_H_
 #define	_BRCM_HW_IDS_H_
 
-#define BCM4313_D11N2G_ID	0x4727	/* 4313 802.11n 2.4G device */
+#include <linux/pci_ids.h>
+#include <linux/mmc/sdio_ids.h>
 
+#define BRCM_USB_VENDOR_ID_BROADCOM	0x0a5c
+#define BRCM_PCIE_VENDOR_ID_BROADCOM	PCI_VENDOR_ID_BROADCOM
+#define BRCM_SDIO_VENDOR_ID_BROADCOM	SDIO_VENDOR_ID_BROADCOM
+
+/* Chipcommon Core Chip IDs */
+#define BRCM_CC_43143_CHIP_ID		43143
+#define BRCM_CC_43235_CHIP_ID		43235
+#define BRCM_CC_43236_CHIP_ID		43236
+#define BRCM_CC_43238_CHIP_ID		43238
+#define BRCM_CC_43241_CHIP_ID		0x4324
+#define BRCM_CC_43242_CHIP_ID		43242
+#define BRCM_CC_4329_CHIP_ID		0x4329
+#define BRCM_CC_4330_CHIP_ID		0x4330
+#define BRCM_CC_4334_CHIP_ID		0x4334
+#define BRCM_CC_43362_CHIP_ID		43362
+#define BRCM_CC_4335_CHIP_ID		0x4335
+#define BRCM_CC_4339_CHIP_ID		0x4339
+#define BRCM_CC_4354_CHIP_ID		0x4354
+#define BRCM_CC_43566_CHIP_ID		43566
+#define BRCM_CC_43569_CHIP_ID		43569
+
+/* SDIO Device IDs */
+#define BRCM_SDIO_43143_DEVICE_ID	BRCM_CC_43143_CHIP_ID
+#define BRCM_SDIO_43241_DEVICE_ID	BRCM_CC_43241_CHIP_ID
+#define BRCM_SDIO_4329_DEVICE_ID	BRCM_CC_4329_CHIP_ID
+#define BRCM_SDIO_4330_DEVICE_ID	BRCM_CC_4330_CHIP_ID
+#define BRCM_SDIO_4334_DEVICE_ID	BRCM_CC_4334_CHIP_ID
+#define BRCM_SDIO_43362_DEVICE_ID	BRCM_CC_43362_CHIP_ID
+#define BRCM_SDIO_4335_4339_DEVICE_ID	BRCM_CC_4335_CHIP_ID
+#define BRCM_SDIO_4354_DEVICE_ID	BRCM_CC_4354_CHIP_ID
+
+/* USB Device IDs */
+#define BRCM_USB_43143_DEVICE_ID	0xbd1e
+#define BRCM_USB_43236_DEVICE_ID	0xbd17
+#define BRCM_USB_43242_DEVICE_ID	0xbd1f
+#define BRCM_USB_43569_DEVICE_ID	0xbd27
+#define BRCM_USB_BCMFW_DEVICE_ID	0x0bdc
+
+/* brcmsmac IDs */
+#define BCM4313_D11N2G_ID	0x4727	/* 4313 802.11n 2.4G device */
 #define BCM43224_D11N_ID	0x4353	/* 43224 802.11n dualband device */
 #define BCM43224_D11N_ID_VEN1	0x0576	/* Vendor specific 43224 802.11n db */
-
 #define BCM43225_D11N2G_ID	0x4357	/* 43225 802.11n 2.4GHz device */
-
 #define BCM43236_D11N_ID	0x4346	/* 43236 802.11n dualband device */
 #define BCM43236_D11N2G_ID	0x4347	/* 43236 802.11n 2.4GHz device */
 
-/* Chipcommon Core Chip IDs */
 #define BCM4313_CHIP_ID		0x4313
-#define BCM43143_CHIP_ID	43143
 #define BCM43224_CHIP_ID	43224
-#define BCM43225_CHIP_ID	43225
-#define BCM43235_CHIP_ID	43235
-#define BCM43236_CHIP_ID	43236
-#define BCM43238_CHIP_ID	43238
-#define BCM43241_CHIP_ID	0x4324
-#define BCM4329_CHIP_ID		0x4329
-#define BCM4330_CHIP_ID		0x4330
-#define BCM4331_CHIP_ID		0x4331
-#define BCM4334_CHIP_ID		0x4334
-#define BCM4335_CHIP_ID		0x4335
-#define BCM43362_CHIP_ID	43362
-#define BCM4339_CHIP_ID		0x4339
-#define BCM4354_CHIP_ID		0x4354
 
 #endif				/* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 7fd5042..6451d2b 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -20,16 +20,17 @@
 		Intel 2000 Series Wi-Fi Adapters
 		Intel 7260 Wi-Fi Adapter
 		Intel 3160 Wi-Fi Adapter
+		Intel 7265 Wi-Fi Adapter
 
 
 	  This driver uses the kernel's mac80211 subsystem.
 
-	  In order to use this driver, you will need a microcode (uCode)
+	  In order to use this driver, you will need a firmware
 	  image for it. You can obtain the microcode from:
 
-	          <http://intellinuxwireless.org/>.
+	          <http://wireless.kernel.org/en/users/Drivers/iwlwifi>.
 
-	  The microcode is typically installed in /lib/firmware. You can
+	  The firmware is typically installed in /lib/firmware. You can
 	  look in the hotplug script /etc/hotplug/firmware.agent to
 	  determine which directory FIRMWARE_DIR is set to when the script
 	  runs.
@@ -39,9 +40,10 @@
 	  say M here and read <file:Documentation/kbuild/modules.txt>.  The
 	  module will be called iwlwifi.
 
+if IWLWIFI
+
 config IWLWIFI_LEDS
 	bool
-	depends on IWLWIFI
 	depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
 	select LEDS_TRIGGERS
 	select MAC80211_LEDS
@@ -49,7 +51,7 @@
 
 config IWLDVM
 	tristate "Intel Wireless WiFi DVM Firmware support"
-	depends on IWLWIFI
+	depends on m
 	default IWLWIFI
 	help
 	  This is the driver that supports the DVM firmware which is
@@ -58,7 +60,7 @@
 
 config IWLMVM
 	tristate "Intel Wireless WiFi MVM Firmware support"
-	depends on IWLWIFI
+	depends on m
 	help
 	  This is the driver that supports the MVM firmware which is
 	  currently only available for 7260 and 3160 devices.
@@ -70,7 +72,7 @@
 	default y if IWLMVM=m
 
 comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
-	depends on IWLWIFI && IWLDVM=n && IWLMVM=n
+	depends on IWLDVM=n && IWLMVM=n
 
 config IWLWIFI_BCAST_FILTERING
 	bool "Enable broadcast filtering"
@@ -86,11 +88,9 @@
 	  expect incoming broadcasts for their normal operations.
 
 menu "Debugging Options"
-	depends on IWLWIFI
 
 config IWLWIFI_DEBUG
 	bool "Enable full debugging output in the iwlwifi driver"
-	depends on IWLWIFI
 	---help---
 	  This option will enable debug tracing output for the iwlwifi drivers
 
@@ -115,7 +115,7 @@
 
 config IWLWIFI_DEBUGFS
         bool "iwlwifi debugfs support"
-        depends on IWLWIFI && MAC80211_DEBUGFS
+        depends on MAC80211_DEBUGFS
         ---help---
 	  Enable creation of debugfs files for the iwlwifi drivers. This
 	  is a low-impact option that allows getting insight into the
@@ -123,13 +123,12 @@
 
 config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
         bool "Experimental uCode support"
-        depends on IWLWIFI && IWLWIFI_DEBUG
+        depends on IWLWIFI_DEBUG
         ---help---
 	  Enable use of experimental ucode for testing and debugging.
 
 config IWLWIFI_DEVICE_TRACING
 	bool "iwlwifi device access tracing"
-	depends on IWLWIFI
 	depends on EVENT_TRACING
 	help
 	  Say Y here to trace all commands, including TX frames and IO
@@ -145,3 +144,5 @@
 	  If unsure, say Y so we can help you better when problems
 	  occur.
 endmenu
+
+endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 51486cc..44b19e0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -85,6 +85,9 @@
 #define NVM_HW_SECTION_NUM_FAMILY_8000		10
 #define DEFAULT_NVM_FILE_FAMILY_8000		"iwl_nvm_8000.bin"
 
+/* Max SDIO RX aggregation size of the ADDBA request/response */
+#define MAX_RX_AGG_SIZE_8260_SDIO	28
+
 static const struct iwl_base_params iwl8000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -129,6 +132,7 @@
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
+	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
 };
 
 MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 034c2fc..8da596d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -240,6 +240,7 @@
  * @d0i3: device uses d0i3 instead of d3
  * @nvm_hw_section_num: the ID of the HW NVM section
  * @pwr_tx_backoffs: translation table between power limits and backoffs
+ * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -276,6 +277,7 @@
 	const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
 	bool no_power_up_nic_in_init;
 	const char *default_nvm_file;
+	unsigned int max_rx_agg_size;
 };
 
 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index c39a0b8..de5994a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -70,21 +70,24 @@
 /**
  * enum iwl_fw_error_dump_type - types of data in the dump file
  * @IWL_FW_ERROR_DUMP_SRAM:
- * @IWL_FW_ERROR_DUMP_REG:
+ * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
  * @IWL_FW_ERROR_DUMP_RXF:
  * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
  *	&struct iwl_fw_error_dump_txcmd packets
  * @IWL_FW_ERROR_DUMP_DEV_FW_INFO:  struct %iwl_fw_error_dump_info
  *	info on the device / firmware.
  * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
+ * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
+ *	sections like this in a single file.
  */
 enum iwl_fw_error_dump_type {
 	IWL_FW_ERROR_DUMP_SRAM = 0,
-	IWL_FW_ERROR_DUMP_REG = 1,
+	IWL_FW_ERROR_DUMP_CSR = 1,
 	IWL_FW_ERROR_DUMP_RXF = 2,
 	IWL_FW_ERROR_DUMP_TXCMD = 3,
 	IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4,
 	IWL_FW_ERROR_DUMP_FW_MONITOR = 5,
+	IWL_FW_ERROR_DUMP_PRPH = 6,
 
 	IWL_FW_ERROR_DUMP_MAX,
 };
@@ -163,6 +166,16 @@
 } __packed;
 
 /**
+ * struct iwl_fw_error_dump_prph - periphery registers data
+ * @prph_start: address of the first register in this chunk
+ * @data: the content of the registers
+ */
+struct iwl_fw_error_dump_prph {
+	__le32 prph_start;
+	__le32 data[];
+};
+
+/**
  * iwl_fw_error_next_data - advance fw error dump data pointer
  * @data: previous data block
  * Returns: next data block
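
The enum above lays the error dump out as a sequence of typed sections (SRAM, CSR, RXF, TXCMD, PRPH, ...) that readers walk with iwl_fw_error_next_data(). The section header layout itself is not shown in this hunk, so the sketch below uses a hypothetical struct dump_tlv only to illustrate that kind of type/length walk; its field names and sizes are assumptions, not the iwlwifi on-disk format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical section header: type and length followed by payload. */
struct dump_tlv {
	uint32_t type;
	uint32_t len;
	uint8_t data[];
};

static const struct dump_tlv *next_tlv(const struct dump_tlv *tlv)
{
	return (const struct dump_tlv *)(tlv->data + tlv->len);
}

int main(void)
{
	uint32_t buf[16];	/* keeps the fake sections 4-byte aligned */
	struct dump_tlv *t = (struct dump_tlv *)buf;
	const struct dump_tlv *it;
	const uint8_t *end;

	t->type = 6;		/* a PRPH-like section */
	t->len = 8;
	memset(t->data, 0xab, t->len);
	t = (struct dump_tlv *)(t->data + t->len);
	t->type = 2;		/* an RXF-like section */
	t->len = 4;
	memset(t->data, 0xcd, t->len);
	end = t->data + t->len;

	for (it = (const struct dump_tlv *)buf; (const uint8_t *)it < end;
	     it = next_tlv(it))
		printf("section type %u, %u bytes\n",
		       (unsigned int)it->type, (unsigned int)it->len);
	return 0;
}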
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index f2d39cb..71507cf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -99,7 +99,7 @@
  * @wd_disable: disable stuck queue check, default = 1
  * @bt_coex_active: enable bt coex, default = true
  * @led_mode: system default, default = 0
- * @power_save: disable power save, default = false
+ * @power_save: enable power save, default = false
  * @power_level: power level, default = 1
  * @debug_level: levels are IWL_DL_*
  * @ant_coupling: antenna coupling in dB, default = 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 34d49e1..656371a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -394,6 +394,11 @@
 	const char *const *command_names;
 };
 
+struct iwl_trans_dump_data {
+	u32 len;
+	u8 data[];
+};
+
 struct iwl_trans;
 
 /**
@@ -461,10 +466,8 @@
  * @unref: release a reference previously taken with @ref. Note that
  *	initially the reference count is 1, making an initial @unref
  *	necessary to allow low power states.
- * @dump_data: fill a data dump with debug data, maybe containing last
- *	TX'ed commands and similar. When called with a NULL buffer and
- *	zero buffer length, provide only the (estimated) required buffer
- *	length. Return the used buffer length.
+ * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
+ *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
  *	Note that the transport must fill in the proper file headers.
  */
 struct iwl_trans_ops {
@@ -518,7 +521,7 @@
 	void (*unref)(struct iwl_trans *trans);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-	u32 (*dump_data)(struct iwl_trans *trans, void *buf, u32 buflen);
+	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
 #endif
 };
 
@@ -685,12 +688,12 @@
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-static inline u32 iwl_trans_dump_data(struct iwl_trans *trans,
-				      void *buf, u32 buflen)
+static inline struct iwl_trans_dump_data *
+iwl_trans_dump_data(struct iwl_trans *trans)
 {
 	if (!trans->ops->dump_data)
-		return 0;
-	return trans->ops->dump_data(trans, buf, buflen);
+		return NULL;
+	return trans->ops->dump_data(trans);
 }
 #endif
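
The reworked @dump_data contract above inverts buffer ownership: the transport returns a vmalloc'ed, length-prefixed struct iwl_trans_dump_data (or NULL when unsupported) and the caller consumes and vfrees it, instead of pre-querying a size and passing its own buffer. A user-space sketch of that ownership rule, with malloc/free standing in for vmalloc/vfree; it models the contract only, not the transport internals.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Length-prefixed dump buffer, shaped like struct iwl_trans_dump_data. */
struct trans_dump_data {
	unsigned int len;
	unsigned char data[];
};

/* Producer side: allocate and fill the buffer (malloc stands in for
 * vmalloc). Returning NULL mirrors a transport without dump support. */
static struct trans_dump_data *get_dump_data(void)
{
	static const char payload[] = "fw error dump";
	struct trans_dump_data *dump;

	dump = malloc(sizeof(*dump) + sizeof(payload));
	if (!dump)
		return NULL;
	dump->len = sizeof(payload);
	memcpy(dump->data, payload, dump->len);
	return dump;
}

int main(void)
{
	struct trans_dump_data *dump = get_dump_data();

	if (!dump)
		return 1;
	printf("got %u bytes of dump data\n", dump->len);
	free(dump);	/* the caller owns and releases the buffer */
	return 0;
}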
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 8110fe0..2291bbc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -72,16 +72,56 @@
 
 #define BT_ANTENNA_COUPLING_THRESHOLD		(30)
 
-const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
-	[BT_KILL_MSK_DEFAULT] = 0xffff0000,
-	[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
-	[BT_KILL_MSK_REDUCED_TXPOW] = 0,
+const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
+	[BT_KILL_MSK_DEFAULT] = 0xfffffc00,
+	[BT_KILL_MSK_NEVER] = 0xffffffff,
+	[BT_KILL_MSK_ALWAYS] = 0,
 };
 
-const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
-	[BT_KILL_MSK_DEFAULT] = 0xffff0000,
-	[BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
-	[BT_KILL_MSK_REDUCED_TXPOW] = 0,
+const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
+	{
+		BT_KILL_MSK_ALWAYS,
+		BT_KILL_MSK_ALWAYS,
+		BT_KILL_MSK_ALWAYS,
+	},
+	{
+		BT_KILL_MSK_NEVER,
+		BT_KILL_MSK_NEVER,
+		BT_KILL_MSK_NEVER,
+	},
+	{
+		BT_KILL_MSK_NEVER,
+		BT_KILL_MSK_NEVER,
+		BT_KILL_MSK_NEVER,
+	},
+	{
+		BT_KILL_MSK_DEFAULT,
+		BT_KILL_MSK_NEVER,
+		BT_KILL_MSK_DEFAULT,
+	},
+};
+
+const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
+	{
+		BT_KILL_MSK_ALWAYS,
+		BT_KILL_MSK_ALWAYS,
+		BT_KILL_MSK_ALWAYS,
+	},
+	{
+		BT_KILL_MSK_ALWAYS,
+		BT_KILL_MSK_ALWAYS,
+		BT_KILL_MSK_ALWAYS,
+	},
+	{
+		BT_KILL_MSK_ALWAYS,
+		BT_KILL_MSK_ALWAYS,
+		BT_KILL_MSK_ALWAYS,
+	},
+	{
+		BT_KILL_MSK_DEFAULT,
+		BT_KILL_MSK_ALWAYS,
+		BT_KILL_MSK_DEFAULT,
+	},
 };
 
 static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
@@ -611,54 +651,43 @@
 	return ret;
 }
 
-static int iwl_mvm_bt_udpate_sw_boost(struct iwl_mvm *mvm,
-				      bool reduced_tx_power)
+static int iwl_mvm_bt_udpate_sw_boost(struct iwl_mvm *mvm)
 {
-	enum iwl_bt_kill_msk bt_kill_msk;
-	struct iwl_bt_coex_sw_boost_update_cmd cmd = {};
 	struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
+	u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
+	u32 secondary_lut = le32_to_cpu(notif->secondary_ch_lut);
+	u32 ag = le32_to_cpu(notif->bt_activity_grading);
+	struct iwl_bt_coex_sw_boost_update_cmd cmd = {};
+	u8 ack_kill_msk[NUM_PHY_CTX] = {};
+	u8 cts_kill_msk[NUM_PHY_CTX] = {};
+	int i;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (reduced_tx_power) {
-		/* Reduced Tx power has precedence on the type of the profile */
-		bt_kill_msk = BT_KILL_MSK_REDUCED_TXPOW;
-	} else {
-		/* Low latency BT profile is active: give higher prio to BT */
-		if (BT_MBOX_MSG(notif, 3, SCO_STATE)  ||
-		    BT_MBOX_MSG(notif, 3, A2DP_STATE) ||
-		    BT_MBOX_MSG(notif, 3, SNIFF_STATE))
-			bt_kill_msk = BT_KILL_MSK_SCO_HID_A2DP;
-		else
-			bt_kill_msk = BT_KILL_MSK_DEFAULT;
-	}
+	ack_kill_msk[0] = iwl_bt_ack_kill_msk[ag][primary_lut];
+	cts_kill_msk[0] = iwl_bt_cts_kill_msk[ag][primary_lut];
 
-	IWL_DEBUG_COEX(mvm,
-		       "Update kill_msk: %d - SCO %sactive A2DP %sactive SNIFF %sactive\n",
-		       bt_kill_msk,
-		       BT_MBOX_MSG(notif, 3, SCO_STATE) ? "" : "in",
-		       BT_MBOX_MSG(notif, 3, A2DP_STATE) ? "" : "in",
-		       BT_MBOX_MSG(notif, 3, SNIFF_STATE) ? "" : "in");
+	ack_kill_msk[1] = iwl_bt_ack_kill_msk[ag][secondary_lut];
+	cts_kill_msk[1] = iwl_bt_cts_kill_msk[ag][secondary_lut];
 
 	/* Don't send HCMD if there is no update */
-	if (bt_kill_msk == mvm->bt_kill_msk)
+	if (!memcmp(ack_kill_msk, mvm->bt_ack_kill_msk, sizeof(ack_kill_msk)) ||
+	    !memcmp(cts_kill_msk, mvm->bt_cts_kill_msk, sizeof(cts_kill_msk)))
 		return 0;
 
-	mvm->bt_kill_msk = bt_kill_msk;
+	memcpy(mvm->bt_ack_kill_msk, ack_kill_msk,
+	       sizeof(mvm->bt_ack_kill_msk));
+	memcpy(mvm->bt_cts_kill_msk, cts_kill_msk,
+	       sizeof(mvm->bt_cts_kill_msk));
 
-	cmd.boost_values[0].kill_ack_msk =
-		cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
-	cmd.boost_values[0].kill_cts_msk =
-		cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
+	BUILD_BUG_ON(ARRAY_SIZE(ack_kill_msk) < ARRAY_SIZE(cmd.boost_values));
 
-	cmd.boost_values[1].kill_ack_msk = cmd.boost_values[0].kill_ack_msk;
-	cmd.boost_values[2].kill_cts_msk = cmd.boost_values[0].kill_cts_msk;
-	cmd.boost_values[1].kill_ack_msk = cmd.boost_values[0].kill_ack_msk;
-	cmd.boost_values[2].kill_cts_msk = cmd.boost_values[0].kill_cts_msk;
-
-	IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
-		       iwl_bt_ack_kill_msk[bt_kill_msk],
-		       iwl_bt_cts_kill_msk[bt_kill_msk]);
+	for (i = 0; i < ARRAY_SIZE(cmd.boost_values); i++) {
+		cmd.boost_values[i].kill_ack_msk =
+			cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk[i]]);
+		cmd.boost_values[i].kill_cts_msk =
+			cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk[i]]);
+	}
 
 	return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_SW_BOOST, 0,
 				    sizeof(cmd), &cmd);
@@ -700,8 +729,6 @@
 struct iwl_bt_iterator_data {
 	struct iwl_bt_coex_profile_notif *notif;
 	struct iwl_mvm *mvm;
-	u32 num_bss_ifaces;
-	bool reduced_tx_power;
 	struct ieee80211_chanctx_conf *primary;
 	struct ieee80211_chanctx_conf *secondary;
 	bool primary_ll;
@@ -737,22 +764,12 @@
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_STATION:
-		/* Count BSSes vifs */
-		data->num_bss_ifaces++;
 		/* default smps_mode for BSS / P2P client is AUTOMATIC */
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 		break;
 	case NL80211_IFTYPE_AP:
-		/* default smps_mode for AP / GO is OFF */
-		smps_mode = IEEE80211_SMPS_OFF;
-		if (!mvmvif->ap_ibss_active) {
-			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-					    smps_mode);
+		if (!mvmvif->ap_ibss_active)
 			return;
-		}
-
-		/* the Ack / Cts kill mask must be default if AP / GO */
-		data->reduced_tx_power = false;
 		break;
 	default:
 		return;
@@ -763,11 +780,10 @@
 	/* If channel context is invalid or not on 2.4GHz .. */
 	if ((!chanctx_conf ||
 	     chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
-		/* ... relax constraints and disable rssi events */
-		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-				    smps_mode);
-		data->reduced_tx_power = false;
 		if (vif->type == NL80211_IFTYPE_STATION) {
+			/* ... relax constraints and disable rssi events */
+			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+					    smps_mode);
 			iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
 						    false);
 			iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
@@ -779,9 +795,7 @@
 	if (bt_activity_grading >= BT_HIGH_TRAFFIC)
 		smps_mode = IEEE80211_SMPS_STATIC;
 	else if (bt_activity_grading >= BT_LOW_TRAFFIC)
-		smps_mode = vif->type == NL80211_IFTYPE_AP ?
-				IEEE80211_SMPS_OFF :
-				IEEE80211_SMPS_DYNAMIC;
+		smps_mode = IEEE80211_SMPS_DYNAMIC;
 
 	/* relax SMPS contraints for next association */
 	if (!vif->bss_conf.assoc)
@@ -795,7 +809,9 @@
 		       "mac %d: bt_activity_grading %d smps_req %d\n",
 		       mvmvif->id, bt_activity_grading, smps_mode);
 
-	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
+	if (vif->type == NL80211_IFTYPE_STATION)
+		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+				    smps_mode);
 
 	/* low latency is always primary */
 	if (iwl_mvm_vif_low_latency(mvmvif)) {
@@ -846,7 +862,6 @@
 	if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
 	    mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
 	    le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
-		data->reduced_tx_power = false;
 		iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
 		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
 		return;
@@ -861,23 +876,9 @@
 	if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
 		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
 			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-
-		/*
-		 * bt_kill_msk can be BT_KILL_MSK_REDUCED_TXPOW only if all the
-		 * BSS / P2P clients have rssi above threshold.
-		 * We set the bt_kill_msk to BT_KILL_MSK_REDUCED_TXPOW before
-		 * the iteration, if one interface's rssi isn't good enough,
-		 * bt_kill_msk will be set to default values.
-		 */
 	} else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
 		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
 			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-
-		/*
-		 * One interface hasn't rssi above threshold, bt_kill_msk must
-		 * be set to default values.
-		 */
-		data->reduced_tx_power = false;
 	}
 
 	/* Begin to monitor the RSSI: it may influence the reduced Tx power */
@@ -889,7 +890,6 @@
 	struct iwl_bt_iterator_data data = {
 		.mvm = mvm,
 		.notif = &mvm->last_bt_notif,
-		.reduced_tx_power = true,
 	};
 	struct iwl_bt_coex_ci_cmd cmd = {};
 	u8 ci_bw_idx;
@@ -959,14 +959,7 @@
 		memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
 	}
 
-	/*
-	 * If there are no BSS / P2P client interfaces, reduced Tx Power is
-	 * irrelevant since it is based on the RSSI coming from the beacon.
-	 * Use BT_KILL_MSK_DEFAULT in that case.
-	 */
-	data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
-
-	if (iwl_mvm_bt_udpate_sw_boost(mvm, data.reduced_tx_power))
+	if (iwl_mvm_bt_udpate_sw_boost(mvm))
 		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
@@ -1035,16 +1028,6 @@
 		return;
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-	data->num_bss_ifaces++;
-
-	/*
-	 * This interface doesn't support reduced Tx power (because of low
-	 * RSSI probably), then set bt_kill_msk to default values.
-	 */
-	if (!mvmsta->bt_reduced_txpower)
-		data->reduced_tx_power = false;
-	/* else - possibly leave it to BT_KILL_MSK_REDUCED_TXPOW */
 }
 
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -1053,7 +1036,6 @@
 	struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
 	struct iwl_bt_iterator_data data = {
 		.mvm = mvm,
-		.reduced_tx_power = true,
 	};
 	int ret;
 
@@ -1100,14 +1082,7 @@
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_bt_rssi_iterator, &data);
 
-	/*
-	 * If there are no BSS / P2P client interfaces, reduced Tx Power is
-	 * irrelevant since it is based on the RSSI coming from the beacon.
-	 * Use BT_KILL_MSK_DEFAULT in that case.
-	 */
-	data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
-
-	if (iwl_mvm_bt_udpate_sw_boost(mvm, data.reduced_tx_power))
+	if (iwl_mvm_bt_udpate_sw_boost(mvm))
 		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
@@ -1150,7 +1125,7 @@
 	enum iwl_bt_coex_lut_type lut_type;
 
 	if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
+		return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
 
 	if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
 		return true;
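
The coex rework above replaces the single kill-mask selection with a two-stage lookup: BT activity grading and the channel LUT pick a kill-mask index from a 4x3 table, and iwl_bt_ctl_kill_msk maps that index to the 32-bit register value. The sketch below reproduces that double lookup with the table contents from this hunk; which row or column corresponds to which grading or LUT is an assumption and is flagged as such in the code.

#include <stdint.h>
#include <stdio.h>

enum kill_msk { KILL_MSK_DEFAULT, KILL_MSK_NEVER, KILL_MSK_ALWAYS, KILL_MSK_MAX };

/* Second stage: kill-mask index -> register value (iwl_bt_ctl_kill_msk). */
static const uint32_t ctl_kill_msk[KILL_MSK_MAX] = {
	[KILL_MSK_DEFAULT] = 0xfffffc00,
	[KILL_MSK_NEVER]   = 0xffffffff,
	[KILL_MSK_ALWAYS]  = 0,
};

/* First stage: (activity grading, LUT type) -> kill-mask index, with the
 * same contents as iwl_bt_ack_kill_msk in this patch (4 gradings x 3 LUTs). */
static const uint8_t ack_kill_msk[4][3] = {
	{ KILL_MSK_ALWAYS,  KILL_MSK_ALWAYS, KILL_MSK_ALWAYS  },
	{ KILL_MSK_ALWAYS,  KILL_MSK_ALWAYS, KILL_MSK_ALWAYS  },
	{ KILL_MSK_ALWAYS,  KILL_MSK_ALWAYS, KILL_MSK_ALWAYS  },
	{ KILL_MSK_DEFAULT, KILL_MSK_ALWAYS, KILL_MSK_DEFAULT },
};

int main(void)
{
	/* Which row/column corresponds to which grading/LUT is an
	 * assumption here; the point is the two-stage lookup itself. */
	unsigned int ag = 3, lut = 0;
	uint8_t idx = ack_kill_msk[ag][lut];

	printf("ack kill mask for ag=%u lut=%u: 0x%08x\n",
	       ag, lut, (unsigned int)ctl_kill_msk[idx]);
	return 0;
}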
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index ce50363..a3be333 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -649,10 +649,6 @@
 	       sizeof(iwl_bt_prio_boost));
 	memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
 	       sizeof(iwl_bt_mprio_lut));
-	bt_cmd->kill_ack_msk =
-		cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
-	bt_cmd->kill_cts_msk =
-		cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
 
 send_cmd:
 	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -664,12 +660,13 @@
 	return ret;
 }
 
-static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
-					   bool reduced_tx_power)
+static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm)
 {
-	enum iwl_bt_kill_msk bt_kill_msk;
-	struct iwl_bt_coex_cmd_old *bt_cmd;
 	struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old;
+	u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
+	u32 ag = le32_to_cpu(notif->bt_activity_grading);
+	struct iwl_bt_coex_cmd_old *bt_cmd;
+	u8 ack_kill_msk, cts_kill_msk;
 	struct iwl_host_cmd cmd = {
 		.id = BT_CONFIG,
 		.data[0] = &bt_cmd,
@@ -680,31 +677,15 @@
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (reduced_tx_power) {
-		/* Reduced Tx power has precedence on the type of the profile */
-		bt_kill_msk = BT_KILL_MSK_REDUCED_TXPOW;
-	} else {
-		/* Low latency BT profile is active: give higher prio to BT */
-		if (BT_MBOX_MSG(notif, 3, SCO_STATE)  ||
-		    BT_MBOX_MSG(notif, 3, A2DP_STATE) ||
-		    BT_MBOX_MSG(notif, 3, SNIFF_STATE))
-			bt_kill_msk = BT_KILL_MSK_SCO_HID_A2DP;
-		else
-			bt_kill_msk = BT_KILL_MSK_DEFAULT;
-	}
+	ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut];
+	cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut];
 
-	IWL_DEBUG_COEX(mvm,
-		       "Update kill_msk: %d - SCO %sactive A2DP %sactive SNIFF %sactive\n",
-		       bt_kill_msk,
-		       BT_MBOX_MSG(notif, 3, SCO_STATE) ? "" : "in",
-		       BT_MBOX_MSG(notif, 3, A2DP_STATE) ? "" : "in",
-		       BT_MBOX_MSG(notif, 3, SNIFF_STATE) ? "" : "in");
-
-	/* Don't send HCMD if there is no update */
-	if (bt_kill_msk == mvm->bt_kill_msk)
+	if (mvm->bt_ack_kill_msk[0] == ack_kill_msk &&
+	    mvm->bt_cts_kill_msk[0] == cts_kill_msk)
 		return 0;
 
-	mvm->bt_kill_msk = bt_kill_msk;
+	mvm->bt_ack_kill_msk[0] = ack_kill_msk;
+	mvm->bt_cts_kill_msk[0] = cts_kill_msk;
 
 	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
 	if (!bt_cmd)
@@ -712,16 +693,12 @@
 	cmd.data[0] = bt_cmd;
 	bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
 
-	bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
-	bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
+	bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]);
+	bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]);
 	bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
 					     BT_VALID_KILL_ACK |
 					     BT_VALID_KILL_CTS);
 
-	IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
-		       iwl_bt_ack_kill_msk[bt_kill_msk],
-		       iwl_bt_cts_kill_msk[bt_kill_msk]);
-
 	ret = iwl_mvm_send_cmd(mvm, &cmd);
 
 	kfree(bt_cmd);
@@ -777,8 +754,6 @@
 struct iwl_bt_iterator_data {
 	struct iwl_bt_coex_profile_notif_old *notif;
 	struct iwl_mvm *mvm;
-	u32 num_bss_ifaces;
-	bool reduced_tx_power;
 	struct ieee80211_chanctx_conf *primary;
 	struct ieee80211_chanctx_conf *secondary;
 	bool primary_ll;
@@ -814,22 +789,12 @@
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_STATION:
-		/* Count BSSes vifs */
-		data->num_bss_ifaces++;
 		/* default smps_mode for BSS / P2P client is AUTOMATIC */
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 		break;
 	case NL80211_IFTYPE_AP:
-		/* default smps_mode for AP / GO is OFF */
-		smps_mode = IEEE80211_SMPS_OFF;
-		if (!mvmvif->ap_ibss_active) {
-			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-					    smps_mode);
+		if (!mvmvif->ap_ibss_active)
 			return;
-		}
-
-		/* the Ack / Cts kill mask must be default if AP / GO */
-		data->reduced_tx_power = false;
 		break;
 	default:
 		return;
@@ -840,11 +805,10 @@
 	/* If channel context is invalid or not on 2.4GHz .. */
 	if ((!chanctx_conf ||
 	     chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
-		/* ... relax constraints and disable rssi events */
-		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-				    smps_mode);
-		data->reduced_tx_power = false;
 		if (vif->type == NL80211_IFTYPE_STATION) {
+			/* ... relax constraints and disable rssi events */
+			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+					    smps_mode);
 			iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
 						    false);
 			iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
@@ -869,7 +833,9 @@
 		       mvmvif->id, data->notif->bt_status, bt_activity_grading,
 		       smps_mode);
 
-	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
+	if (vif->type == NL80211_IFTYPE_STATION)
+		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+				    smps_mode);
 
 	/* low latency is always primary */
 	if (iwl_mvm_vif_low_latency(mvmvif)) {
@@ -920,7 +886,6 @@
 	if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
 	    mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
 	    !data->notif->bt_status) {
-		data->reduced_tx_power = false;
 		iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
 		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
 		return;
@@ -935,23 +900,9 @@
 	if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
 		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
 			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-
-		/*
-		 * bt_kill_msk can be BT_KILL_MSK_REDUCED_TXPOW only if all the
-		 * BSS / P2P clients have rssi above threshold.
-		 * We set the bt_kill_msk to BT_KILL_MSK_REDUCED_TXPOW before
-		 * the iteration, if one interface's rssi isn't good enough,
-		 * bt_kill_msk will be set to default values.
-		 */
 	} else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
 		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
 			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-
-		/*
-		 * One interface hasn't rssi above threshold, bt_kill_msk must
-		 * be set to default values.
-		 */
-		data->reduced_tx_power = false;
 	}
 
 	/* Begin to monitor the RSSI: it may influence the reduced Tx power */
@@ -963,7 +914,6 @@
 	struct iwl_bt_iterator_data data = {
 		.mvm = mvm,
 		.notif = &mvm->last_bt_notif_old,
-		.reduced_tx_power = true,
 	};
 	struct iwl_bt_coex_ci_cmd_old cmd = {};
 	u8 ci_bw_idx;
@@ -1037,14 +987,7 @@
 		memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd));
 	}
 
-	/*
-	 * If there are no BSS / P2P client interfaces, reduced Tx Power is
-	 * irrelevant since it is based on the RSSI coming from the beacon.
-	 * Use BT_KILL_MSK_DEFAULT in that case.
-	 */
-	data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
-
-	if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
+	if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
 		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
@@ -1115,16 +1058,6 @@
 		return;
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-	data->num_bss_ifaces++;
-
-	/*
-	 * This interface doesn't support reduced Tx power (because of low
-	 * RSSI probably), then set bt_kill_msk to default values.
-	 */
-	if (!mvmsta->bt_reduced_txpower)
-		data->reduced_tx_power = false;
-	/* else - possibly leave it to BT_KILL_MSK_REDUCED_TXPOW */
 }
 
 void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -1133,7 +1066,6 @@
 	struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
 	struct iwl_bt_iterator_data data = {
 		.mvm = mvm,
-		.reduced_tx_power = true,
 	};
 	int ret;
 
@@ -1175,14 +1107,7 @@
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_bt_rssi_iterator, &data);
 
-	/*
-	 * If there are no BSS / P2P client interfaces, reduced Tx Power is
-	 * irrelevant since it is based on the RSSI coming from the beacon.
-	 * Use BT_KILL_MSK_DEFAULT in that case.
-	 */
-	data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
-
-	if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
+	if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
 		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index f131ef0..7d18f46 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -146,17 +146,47 @@
 					    char __user *user_buf,
 					    size_t count, loff_t *ppos)
 {
-	struct iwl_fw_error_dump_file *dump_file = file->private_data;
+	struct iwl_mvm_dump_ptrs *dump_ptrs = (void *)file->private_data;
+	ssize_t bytes_read = 0;
+	ssize_t bytes_read_trans = 0;
 
-	return simple_read_from_buffer(user_buf, count, ppos,
-				       dump_file,
-				       le32_to_cpu(dump_file->file_len));
+	if (*ppos < dump_ptrs->op_mode_len)
+		bytes_read +=
+			simple_read_from_buffer(user_buf, count, ppos,
+						dump_ptrs->op_mode_ptr,
+						dump_ptrs->op_mode_len);
+
+	if (bytes_read < 0 || *ppos < dump_ptrs->op_mode_len)
+		return bytes_read;
+
+	if (dump_ptrs->trans_ptr) {
+		*ppos -= dump_ptrs->op_mode_len;
+		bytes_read_trans =
+			simple_read_from_buffer(user_buf + bytes_read,
+						count - bytes_read, ppos,
+						dump_ptrs->trans_ptr->data,
+						dump_ptrs->trans_ptr->len);
+		*ppos += dump_ptrs->op_mode_len;
+
+		if (bytes_read_trans >= 0)
+			bytes_read += bytes_read_trans;
+		else if (!bytes_read)
+			/* propagate the failure */
+			return bytes_read_trans;
+	}
+
+	return bytes_read;
+
 }
 
 static int iwl_dbgfs_fw_error_dump_release(struct inode *inode,
 					   struct file *file)
 {
-	vfree(file->private_data);
+	struct iwl_mvm_dump_ptrs *dump_ptrs = (void *)file->private_data;
+
+	vfree(dump_ptrs->op_mode_ptr);
+	vfree(dump_ptrs->trans_ptr);
+	kfree(dump_ptrs);
 
 	return 0;
 }
@@ -514,9 +544,9 @@
 
 		pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
 		pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
-				 iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
+				 iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
 		pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
-				 iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
+				 iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
 
 	} else {
 		struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
@@ -531,10 +561,19 @@
 			       le64_to_cpu(cmd->bt_secondary_ci));
 
 		pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
-		pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
-				 iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
-		pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
-				 iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
+		pos += scnprintf(buf+pos, bufsz-pos,
+				 "\tPrimary: ACK Kill Mask 0x%08x\n",
+				 iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
+		pos += scnprintf(buf+pos, bufsz-pos,
+				 "\tPrimary: CTS Kill Mask 0x%08x\n",
+				 iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
+		pos += scnprintf(buf+pos, bufsz-pos,
+				 "\tSecondary: ACK Kill Mask 0x%08x\n",
+				 iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[1]]);
+		pos += scnprintf(buf+pos, bufsz-pos,
+				 "\tSecondary: CTS Kill Mask 0x%08x\n",
+				 iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[1]]);
+
 	}
 
 	mutex_unlock(&mvm->mutex);
@@ -830,8 +869,14 @@
 static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
 				      size_t count, loff_t *ppos)
 {
+	int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
+	if (ret)
+		return ret;
+
 	iwl_force_nmi(mvm->trans);
 
+	iwl_mvm_unref(mvm, IWL_MVM_REF_NMI);
+
 	return count;
 }
 
@@ -1115,11 +1160,11 @@
 }
 #endif
 
-#define PRINT_MVM_REF(ref) do {					\
-	if (test_bit(ref, mvm->ref_bitmap))			\
-		pos += scnprintf(buf + pos, bufsz - pos,	\
-				 "\t(0x%lx) %s\n",		\
-				 BIT(ref), #ref);		\
+#define PRINT_MVM_REF(ref) do {						\
+	if (mvm->refs[ref])						\
+		pos += scnprintf(buf + pos, bufsz - pos,		\
+				 "\t(0x%lx): %d %s\n",			\
+				 BIT(ref), mvm->refs[ref], #ref);	\
 } while (0)
 
 static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
@@ -1127,12 +1172,17 @@
 					size_t count, loff_t *ppos)
 {
 	struct iwl_mvm *mvm = file->private_data;
-	int pos = 0;
+	int i, pos = 0;
 	char buf[256];
 	const size_t bufsz = sizeof(buf);
+	u32 refs = 0;
 
-	pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%lx\n",
-			 mvm->ref_bitmap[0]);
+	for (i = 0; i < IWL_MVM_REF_COUNT; i++)
+		if (mvm->refs[i])
+			refs |= BIT(i);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%x\n",
+			 refs);
 
 	PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
 	PRINT_MVM_REF(IWL_MVM_REF_SCAN);
@@ -1158,7 +1208,7 @@
 
 	mutex_lock(&mvm->mutex);
 
-	taken = test_bit(IWL_MVM_REF_USER, mvm->ref_bitmap);
+	taken = mvm->refs[IWL_MVM_REF_USER];
 	if (value == 1 && !taken)
 		iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
 	else if (value == 0 && taken)
@@ -1194,14 +1244,21 @@
 	int pos = 0;
 	char buf[32];
 	const size_t bufsz = sizeof(buf);
+	int ret;
 
 	if (!mvm->dbgfs_prph_reg_addr)
 		return -EINVAL;
 
+	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_READ);
+	if (ret)
+		return ret;
+
 	pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
 		mvm->dbgfs_prph_reg_addr,
 		iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
 
+	iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_READ);
+
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
@@ -1211,6 +1268,7 @@
 {
 	u8 args;
 	u32 value;
+	int ret;
 
 	args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
 	/* if we only want to set the reg address - nothing more to do */
@@ -1221,7 +1279,13 @@
 	if (args != 2)
 		return -EINVAL;
 
+	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
+	if (ret)
+		return ret;
+
 	iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 out:
 	return count;
 }
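
The reworked fw_error_dump read handler at the top of this debugfs.c diff concatenates two separately allocated buffers (the op-mode dump followed by the transport dump) into one seekable debugfs file by temporarily rebasing *ppos while reading the second buffer. A minimal sketch of that pattern, with made-up names rather than the driver's structures:

/*
 * Sketch only (not part of the patch): expose two buffers as one
 * seekable file; struct two_bufs and its fields are illustrative.
 */
static ssize_t two_buf_read(struct file *file, char __user *ubuf,
			    size_t count, loff_t *ppos)
{
	struct two_bufs *b = file->private_data;
	ssize_t n = 0, m = 0;

	if (*ppos < b->first_len)
		n = simple_read_from_buffer(ubuf, count, ppos,
					    b->first, b->first_len);

	/* failed, or still inside the first buffer */
	if (n < 0 || *ppos < b->first_len)
		return n;

	*ppos -= b->first_len;			/* rebase into buffer two */
	m = simple_read_from_buffer(ubuf + n, count - n, ppos,
				    b->second, b->second_len);
	*ppos += b->first_len;			/* restore global offset */

	if (m < 0)
		return n ? n : m;		/* propagate error only if nothing was read */
	return n + m;
}
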
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index ab12aaa..6987571 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -385,6 +385,8 @@
 	BT_ON_NO_CONNECTION	= 1,
 	BT_LOW_TRAFFIC		= 2,
 	BT_HIGH_TRAFFIC		= 3,
+
+	BT_MAX_AG,
 }; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
 
 enum iwl_bt_ci_compliance {
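
The new BT_MAX_AG sentinel closes the activity-grading enum so tables indexed by grading can be sized from the enum itself (the per-LUT ack/cts kill-mask tables declared later in mvm.h are dimensioned this way), and so values reported by the firmware can be range-checked before use. A hedged sketch; the bt_activity_grading field matches the coex profile notification, everything else is illustrative:

/* Sketch: validate a firmware-reported grading before indexing a table. */
u32 ag = le32_to_cpu(notif->bt_activity_grading);

if (WARN_ON_ONCE(ag >= BT_MAX_AG))
	ag = BT_ON_NO_CONNECTION;

/* e.g. kill_msk = iwl_bt_ack_kill_msk[ag][lut_type]; */
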
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index b8e4e78..95f5b32 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -133,6 +133,7 @@
 	/* Scan offload */
 	SCAN_OFFLOAD_REQUEST_CMD = 0x51,
 	SCAN_OFFLOAD_ABORT_CMD = 0x52,
+	HOT_SPOT_CMD = 0x53,
 	SCAN_OFFLOAD_COMPLETE = 0x6D,
 	SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
 	SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
@@ -910,6 +911,72 @@
 	__le32 dsp_cfg_flags;
 } __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
 
+/*
+ * Aux ROC command
+ *
+ * Command requests the firmware to create a time event for a certain duration
+ * and remain on the given channel. This is done by using the Aux framework in
+ * the FW.
+ * The command was first used for Hot Spot issues, but can be used
+ * regardless of Hot Spot.
+ *
+ * ( HOT_SPOT_CMD 0x53 )
+ *
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the
+ *	event_unique_id should be the id of the time event assigned by ucode.
+ *	Otherwise ignore the event_unique_id.
+ * @sta_id_and_color: station id and color, resumed during "Remain On Channel"
+ *	activity.
+ * @channel_info: channel info
+ * @node_addr: Our MAC Address
+ * @reserved: reserved for alignment
+ * @apply_time: GP2 value to start (should always be the current GP2 value)
+ * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
+ *	time by which start of the event is allowed to be postponed.
+ * @duration: event duration in TU. To calculate event duration:
+ *	timeEventDuration = min(duration, remainingQuota)
+ */
+struct iwl_hs20_roc_req {
+	/* COMMON_INDEX_HDR_API_S_VER_1 hdr */
+	__le32 id_and_color;
+	__le32 action;
+	__le32 event_unique_id;
+	__le32 sta_id_and_color;
+	struct iwl_fw_channel_info channel_info;
+	u8 node_addr[ETH_ALEN];
+	__le16 reserved;
+	__le32 apply_time;
+	__le32 apply_time_max_delay;
+	__le32 duration;
+} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
+
+/*
+ * values for AUX ROC result values
+ */
+enum iwl_mvm_hot_spot {
+	HOT_SPOT_RSP_STATUS_OK,
+	HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS,
+	HOT_SPOT_MAX_NUM_OF_SESSIONS,
+};
+
+/*
+ * Aux ROC command response
+ *
+ * In response to iwl_hs20_roc_req the FW sends this command to notify the
+ * driver of the uid of the time event.
+ *
+ * ( HOT_SPOT_CMD 0x53 )
+ *
+ * @event_unique_id: Unique ID of time event assigned by ucode
+ * @status: Return status; 0 is success, all other values indicate specific errors
+ */
+struct iwl_hs20_roc_res {
+	__le32 event_unique_id;
+	__le32 status;
+} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
+
 #define IWL_RX_INFO_PHY_CNT 8
 #define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
 #define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
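
Both new Aux ROC structures above are little-endian wire formats, so every field goes through cpu_to_le32()/le32_to_cpu() on the host side. A hedged sketch of consuming the HOT_SPOT_CMD response; the helper name is made up, the checks follow the structures and enum above:

/* Sketch: pull the time-event uid out of an Aux ROC response. */
static int example_parse_hs20_resp(struct iwl_rx_packet *pkt, u32 *uid)
{
	struct iwl_hs20_roc_res *resp = (void *)pkt->data;

	if (iwl_rx_packet_payload_len(pkt) != sizeof(*resp))
		return -EINVAL;

	if (le32_to_cpu(resp->status) != HOT_SPOT_RSP_STATUS_OK)
		return -EBUSY;	/* fw refused the event or ran out of sessions */

	*uid = le32_to_cpu(resp->event_unique_id);
	return 0;
}
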
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 96b9cf8..0e523e2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -1074,8 +1074,12 @@
 	/* Fill the common data for all mac context types */
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
-	/* Also enable probe requests to pass */
-	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+	/*
+	 * pass probe requests and beacons from other APs (needed
+	 * for ht protection)
+	 */
+	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+					MAC_FILTER_IN_BEACON);
 
 	/* Fill the data specific for ap mode */
 	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
@@ -1096,6 +1100,13 @@
 	/* Fill the common data for all mac context types */
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
+	/*
+	 * pass probe requests and beacons from other APs (needed
+	 * for ht protection)
+	 */
+	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+					MAC_FILTER_IN_BEACON);
+
 	/* Fill the data specific for GO mode */
 	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
 				     action == FW_CTXT_ACTION_ADD);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 2eb6ebe..0d6a8b7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -211,7 +211,9 @@
 		return;
 
 	IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
-	WARN_ON(test_and_set_bit(ref_type, mvm->ref_bitmap));
+	spin_lock_bh(&mvm->refs_lock);
+	mvm->refs[ref_type]++;
+	spin_unlock_bh(&mvm->refs_lock);
 	iwl_trans_ref(mvm->trans);
 }
 
@@ -221,29 +223,35 @@
 		return;
 
 	IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
-	WARN_ON(!test_and_clear_bit(ref_type, mvm->ref_bitmap));
+	spin_lock_bh(&mvm->refs_lock);
+	WARN_ON(!mvm->refs[ref_type]--);
+	spin_unlock_bh(&mvm->refs_lock);
 	iwl_trans_unref(mvm->trans);
 }
 
-static void
-iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
+static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
+				     enum iwl_mvm_ref_type except_ref)
 {
-	int i;
+	int i, j;
 
 	if (!iwl_mvm_is_d0i3_supported(mvm))
 		return;
 
-	for_each_set_bit(i, mvm->ref_bitmap, IWL_MVM_REF_COUNT) {
-		if (ref == i)
+	spin_lock_bh(&mvm->refs_lock);
+	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
+		if (except_ref == i || !mvm->refs[i])
 			continue;
 
-		IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d\n", i);
-		clear_bit(i, mvm->ref_bitmap);
-		iwl_trans_unref(mvm->trans);
+		IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
+			      i, mvm->refs[i]);
+		for (j = 0; j < mvm->refs[i]; j++)
+			iwl_trans_unref(mvm->trans);
+		mvm->refs[i] = 0;
 	}
+	spin_unlock_bh(&mvm->refs_lock);
 }
 
-static int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
 {
 	iwl_mvm_ref(mvm, ref_type);
 
@@ -321,13 +329,6 @@
 		hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 	}
 
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
-	    !iwlwifi_mod_params.uapsd_disable) {
-		hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
-		hw->uapsd_queues = IWL_UAPSD_AC_INFO;
-		hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
-	}
-
 	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
 		hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
 
@@ -660,6 +661,7 @@
 	spin_unlock_bh(&mvm->time_event_lock);
 
 	mvmvif->phy_ctxt = NULL;
+	memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -668,11 +670,11 @@
 	struct iwl_fw_error_dump_file *dump_file;
 	struct iwl_fw_error_dump_data *dump_data;
 	struct iwl_fw_error_dump_info *dump_info;
+	struct iwl_mvm_dump_ptrs *fw_error_dump;
 	const struct fw_img *img;
 	u32 sram_len, sram_ofs;
 	u32 file_len, rxf_len;
 	unsigned long flags;
-	u32 trans_len;
 	int reg_val;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -680,6 +682,10 @@
 	if (mvm->fw_error_dump)
 		return;
 
+	fw_error_dump = kzalloc(sizeof(*mvm->fw_error_dump), GFP_KERNEL);
+	if (!fw_error_dump)
+		return;
+
 	img = &mvm->fw->img[mvm->cur_ucode];
 	sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
 	sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
@@ -697,18 +703,15 @@
 		   rxf_len +
 		   sizeof(*dump_info);
 
-	trans_len = iwl_trans_dump_data(mvm->trans, NULL, 0);
-	if (trans_len)
-		file_len += trans_len;
-
 	dump_file = vzalloc(file_len);
-	if (!dump_file)
+	if (!dump_file) {
+		kfree(fw_error_dump);
 		return;
+	}
 
-	mvm->fw_error_dump = dump_file;
+	fw_error_dump->op_mode_ptr = dump_file;
 
 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
-	dump_file->file_len = cpu_to_le32(file_len);
 	dump_data = (void *)dump_file->data;
 
 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
@@ -749,14 +752,12 @@
 	iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_data->data,
 				 sram_len);
 
-	if (trans_len) {
-		void *buf = iwl_fw_error_next_data(dump_data);
-		u32 real_trans_len = iwl_trans_dump_data(mvm->trans, buf,
-							 trans_len);
-		dump_data = (void *)((u8 *)buf + real_trans_len);
-		dump_file->file_len =
-			cpu_to_le32(file_len - trans_len + real_trans_len);
-	}
+	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
+	fw_error_dump->op_mode_len = file_len;
+	if (fw_error_dump->trans_ptr)
+		file_len += fw_error_dump->trans_ptr->len;
+	dump_file->file_len = cpu_to_le32(file_len);
+	mvm->fw_error_dump = fw_error_dump;
 }
 #endif
 
@@ -788,6 +789,12 @@
 	iwl_mvm_reset_phy_ctxts(mvm);
 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
 	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
+	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
+	memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
+	memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
+	memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
 
 	ieee80211_wake_queues(mvm->hw);
 
@@ -1399,6 +1406,28 @@
 }
 #endif
 
+static void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+		if (!sta || IS_ERR(sta) || !sta->tdls)
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
+				NL80211_TDLS_TEARDOWN,
+				WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
+				GFP_KERNEL);
+	}
+}
+
 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 					     struct ieee80211_vif *vif,
 					     struct ieee80211_bss_conf *bss_conf,
@@ -1494,14 +1523,18 @@
 		 */
 		iwl_mvm_remove_time_event(mvm, mvmvif,
 					  &mvmvif->time_event_data);
-		iwl_mvm_sf_update(mvm, vif, false);
-		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
 	} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
 			      BSS_CHANGED_QOS)) {
 		ret = iwl_mvm_power_update_mac(mvm);
 		if (ret)
 			IWL_ERR(mvm, "failed to update power mode\n");
 	}
+
+	if (changes & BSS_CHANGED_BEACON_INFO) {
+		iwl_mvm_sf_update(mvm, vif, false);
+		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+	}
+
 	if (changes & BSS_CHANGED_TXPOWER) {
 		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
 				bss_conf->txpower);
@@ -1533,6 +1566,14 @@
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	int ret;
 
+	/*
+	 * iwl_mvm_mac_ctxt_add() might read directly from the device
+	 * (the system time), so make sure it is available.
+	 */
+	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
+	if (ret)
+		return ret;
+
 	mutex_lock(&mvm->mutex);
 
 	/* Send the beacon template */
@@ -1581,6 +1622,10 @@
 
 	iwl_mvm_bt_coex_vif_change(mvm);
 
+	/* we don't support TDLS during DCM */
+	if (iwl_mvm_phy_ctx_count(mvm) > 1)
+		iwl_mvm_teardown_tdls_peers(mvm);
+
 	mutex_unlock(&mvm->mutex);
 	return 0;
 
@@ -1594,6 +1639,7 @@
 	iwl_mvm_mac_ctxt_remove(mvm, vif);
 out_unlock:
 	mutex_unlock(&mvm->mutex);
+	iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
 	return ret;
 }
 
@@ -1671,6 +1717,14 @@
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
+	/*
+	 * iwl_mvm_bss_info_changed_station() might call
+	 * iwl_mvm_protect_session(), which reads directly from
+	 * the device (the system time), so make sure it is available.
+	 */
+	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
+		return;
+
 	mutex_lock(&mvm->mutex);
 
 	if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
@@ -1690,8 +1744,50 @@
 	}
 
 	mutex_unlock(&mvm->mutex);
+	iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
 }
 
+static int iwl_mvm_cancel_scan_wait_notif(struct iwl_mvm *mvm,
+					  enum iwl_scan_status scan_type)
+{
+	int ret;
+	bool wait_for_handlers = false;
+
+	mutex_lock(&mvm->mutex);
+
+	if (mvm->scan_status != scan_type) {
+		ret = 0;
+		/* make sure there are no pending notifications */
+		wait_for_handlers = true;
+		goto out;
+	}
+
+	switch (scan_type) {
+	case IWL_MVM_SCAN_SCHED:
+		ret = iwl_mvm_scan_offload_stop(mvm, true);
+		break;
+	case IWL_MVM_SCAN_OS:
+		ret = iwl_mvm_cancel_scan(mvm);
+		break;
+	case IWL_MVM_SCAN_NONE:
+	default:
+		WARN_ON_ONCE(1);
+		ret = -EINVAL;
+		break;
+	}
+	if (ret)
+		goto out;
+
+	wait_for_handlers = true;
+out:
+	mutex_unlock(&mvm->mutex);
+
+	/* make sure we consume the completion notification */
+	if (wait_for_handlers)
+		iwl_mvm_wait_for_async_handlers(mvm);
+
+	return ret;
+}
 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif,
 			       struct ieee80211_scan_request *hw_req)
@@ -1704,19 +1800,13 @@
 	    req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
 		return -EINVAL;
 
+	ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
+	if (ret)
+		return ret;
+
 	mutex_lock(&mvm->mutex);
 
-	switch (mvm->scan_status) {
-	case IWL_MVM_SCAN_SCHED:
-		ret = iwl_mvm_scan_offload_stop(mvm, true);
-		if (ret) {
-			ret = -EBUSY;
-			goto out;
-		}
-		break;
-	case IWL_MVM_SCAN_NONE:
-		break;
-	default:
+	if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -1732,8 +1822,6 @@
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 out:
 	mutex_unlock(&mvm->mutex);
-	/* make sure to flush the Rx handler before the next scan arrives */
-	iwl_mvm_wait_for_async_handlers(mvm);
 	return ret;
 }
 
@@ -1885,28 +1973,6 @@
 		iwl_mvm_power_update_mac(mvm);
 }
 
-static void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
-{
-	struct ieee80211_sta *sta;
-	struct iwl_mvm_sta *mvmsta;
-	int i;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
-		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
-						lockdep_is_held(&mvm->mutex));
-		if (!sta || IS_ERR(sta) || !sta->tdls)
-			continue;
-
-		mvmsta = iwl_mvm_sta_from_mac80211(sta);
-		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
-				NL80211_TDLS_TEARDOWN,
-				WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
-				GFP_KERNEL);
-	}
-}
-
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_sta *sta,
@@ -2065,10 +2131,19 @@
 	if (WARN_ON_ONCE(vif->bss_conf.assoc))
 		return;
 
+	/*
+	 * iwl_mvm_protect_session() reads directly from the device
+	 * (the system time), so make sure it is available.
+	 */
+	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
+		return;
+
 	mutex_lock(&mvm->mutex);
 	/* Try really hard to protect the session and hear a beacon */
 	iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500);
 	mutex_unlock(&mvm->mutex);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
 }
 
 static void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
@@ -2077,10 +2152,19 @@
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
 
+	/*
+	 * iwl_mvm_protect_session() reads directly from the device
+	 * (the system time), so make sure it is available.
+	 */
+	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
+		return;
+
 	mutex_lock(&mvm->mutex);
 	/* Protect the session to hear the TDLS setup response on the channel */
 	iwl_mvm_protect_session(mvm, vif, duration, duration, 100);
 	mutex_unlock(&mvm->mutex);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
 }
 
 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
@@ -2091,6 +2175,10 @@
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	int ret;
 
+	ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
+	if (ret)
+		return ret;
+
 	mutex_lock(&mvm->mutex);
 
 	if (!iwl_mvm_is_idle(mvm)) {
@@ -2098,26 +2186,7 @@
 		goto out;
 	}
 
-	switch (mvm->scan_status) {
-	case IWL_MVM_SCAN_OS:
-		IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
-		ret = iwl_mvm_cancel_scan(mvm);
-		if (ret) {
-			ret = -EBUSY;
-			goto out;
-		}
-
-		/*
-		 * iwl_mvm_rx_scan_complete() will be called soon but will
-		 * not reset the scan status as it won't be IWL_MVM_SCAN_OS
-		 * any more since we queue the next scan immediately (below).
-		 * We make sure it is called before the next scan starts by
-		 * flushing the async-handlers work.
-		 */
-		break;
-	case IWL_MVM_SCAN_NONE:
-		break;
-	default:
+	if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -2145,8 +2214,6 @@
 	mvm->scan_status = IWL_MVM_SCAN_NONE;
 out:
 	mutex_unlock(&mvm->mutex);
-	/* make sure to flush the Rx handler before the next scan arrives */
-	iwl_mvm_wait_for_async_handlers(mvm);
 	return ret;
 }
 
@@ -2266,6 +2333,119 @@
 }
 
 
+static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
+			       struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_mvm *mvm =
+		container_of(notif_wait, struct iwl_mvm, notif_wait);
+	struct iwl_hs20_roc_res *resp;
+	int resp_len = iwl_rx_packet_payload_len(pkt);
+	struct iwl_mvm_time_event_data *te_data = data;
+
+	if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
+		return true;
+
+	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+		IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
+		return true;
+	}
+
+	resp = (void *)pkt->data;
+
+	IWL_DEBUG_TE(mvm,
+		     "Aux ROC: Received response from ucode: status=%d uid=%d\n",
+		     resp->status, resp->event_unique_id);
+
+	te_data->uid = le32_to_cpu(resp->event_unique_id);
+	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+		     te_data->uid);
+
+	spin_lock_bh(&mvm->time_event_lock);
+	list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
+	spin_unlock_bh(&mvm->time_event_lock);
+
+	return true;
+}
+
+#define AUX_ROC_MAX_DELAY_ON_CHANNEL 5000
+static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
+				    struct ieee80211_channel *channel,
+				    struct ieee80211_vif *vif,
+				    int duration)
+{
+	int res, time_reg = DEVICE_SYSTEM_TIME_REG;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
+	static const u8 time_event_response[] = { HOT_SPOT_CMD };
+	struct iwl_notification_wait wait_time_event;
+	struct iwl_hs20_roc_req aux_roc_req = {
+		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+		.id_and_color =
+			cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
+		.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
+		/* Set the channel info data */
+		.channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
+			PHY_BAND_24 : PHY_BAND_5,
+		.channel_info.channel = channel->hw_value,
+		.channel_info.width = PHY_VHT_CHANNEL_MODE20,
+		/* Set the time and duration */
+		.apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
+		.apply_time_max_delay =
+			cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)),
+		.duration = cpu_to_le32(MSEC_TO_TU(duration)),
+	};
+
+	/* Set the node address */
+	memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
+
+	te_data->vif = vif;
+	te_data->duration = duration;
+	te_data->id = HOT_SPOT_CMD;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->time_event_lock);
+	list_add_tail(&te_data->list, &mvm->time_event_list);
+	spin_unlock_bh(&mvm->time_event_lock);
+
+	/*
+	 * Use a notification wait, which really just processes the
+	 * command response and doesn't wait for anything, in order
+	 * to be able to process the response and get the UID inside
+	 * the RX path. Using CMD_WANT_SKB doesn't work because it
+	 * stores the buffer and then wakes up this thread, by which
+	 * time another notification (that the time event started)
+	 * might already be processed unsuccessfully.
+	 */
+	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+				   time_event_response,
+				   ARRAY_SIZE(time_event_response),
+				   iwl_mvm_rx_aux_roc, te_data);
+
+	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
+				   &aux_roc_req);
+
+	if (res) {
+		IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
+		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+		goto out_clear_te;
+	}
+
+	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
+	res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+	/* should never fail */
+	WARN_ON_ONCE(res);
+
+	if (res) {
+ out_clear_te:
+		spin_lock_bh(&mvm->time_event_lock);
+		iwl_mvm_te_clear_data(mvm, te_data);
+		spin_unlock_bh(&mvm->time_event_lock);
+	}
+
+	return res;
+}
+
 static int iwl_mvm_roc(struct ieee80211_hw *hw,
 		       struct ieee80211_vif *vif,
 		       struct ieee80211_channel *channel,
@@ -2281,8 +2461,17 @@
 	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
 			   duration, type);
 
-	if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
-		IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type);
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		/* Use aux roc framework (HS20) */
+		ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
+					       vif, duration);
+		return ret;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		/* handle below */
+		break;
+	default:
+		IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
 		return -EINVAL;
 	}
 
@@ -2661,6 +2850,10 @@
 		goto out_remove;
 	}
 
+	/* we don't support TDLS during DCM - can be caused by channel switch */
+	if (iwl_mvm_phy_ctx_count(mvm) > 1)
+		iwl_mvm_teardown_tdls_peers(mvm);
+
 	goto out;
 
 out_remove:
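
With references now counted per type (and protected by refs_lock) instead of tracked in a bitmap, every mac80211 hook in this file that touches the device directly follows the same guard shape: take a synchronized reference, do the work under the mvm mutex, drop the reference. A minimal sketch of that pattern; the hook body and the chosen reference type are illustrative:

/* Sketch: guard direct device access with a counted runtime-PM reference. */
static void example_hook(struct iwl_mvm *mvm)
{
	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
		return;				/* device not available */

	mutex_lock(&mvm->mutex);
	/* ... read the device time, send commands, etc. ... */
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
}
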
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 785e5232..2e73d3b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -82,6 +82,8 @@
 /* RSSI offset for WkP */
 #define IWL_RSSI_OFFSET 50
 #define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
+/* A TimeUnit is 1024 microseconds */
+#define MSEC_TO_TU(_msec)	(_msec*1000/1024)
 
 /*
  * The CSA NoA is scheduled IWL_MVM_CHANNEL_SWITCH_TIME TUs before "beacon 0"
@@ -126,6 +128,21 @@
 };
 extern struct iwl_mvm_mod_params iwlmvm_mod_params;
 
+/**
+ * struct iwl_mvm_dump_ptrs - set of pointers needed for the fw-error-dump
+ *
+ * @op_mode_ptr: pointer to the buffer coming from the mvm op_mode
+ * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
+ *	transport's data (its length lives in trans_ptr->len).
+ * @op_mode_len: length of the valid data in op_mode_ptr
+ */
+struct iwl_mvm_dump_ptrs {
+	struct iwl_trans_dump_data *trans_ptr;
+	void *op_mode_ptr;
+	u32 op_mode_len;
+};
+
 struct iwl_mvm_phy_ctxt {
 	u16 id;
 	u16 color;
@@ -249,6 +266,15 @@
 	IWL_MVM_REF_TX,
 	IWL_MVM_REF_TX_AGG,
 	IWL_MVM_REF_ADD_IF,
+	IWL_MVM_REF_START_AP,
+	IWL_MVM_REF_BSS_CHANGED,
+	IWL_MVM_REF_PREPARE_TX,
+	IWL_MVM_REF_PROTECT_TDLS,
+	IWL_MVM_REF_CHECK_CTKILL,
+	IWL_MVM_REF_PRPH_READ,
+	IWL_MVM_REF_PRPH_WRITE,
+	IWL_MVM_REF_NMI,
+	IWL_MVM_REF_TM_CMD,
 	IWL_MVM_REF_EXIT_WORK,
 
 	IWL_MVM_REF_COUNT,
@@ -327,6 +353,7 @@
 	 */
 	struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
 	struct iwl_mvm_time_event_data time_event_data;
+	struct iwl_mvm_time_event_data hs_time_event_data;
 
 	struct iwl_mvm_int_sta bcast_sta;
 
@@ -606,14 +633,15 @@
 	 */
 	unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
 
-	/* A bitmap of reference types taken by the driver. */
-	unsigned long ref_bitmap[BITS_TO_LONGS(IWL_MVM_REF_COUNT)];
+	/* references taken by the driver and spinlock protecting them */
+	spinlock_t refs_lock;
+	u8 refs[IWL_MVM_REF_COUNT];
 
 	u8 vif_count;
 
 	/* -1 for always, 0 for never, >0 for that many times */
 	s8 restart_fw;
-	void *fw_error_dump;
+	struct iwl_mvm_dump_ptrs *fw_error_dump;
 
 #ifdef CONFIG_IWLWIFI_LEDS
 	struct led_classdev led;
@@ -647,7 +675,8 @@
 	wait_queue_head_t d0i3_exit_waitq;
 
 	/* BT-Coex */
-	u8 bt_kill_msk;
+	u8 bt_ack_kill_msk[NUM_PHY_CTX];
+	u8 bt_cts_kill_msk[NUM_PHY_CTX];
 
 	struct iwl_bt_coex_profile_notif_old last_bt_notif_old;
 	struct iwl_bt_coex_ci_cmd_old last_bt_ci_cmd_old;
@@ -659,6 +688,9 @@
 	u8 bt_tx_prio;
 	enum iwl_bt_force_ant_mode bt_force_ant_mode;
 
+	/* Aux ROC */
+	struct list_head aux_roc_te_list;
+
 	/* Thermal Throttling and CTkill */
 	struct iwl_mvm_tt_mgmt thermal_throttle;
 	s32 temperature;	/* Celsius */
@@ -697,6 +729,7 @@
 	IWL_MVM_STATUS_ROC_RUNNING,
 	IWL_MVM_STATUS_IN_HW_RESTART,
 	IWL_MVM_STATUS_IN_D0I3,
+	IWL_MVM_STATUS_ROC_AUX_RUNNING,
 };
 
 static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -988,6 +1021,7 @@
 /* D0i3 */
 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
 int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
 
@@ -1029,12 +1063,14 @@
 
 enum iwl_bt_kill_msk {
 	BT_KILL_MSK_DEFAULT,
-	BT_KILL_MSK_SCO_HID_A2DP,
-	BT_KILL_MSK_REDUCED_TXPOW,
+	BT_KILL_MSK_NEVER,
+	BT_KILL_MSK_ALWAYS,
 	BT_KILL_MSK_MAX,
 };
-extern const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX];
-extern const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX];
+
+extern const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
+extern const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
+extern const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX];
 
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
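
MSEC_TO_TU() above converts milliseconds to 1024-microsecond time units with plain integer arithmetic, so results truncate, and since the macro argument is not parenthesized, compound expressions should be wrapped by the caller. A quick illustration (the variable names are placeholders):

/* Sketch: TU conversion truncates; parenthesize compound arguments. */
u32 five_sec_tu = MSEC_TO_TU(5000);			/* 5000 * 1000 / 1024 = 4882 TU */
u32 total_tu    = MSEC_TO_TU((base_ms + extra_ms));	/* parens keep the *1000 from binding to extra_ms only */
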
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index b04805ccb4..cfdd314 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -265,7 +265,7 @@
 	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
 		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
 		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
-			IWL_ERR(mvm, "Can't parse empty NVM sections\n");
+			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
 			return NULL;
 		}
 	} else {
@@ -273,7 +273,7 @@
 		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
 		    !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
 			IWL_ERR(mvm,
-				"Can't parse empty family 8000 NVM sections\n");
+				"Can't parse empty family 8000 OTP/NVM sections\n");
 			return NULL;
 		}
 		/* MAC_OVERRIDE or at least HW section must exist */
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 7d7b2fb..610dbcb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -289,6 +289,7 @@
 	CMD(MATCH_FOUND_NOTIFICATION),
 	CMD(SCAN_OFFLOAD_REQUEST_CMD),
 	CMD(SCAN_OFFLOAD_ABORT_CMD),
+	CMD(HOT_SPOT_CMD),
 	CMD(SCAN_OFFLOAD_COMPLETE),
 	CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
 	CMD(SCAN_ITERATION_COMPLETE),
@@ -391,6 +392,9 @@
 	if (!hw)
 		return NULL;
 
+	if (cfg->max_rx_agg_size)
+		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+
 	op_mode = hw->priv;
 	op_mode->ops = &iwl_mvm_ops;
 
@@ -416,6 +420,7 @@
 	mutex_init(&mvm->d0i3_suspend_mutex);
 	spin_lock_init(&mvm->async_handlers_lock);
 	INIT_LIST_HEAD(&mvm->time_event_list);
+	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
 	INIT_LIST_HEAD(&mvm->async_handlers_list);
 	spin_lock_init(&mvm->time_event_lock);
 
@@ -425,6 +430,7 @@
 	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
 
 	spin_lock_init(&mvm->d0i3_tx_lock);
+	spin_lock_init(&mvm->refs_lock);
 	skb_queue_head_init(&mvm->d0i3_tx);
 	init_waitqueue_head(&mvm->d0i3_exit_waitq);
 
@@ -539,7 +545,7 @@
 	memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
 
 	/* rpm starts with a taken ref. only set the appropriate bit here. */
-	set_bit(IWL_MVM_REF_UCODE_DOWN, mvm->ref_bitmap);
+	mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1;
 
 	return op_mode;
 
@@ -567,7 +573,11 @@
 	ieee80211_unregister_hw(mvm->hw);
 
 	kfree(mvm->scan_cmd);
-	vfree(mvm->fw_error_dump);
+	if (mvm->fw_error_dump) {
+		vfree(mvm->fw_error_dump->op_mode_ptr);
+		vfree(mvm->fw_error_dump->trans_ptr);
+		kfree(mvm->fw_error_dump);
+	}
 	kfree(mvm->mcast_filter_cmd);
 	mvm->mcast_filter_cmd = NULL;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 8128139..7635488 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -98,23 +98,21 @@
 			   bool update)
 {
 	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-	struct iwl_mvm_add_sta_cmd add_sta_cmd;
+	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
+		.sta_id = mvm_sta->sta_id,
+		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+		.add_modify = update ? 1 : 0,
+		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
+						 STA_FLG_MIMO_EN_MSK),
+	};
 	int ret;
 	u32 status;
 	u32 agg_size = 0, mpdu_dens = 0;
 
-	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
-
-	add_sta_cmd.sta_id = mvm_sta->sta_id;
-	add_sta_cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
 	if (!update) {
 		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
 		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
 	}
-	add_sta_cmd.add_modify = update ? 1 : 0;
-
-	add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_FAT_EN_MSK |
-						     STA_FLG_MIMO_EN_MSK);
 
 	switch (sta->bandwidth) {
 	case IEEE80211_STA_RX_BW_160:
@@ -528,8 +526,12 @@
 
 	lockdep_assert_held(&mvm->mutex);
 
-	/* Add the aux station, but without any queues */
-	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0,
+	/* Map Aux queue to fifo - needs to happen before adding Aux station */
+	iwl_trans_ac_txq_enable(mvm->trans, mvm->aux_queue,
+				IWL_MVM_TX_FIFO_MCAST);
+
+	/* Allocate aux station and assign to it the aux queue */
+	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
 				       NL80211_IFTYPE_UNSPECIFIED);
 	if (ret)
 		return ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index ae52613..33e5041 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -72,9 +72,6 @@
 #include "iwl-io.h"
 #include "iwl-prph.h"
 
-/* A TimeUnit is 1024 microsecond */
-#define MSEC_TO_TU(_msec)	(_msec*1000/1024)
-
 /*
  * For the high priority TE use a time event type that has similar priority to
  * the FW's action scan priority.
@@ -100,6 +97,21 @@
 void iwl_mvm_roc_done_wk(struct work_struct *wk)
 {
 	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+	u32 queues = 0;
+
+	/*
+	 * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
+	 * This will cause the TX path to drop offchannel transmissions.
+	 * That would also be done by mac80211, but it is racy, in particular
+	 * in the case that the time event actually completed in the firmware
+	 * (which is handled in iwl_mvm_te_handle_notif).
+	 */
+	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+		queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+		queues |= BIT(mvm->aux_queue);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
 
 	synchronize_net();
 
@@ -113,22 +125,12 @@
 	 * issue as it will have to complete before the next command is
 	 * executed, and a new time event means a new command.
 	 */
-	iwl_mvm_flush_tx_path(mvm, BIT(IWL_MVM_OFFCHANNEL_QUEUE), false);
+	iwl_mvm_flush_tx_path(mvm, queues, false);
 }
 
 static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
 {
 	/*
-	 * First, clear the ROC_RUNNING status bit. This will cause the TX
-	 * path to drop offchannel transmissions. That would also be done
-	 * by mac80211, but it is racy, in particular in the case that the
-	 * time event actually completed in the firmware (which is handled
-	 * in iwl_mvm_te_handle_notif).
-	 */
-	clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
-	iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
-
-	/*
 	 * Of course, our status bit is just as racy as mac80211, so in
 	 * addition, fire off the work struct which will drop all frames
 	 * from the hardware queues that made it through the race. First
@@ -263,6 +265,60 @@
 }
 
 /*
+ * Handle an Aux ROC time event
+ */
+static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
+					   struct iwl_time_event_notif *notif)
+{
+	struct iwl_mvm_time_event_data *te_data, *tmp;
+	bool aux_roc_te = false;
+
+	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
+		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
+			aux_roc_te = true;
+			break;
+		}
+	}
+	if (!aux_roc_te) /* Not an Aux ROC time event */
+		return -EINVAL;
+
+	if (!le32_to_cpu(notif->status)) {
+		IWL_DEBUG_TE(mvm,
+			     "ERROR: Aux ROC Time Event %s notification failure\n",
+			     (le32_to_cpu(notif->action) &
+			      TE_V2_NOTIF_HOST_EVENT_START) ? "start" : "end");
+		return -EINVAL;
+	}
+
+	IWL_DEBUG_TE(mvm,
+		     "Aux ROC time event notification - UID = 0x%x action %d\n",
+		     le32_to_cpu(notif->unique_id),
+		     le32_to_cpu(notif->action));
+
+	if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
+		/* End TE, notify mac80211 */
+		ieee80211_remain_on_channel_expired(mvm->hw);
+		iwl_mvm_roc_finished(mvm); /* flush aux queue */
+		list_del(&te_data->list); /* remove from list */
+		te_data->running = false;
+		te_data->vif = NULL;
+		te_data->uid = 0;
+	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
+		set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+		te_data->running = true;
+		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+	} else {
+		IWL_DEBUG_TE(mvm,
+			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
+			     le32_to_cpu(notif->action));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
  * The Rx handler for time event notifications
  */
 int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
@@ -278,10 +334,15 @@
 		     le32_to_cpu(notif->action));
 
 	spin_lock_bh(&mvm->time_event_lock);
+	/* Check if this notification belongs to an Aux ROC time event */
+	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
+		goto unlock;
+
 	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
 		if (le32_to_cpu(notif->unique_id) == te_data->uid)
 			iwl_mvm_te_handle_notif(mvm, te_data, notif);
 	}
+unlock:
 	spin_unlock_bh(&mvm->time_event_lock);
 
 	return 0;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 8685615..0464599 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -140,9 +140,9 @@
 
 	/* TODO: move parsing to NVM code */
 	calib = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION].data;
-	ptat = calib[OTP_DTS_DIODE_DEVIATION];
-	pa1 = calib[OTP_DTS_DIODE_DEVIATION + 1];
-	pa2 = calib[OTP_DTS_DIODE_DEVIATION + 2];
+	ptat = calib[OTP_DTS_DIODE_DEVIATION * 2];
+	pa1 = calib[OTP_DTS_DIODE_DEVIATION * 2 + 1];
+	pa2 = calib[OTP_DTS_DIODE_DEVIATION * 2 + 2];
 
 	/* get the median: */
 	if (ptat > pa1) {
@@ -338,10 +338,16 @@
 
 	duration = tt->params->ct_kill_duration;
 
+	/* make sure the device is available for direct read/writes */
+	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL))
+		goto reschedule;
+
 	iwl_trans_start_hw(mvm->trans);
 	temp = check_nic_temperature(mvm);
 	iwl_trans_stop_device(mvm->trans);
 
+	iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL);
+
 	if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) {
 		IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n");
 		goto reschedule;
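
The doubled calibration index in the first tt.c hunk above suggests OTP_DTS_DIODE_DEVIATION is an offset in 16-bit calibration words while calib is walked as a byte array; under that assumption, the fix is equivalent to the following purely illustrative formulation:

/* Sketch (assumes a word offset into a byte-addressed calibration blob). */
const u8 *diode = calib + OTP_DTS_DIODE_DEVIATION * sizeof(__le16);

ptat = diode[0];
pa1  = diode[1];
pa2  = diode[2];
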
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index e9ff386..dbc8707 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -311,6 +311,16 @@
 		return -1;
 
 	/*
+	 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be sent
+	 * from 2 different types of vifs, P2P & STATION. P2P uses the
+	 * offchannel queue. STATION (HS2.0) uses the auxiliary context of the
+	 * FW, and hence needs to be sent on the aux queue.
+	 */
+	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+	    info->control.vif->type == NL80211_IFTYPE_STATION)
+		IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+
+	/*
 	 * If the interface on which frame is sent is the P2P_DEVICE
 	 * or an AP/GO interface use the broadcast station associated
 	 * with it; otherwise use the AUX station.
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 5b5b0d8..06e04aa 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -67,6 +67,7 @@
 #include <linux/sched.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
+#include <linux/vmalloc.h>
 
 #include "iwl-drv.h"
 #include "iwl-trans.h"
@@ -1773,28 +1774,207 @@
 	return cmdlen;
 }
 
-static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
-				    void *buf, u32 buflen)
+static const struct {
+	u32 start, end;
+} iwl_prph_dump_addr[] = {
+	{ .start = 0x00a00000, .end = 0x00a00000 },
+	{ .start = 0x00a0000c, .end = 0x00a00024 },
+	{ .start = 0x00a0002c, .end = 0x00a0003c },
+	{ .start = 0x00a00410, .end = 0x00a00418 },
+	{ .start = 0x00a00420, .end = 0x00a00420 },
+	{ .start = 0x00a00428, .end = 0x00a00428 },
+	{ .start = 0x00a00430, .end = 0x00a0043c },
+	{ .start = 0x00a00444, .end = 0x00a00444 },
+	{ .start = 0x00a004c0, .end = 0x00a004cc },
+	{ .start = 0x00a004d8, .end = 0x00a004d8 },
+	{ .start = 0x00a004e0, .end = 0x00a004f0 },
+	{ .start = 0x00a00840, .end = 0x00a00840 },
+	{ .start = 0x00a00850, .end = 0x00a00858 },
+	{ .start = 0x00a01004, .end = 0x00a01008 },
+	{ .start = 0x00a01010, .end = 0x00a01010 },
+	{ .start = 0x00a01018, .end = 0x00a01018 },
+	{ .start = 0x00a01024, .end = 0x00a01024 },
+	{ .start = 0x00a0102c, .end = 0x00a01034 },
+	{ .start = 0x00a0103c, .end = 0x00a01040 },
+	{ .start = 0x00a01048, .end = 0x00a01094 },
+	{ .start = 0x00a01c00, .end = 0x00a01c20 },
+	{ .start = 0x00a01c58, .end = 0x00a01c58 },
+	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
+	{ .start = 0x00a01c28, .end = 0x00a01c54 },
+	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
+	{ .start = 0x00a01c84, .end = 0x00a01c84 },
+	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
+	{ .start = 0x00a01d18, .end = 0x00a01d20 },
+	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
+	{ .start = 0x00a01d40, .end = 0x00a01d5c },
+	{ .start = 0x00a01d80, .end = 0x00a01d80 },
+	{ .start = 0x00a01d98, .end = 0x00a01d98 },
+	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
+	{ .start = 0x00a01e00, .end = 0x00a01e2c },
+	{ .start = 0x00a01e40, .end = 0x00a01e60 },
+	{ .start = 0x00a01e84, .end = 0x00a01e90 },
+	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
+	{ .start = 0x00a01ed0, .end = 0x00a01ed0 },
+	{ .start = 0x00a01f00, .end = 0x00a01f14 },
+	{ .start = 0x00a01f44, .end = 0x00a01f58 },
+	{ .start = 0x00a01f80, .end = 0x00a01fa8 },
+	{ .start = 0x00a01fb0, .end = 0x00a01fbc },
+	{ .start = 0x00a01ff8, .end = 0x00a01ffc },
+	{ .start = 0x00a02000, .end = 0x00a02048 },
+	{ .start = 0x00a02068, .end = 0x00a020f0 },
+	{ .start = 0x00a02100, .end = 0x00a02118 },
+	{ .start = 0x00a02140, .end = 0x00a0214c },
+	{ .start = 0x00a02168, .end = 0x00a0218c },
+	{ .start = 0x00a021c0, .end = 0x00a021c0 },
+	{ .start = 0x00a02400, .end = 0x00a02410 },
+	{ .start = 0x00a02418, .end = 0x00a02420 },
+	{ .start = 0x00a02428, .end = 0x00a0242c },
+	{ .start = 0x00a02434, .end = 0x00a02434 },
+	{ .start = 0x00a02440, .end = 0x00a02460 },
+	{ .start = 0x00a02468, .end = 0x00a024b0 },
+	{ .start = 0x00a024c8, .end = 0x00a024cc },
+	{ .start = 0x00a02500, .end = 0x00a02504 },
+	{ .start = 0x00a0250c, .end = 0x00a02510 },
+	{ .start = 0x00a02540, .end = 0x00a02554 },
+	{ .start = 0x00a02580, .end = 0x00a025f4 },
+	{ .start = 0x00a02600, .end = 0x00a0260c },
+	{ .start = 0x00a02648, .end = 0x00a02650 },
+	{ .start = 0x00a02680, .end = 0x00a02680 },
+	{ .start = 0x00a026c0, .end = 0x00a026d0 },
+	{ .start = 0x00a02700, .end = 0x00a0270c },
+	{ .start = 0x00a02804, .end = 0x00a02804 },
+	{ .start = 0x00a02818, .end = 0x00a0281c },
+	{ .start = 0x00a02c00, .end = 0x00a02db4 },
+	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
+	{ .start = 0x00a03000, .end = 0x00a03014 },
+	{ .start = 0x00a0301c, .end = 0x00a0302c },
+	{ .start = 0x00a03034, .end = 0x00a03038 },
+	{ .start = 0x00a03040, .end = 0x00a03048 },
+	{ .start = 0x00a03060, .end = 0x00a03068 },
+	{ .start = 0x00a03070, .end = 0x00a03074 },
+	{ .start = 0x00a0307c, .end = 0x00a0307c },
+	{ .start = 0x00a03080, .end = 0x00a03084 },
+	{ .start = 0x00a0308c, .end = 0x00a03090 },
+	{ .start = 0x00a03098, .end = 0x00a03098 },
+	{ .start = 0x00a030a0, .end = 0x00a030a0 },
+	{ .start = 0x00a030a8, .end = 0x00a030b4 },
+	{ .start = 0x00a030bc, .end = 0x00a030bc },
+	{ .start = 0x00a030c0, .end = 0x00a0312c },
+	{ .start = 0x00a03c00, .end = 0x00a03c5c },
+	{ .start = 0x00a04400, .end = 0x00a04454 },
+	{ .start = 0x00a04460, .end = 0x00a04474 },
+	{ .start = 0x00a044c0, .end = 0x00a044ec },
+	{ .start = 0x00a04500, .end = 0x00a04504 },
+	{ .start = 0x00a04510, .end = 0x00a04538 },
+	{ .start = 0x00a04540, .end = 0x00a04548 },
+	{ .start = 0x00a04560, .end = 0x00a0457c },
+	{ .start = 0x00a04590, .end = 0x00a04598 },
+	{ .start = 0x00a045c0, .end = 0x00a045f4 },
+};
+
+static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
+				    struct iwl_fw_error_dump_data **data)
+{
+	struct iwl_fw_error_dump_prph *prph;
+	unsigned long flags;
+	u32 prph_len = 0, i;
+
+	if (!iwl_trans_grab_nic_access(trans, false, &flags))
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+		/* The range includes both boundaries */
+		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+			 iwl_prph_dump_addr[i].start + 4;
+		int reg;
+		__le32 *val;
+
+		prph_len += sizeof(*data) + sizeof(*prph) +
+			num_bytes_in_chunk;
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
+		(*data)->len = cpu_to_le32(sizeof(*prph) +
+					num_bytes_in_chunk);
+		prph = (void *)(*data)->data;
+		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
+		val = (void *)prph->data;
+
+		for (reg = iwl_prph_dump_addr[i].start;
+		     reg <= iwl_prph_dump_addr[i].end;
+		     reg += 4)
+			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
+								      reg));
+		*data = iwl_fw_error_next_data(*data);
+	}
+
+	iwl_trans_release_nic_access(trans, &flags);
+
+	return prph_len;
+}
+
+#define IWL_CSR_TO_DUMP (0x250)
+
+static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
+				   struct iwl_fw_error_dump_data **data)
+{
+	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
+	__le32 *val;
+	int i;
+
+	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
+	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
+	val = (void *)(*data)->data;
+
+	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
+		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+
+	*data = iwl_fw_error_next_data(*data);
+
+	return csr_len;
+}
+
+static
+struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_fw_error_dump_data *data;
 	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	struct iwl_fw_error_dump_txcmd *txcmd;
+	struct iwl_trans_dump_data *dump_data;
 	u32 len;
 	int i, ptr;
 
-	len = sizeof(*data) +
+	/* transport dump header */
+	len = sizeof(*dump_data);
+
+	/* host commands */
+	len += sizeof(*data) +
 		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
 
+	/* CSR registers */
+	len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+	/* PRPH registers */
+	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+		/* The range includes both boundaries */
+		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+			iwl_prph_dump_addr[i].start + 4;
+
+		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
+			num_bytes_in_chunk;
+	}
+
+	/* FW monitor */
 	if (trans_pcie->fw_mon_page)
 		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
 			trans_pcie->fw_mon_size;
 
-	if (!buf)
-		return len;
+	dump_data = vzalloc(len);
+	if (!dump_data)
+		return NULL;
 
 	len = 0;
-	data = buf;
+	data = (void *)dump_data->data;
 	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
 	txcmd = (void *)data->data;
 	spin_lock_bh(&cmdq->lock);
@@ -1820,11 +2000,15 @@
 
 	data->len = cpu_to_le32(len);
 	len += sizeof(*data);
+	data = iwl_fw_error_next_data(data);
+
+	len += iwl_trans_pcie_dump_prph(trans, &data);
+	len += iwl_trans_pcie_dump_csr(trans, &data);
+	/* data is already pointing to the next section */
 
 	if (trans_pcie->fw_mon_page) {
 		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
 
-		data = iwl_fw_error_next_data(data);
 		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
 		data->len = cpu_to_le32(trans_pcie->fw_mon_size +
 					sizeof(*fw_mon_data));
@@ -1852,7 +2036,9 @@
 			trans_pcie->fw_mon_size;
 	}
 
-	return len;
+	dump_data->len = len;
+
+	return dump_data;
 }
 #else
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
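
iwl_trans_pcie_dump_data() now works in two passes: it first adds up the dump header plus one iwl_fw_error_dump_data header per section (host commands, CSR, PRPH ranges, FW monitor), then vzalloc()s the whole thing and fills the sections in order, hopping between them with iwl_fw_error_next_data(). A compact sketch of that two-pass, TLV-style assembly for a single hypothetical section; payload and payload_len are stand-ins:

/* Sketch: size first, then fill one section and advance the cursor. */
len  = sizeof(*dump_data);			/* transport dump header  */
len += sizeof(*data) + payload_len;		/* section header + body  */

dump_data = vzalloc(len);
if (!dump_data)
	return NULL;

data = (void *)dump_data->data;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);	/* any section type */
data->len  = cpu_to_le32(payload_len);
memcpy(data->data, payload, payload_len);
data = iwl_fw_error_next_data(data);	/* cursor now at the next section */

dump_data->len = len;
return dump_data;
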
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 5ea65fc..1326f61 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -685,11 +685,16 @@
 	struct mac80211_hwsim_data *data = hw->priv;
 	u64 now = mac80211_hwsim_get_tsf(hw, vif);
 	u32 bcn_int = data->beacon_int;
-	s64 delta = tsf - now;
+	u64 delta = abs64(tsf - now);
 
-	data->tsf_offset += delta;
 	/* adjust after beaconing with new timestamp at old TBTT */
-	data->bcn_delta = do_div(delta, bcn_int);
+	if (tsf > now) {
+		data->tsf_offset += delta;
+		data->bcn_delta = do_div(delta, bcn_int);
+	} else {
+		data->tsf_offset -= delta;
+		data->bcn_delta = -do_div(delta, bcn_int);
+	}
 }
 
 static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
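
The hwsim change above works around do_div() taking an unsigned 64-bit dividend: a negative TSF delta has to be folded to its magnitude first, and the sign reapplied to both the offset and the beacon remainder afterwards. The idiom, reduced to its essentials (variable names only):

/* Sketch: signed adjustment built on an unsigned do_div(). */
u64 delta = abs64(tsf - now);	/* magnitude; do_div() needs a u64 */
s64 rem;

rem = do_div(delta, bcn_int);	/* delta becomes the quotient */
if (tsf < now)
	rem = -rem;		/* restore the sign of the remainder */
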
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 9d6d8d9..62f5dbe 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -541,7 +541,6 @@
 int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
 {
 	struct host_cmd_ds_11n_addba_req add_ba_req;
-	struct mwifiex_sta_node *sta_ptr;
 	u32 tx_win_size = priv->add_ba_param.tx_win_size;
 	static u8 dialog_tok;
 	int ret;
@@ -553,6 +552,8 @@
 	    ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
 	    priv->adapter->is_hw_11ac_capable &&
 	    memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
+		struct mwifiex_sta_node *sta_ptr;
+
 		sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
 		if (!sta_ptr) {
 			dev_warn(priv->adapter->dev,
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index b22bae3..06a2c21 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -249,13 +249,22 @@
  * buffered in Rx reordering table.
  */
 static int
-mwifiex_11n_find_last_seq_num(struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr)
+mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
 {
+	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
+	struct mwifiex_private *priv = ctx->priv;
+	unsigned long flags;
 	int i;
 
-	for (i = (rx_reorder_tbl_ptr->win_size - 1); i >= 0; --i)
-		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
+		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			return i;
+		}
+	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return -1;
 }
@@ -274,7 +283,7 @@
 		(struct reorder_tmr_cnxt *) context;
 	int start_win, seq_num;
 
-	seq_num = mwifiex_11n_find_last_seq_num(ctx->ptr);
+	seq_num = mwifiex_11n_find_last_seq_num(ctx);
 
 	if (seq_num < 0)
 		return;
@@ -729,9 +738,9 @@
 		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
 		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	}
+	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
-	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
 	mwifiex_reset_11n_rx_seq_num(priv);
 }
 
@@ -749,10 +758,14 @@
 		priv = adapter->priv[i];
 		if (!priv)
 			continue;
-		if (list_empty(&priv->rx_reorder_tbl_ptr))
-			continue;
 
 		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
+		if (list_empty(&priv->rx_reorder_tbl_ptr)) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       lock_flags);
+			continue;
+		}
+
 		list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
 			tbl->flags = flags;
 		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
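
Both rxreorder fixes above pull the emptiness test and the list walk fully under rx_reorder_tbl_lock, so nothing can change the table between the check and the traversal. The general check-under-lock shape, sketched with a placeholder update:

/* Sketch: never test a shared list outside the lock that protects it. */
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
if (list_empty(&priv->rx_reorder_tbl_ptr)) {
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
	return;
}
list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
	tbl->flags = new_flags;			/* placeholder update */
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
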
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index ca87f92..e2e6bf1 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1604,9 +1604,6 @@
 		return -EINVAL;
 	}
 
-	/* disconnect before try to associate */
-	mwifiex_deauthenticate(priv, NULL);
-
 	/* As this is new association, clear locally stored
 	 * keys and security related flags */
 	priv->sec_info.wpa_enabled = false;
@@ -1744,6 +1741,11 @@
 		return -EINVAL;
 	}
 
+	if (priv->wdev && priv->wdev->current_bss) {
+		wiphy_warn(wiphy, "%s: already connected\n", dev->name);
+		return -EALREADY;
+	}
+
 	wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
 		  (char *) sme->ssid, sme->bssid);
 
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 5899eee..baf0aab 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -137,7 +137,6 @@
 	struct host_cmd_ds_command *host_cmd;
 	uint16_t cmd_code;
 	uint16_t cmd_size;
-	struct timeval tstamp;
 	unsigned long flags;
 	__le32 tmp;
 
@@ -198,10 +197,8 @@
 		 */
 		skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len);
 
-	do_gettimeofday(&tstamp);
-	dev_dbg(adapter->dev, "cmd: DNLD_CMD: (%lu.%lu): %#x, act %#x, len %d,"
-		" seqno %#x\n",
-		tstamp.tv_sec, tstamp.tv_usec, cmd_code,
+	dev_dbg(adapter->dev,
+		"cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code,
 		le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
 		le16_to_cpu(host_cmd->seq_num));
 
@@ -273,7 +270,6 @@
 				(struct mwifiex_opt_sleep_confirm *)
 						adapter->sleep_cfm->data;
 	struct sk_buff *sleep_cfm_tmp;
-	struct timeval ts;
 	__le32 tmp;
 
 	priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -284,10 +280,9 @@
 					(adapter->seq_num, priv->bss_num,
 					 priv->bss_type)));
 
-	do_gettimeofday(&ts);
 	dev_dbg(adapter->dev,
-		"cmd: DNLD_CMD: (%lu.%lu): %#x, act %#x, len %d, seqno %#x\n",
-		ts.tv_sec, ts.tv_usec, le16_to_cpu(sleep_cfm_buf->command),
+		"cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
+		le16_to_cpu(sleep_cfm_buf->command),
 		le16_to_cpu(sleep_cfm_buf->action),
 		le16_to_cpu(sleep_cfm_buf->size),
 		le16_to_cpu(sleep_cfm_buf->seq_num));
@@ -442,7 +437,6 @@
 		mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
 	struct sk_buff *skb = adapter->event_skb;
 	u32 eventcause = adapter->event_cause;
-	struct timeval tstamp;
 	struct mwifiex_rxinfo *rx_info;
 
 	/* Save the last event to debug log */
@@ -467,9 +461,7 @@
 		rx_info->bss_type = priv->bss_type;
 	}
 
-	do_gettimeofday(&tstamp);
-	dev_dbg(adapter->dev, "EVENT: %lu.%lu: cause: %#x\n",
-		tstamp.tv_sec, tstamp.tv_usec, eventcause);
+	dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause);
 	if (eventcause == EVENT_PS_SLEEP || eventcause == EVENT_PS_AWAKE) {
 		/* Handle PS_SLEEP/AWAKE events on STA */
 		priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
@@ -781,7 +773,6 @@
 	uint16_t orig_cmdresp_no;
 	uint16_t cmdresp_no;
 	uint16_t cmdresp_result;
-	struct timeval tstamp;
 	unsigned long flags;
 
 	/* Now we got response from FW, cancel the command timer */
@@ -839,11 +830,10 @@
 	adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] =
 								orig_cmdresp_no;
 
-	do_gettimeofday(&tstamp);
-	dev_dbg(adapter->dev, "cmd: CMD_RESP: (%lu.%lu): 0x%x, result %d,"
-		" len %d, seqno 0x%x\n",
-	       tstamp.tv_sec, tstamp.tv_usec, orig_cmdresp_no, cmdresp_result,
-	       le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
+	dev_dbg(adapter->dev,
+		"cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
+		orig_cmdresp_no, cmdresp_result,
+		le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
 
 	if (!(orig_cmdresp_no & HostCmd_RET_BIT)) {
 		dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n");
@@ -903,7 +893,6 @@
 	struct mwifiex_adapter *adapter =
 		(struct mwifiex_adapter *) function_context;
 	struct cmd_ctrl_node *cmd_node;
-	struct timeval tstamp;
 
 	adapter->is_cmd_timedout = 1;
 	if (!adapter->curr_cmd) {
@@ -916,10 +905,8 @@
 			adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
 		adapter->dbg.timeout_cmd_act =
 			adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
-		do_gettimeofday(&tstamp);
 		dev_err(adapter->dev,
-			"%s: Timeout cmd id (%lu.%lu) = %#x, act = %#x\n",
-			__func__, tstamp.tv_sec, tstamp.tv_usec,
+			"%s: Timeout cmd id = %#x, act = %#x\n", __func__,
 			adapter->dbg.timeout_cmd_id,
 			adapter->dbg.timeout_cmd_act);
 
@@ -1237,18 +1224,15 @@
 	uint16_t result = le16_to_cpu(cmd->result);
 	uint16_t command = le16_to_cpu(cmd->command);
 	uint16_t seq_num = le16_to_cpu(cmd->seq_num);
-	struct timeval ts;
 
 	if (!upld_len) {
 		dev_err(adapter->dev, "%s: cmd size is 0\n", __func__);
 		return;
 	}
 
-	do_gettimeofday(&ts);
 	dev_dbg(adapter->dev,
-		"cmd: CMD_RESP: (%lu.%lu): 0x%x, result %d, len %d, seqno 0x%x\n",
-		ts.tv_sec, ts.tv_usec, command, result, le16_to_cpu(cmd->size),
-		seq_num);
+		"cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
+		command, result, le16_to_cpu(cmd->size), seq_num);
 
 	/* Get BSS number and corresponding priv */
 	priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 5561573..49da2d5 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -713,7 +713,7 @@
 	u8 ie[MWIFIEX_MAX_VSIE_LEN];
 };
 
-#define MWIFIEX_TDLS_IDLE_TIMEOUT	60
+#define MWIFIEX_TDLS_IDLE_TIMEOUT_IN_SEC	60
 
 struct mwifiex_ie_types_tdls_idle_timeout {
 	struct mwifiex_ie_types_header header;
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index fc13564..8d6c259 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -949,7 +949,7 @@
 				chan_tlv->chan_scan_param[0].radio_type |=
 					(IEEE80211_HT_PARAM_CHA_SEC_ABOVE << 4);
 			else if (adapter->sec_chan_offset ==
-					    IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+					    IEEE80211_HT_PARAM_CHA_SEC_BELOW)
 				chan_tlv->chan_scan_param[0].radio_type |=
 					(IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
 		}
@@ -1288,8 +1288,6 @@
 int mwifiex_associate(struct mwifiex_private *priv,
 		      struct mwifiex_bssdescriptor *bss_desc)
 {
-	u8 current_bssid[ETH_ALEN];
-
 	/* Return error if the adapter is not STA role or table entry
 	 * is not marked as infra.
 	 */
@@ -1304,10 +1302,6 @@
 	else
 		mwifiex_set_ba_params(priv);
 
-	memcpy(&current_bssid,
-	       &priv->curr_bss_params.bss_descriptor.mac_address,
-	       sizeof(current_bssid));
-
 	/* Clear any past association response stored for application
 	   retrieval */
 	priv->assoc_rsp_size = 0;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 3e5194f..dfa37ea 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -33,6 +33,7 @@
 	struct mwifiex_private *priv = (struct mwifiex_private *)data;
 	struct mwifiex_adapter *adapter = priv->adapter;
 	struct cmd_ctrl_node *cmd_node, *tmp_node;
+	spinlock_t *scan_q_lock = &adapter->scan_pending_q_lock;
 	unsigned long flags;
 
 	if (adapter->surprise_removed)
@@ -44,13 +45,13 @@
 		 * Abort scan operation by cancelling all pending scan
 		 * commands
 		 */
-		spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+		spin_lock_irqsave(scan_q_lock, flags);
 		list_for_each_entry_safe(cmd_node, tmp_node,
 					 &adapter->scan_pending_q, list) {
 			list_del(&cmd_node->list);
 			mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
 		}
-		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+		spin_unlock_irqrestore(scan_q_lock, flags);
 
 		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
 		adapter->scan_processing = false;
@@ -79,12 +80,17 @@
 			 */
 			adapter->scan_delay_cnt = 0;
 			adapter->empty_tx_q_cnt = 0;
-			spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+			spin_lock_irqsave(scan_q_lock, flags);
+
+			if (list_empty(&adapter->scan_pending_q)) {
+				spin_unlock_irqrestore(scan_q_lock, flags);
+				goto done;
+			}
+
 			cmd_node = list_first_entry(&adapter->scan_pending_q,
 						    struct cmd_ctrl_node, list);
 			list_del(&cmd_node->list);
-			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-					       flags);
+			spin_unlock_irqrestore(scan_q_lock, flags);
 
 			mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
 							true);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 5f7afff..c16dd2c 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -2238,7 +2238,6 @@
 	struct pcie_service_card *card = adapter->card;
 	const struct mwifiex_pcie_card_reg *creg = card->pcie.reg;
 	unsigned int reg, reg_start, reg_end;
-	struct timeval t;
 	u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
 	enum rdwr_status stat;
 	u32 memory_size;
@@ -2257,9 +2256,7 @@
 		entry->mem_size = 0;
 	}
 
-	do_gettimeofday(&t);
-	dev_info(adapter->dev, "== mwifiex firmware dump start: %u.%06u ==\n",
-		 (u32)t.tv_sec, (u32)t.tv_usec);
+	dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
 
 	/* Read the number of the memories which will dump */
 	stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
@@ -2303,9 +2300,8 @@
 		end_ptr = dbg_ptr + memory_size;
 
 		doneflag = entry->done_flag;
-		do_gettimeofday(&t);
-		dev_info(adapter->dev, "Start %s output %u.%06u, please wait...\n",
-			 entry->mem_name, (u32)t.tv_sec, (u32)t.tv_usec);
+		dev_info(adapter->dev, "Start %s output, please wait...\n",
+			 entry->mem_name);
 
 		do {
 			stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
@@ -2331,9 +2327,7 @@
 			break;
 		} while (true);
 	}
-	do_gettimeofday(&t);
-	dev_info(adapter->dev, "== mwifiex firmware dump end: %u.%06u ==\n",
-		 (u32)t.tv_sec, (u32)t.tv_usec);
+	dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
 
 	kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
 
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 1da04a0..1770fa3 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -1954,6 +1954,7 @@
 	mmc_remove_host(target);
 	/* 20ms delay is based on experiment with sdhci controller */
 	mdelay(20);
+	target->rescan_entered = 0; /* rescan non-removable cards */
 	mmc_add_host(target);
 }
 
@@ -2012,7 +2013,6 @@
 	int ret = 0;
 	unsigned int reg, reg_start, reg_end;
 	u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
-	struct timeval t;
 	enum rdwr_status stat;
 	u32 memory_size;
 	static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL };
@@ -2033,9 +2033,7 @@
 	mwifiex_pm_wakeup_card(adapter);
 	sdio_claim_host(card->func);
 
-	do_gettimeofday(&t);
-	dev_info(adapter->dev, "== mwifiex firmware dump start: %u.%06u ==\n",
-		 (u32)t.tv_sec, (u32)t.tv_usec);
+	dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
 
 	stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
 	if (stat == RDWR_STATUS_FAILURE)
@@ -2087,9 +2085,8 @@
 		end_ptr = dbg_ptr + memory_size;
 
 		doneflag = entry->done_flag;
-		do_gettimeofday(&t);
-		dev_info(adapter->dev, "Start %s output %u.%06u, please wait...\n",
-			 entry->mem_name, (u32)t.tv_sec, (u32)t.tv_usec);
+		dev_info(adapter->dev, "Start %s output, please wait...\n",
+			 entry->mem_name);
 
 		do {
 			stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
@@ -2120,9 +2117,7 @@
 			break;
 		} while (1);
 	}
-	do_gettimeofday(&t);
-	dev_info(adapter->dev, "== mwifiex firmware dump end: %u.%06u ==\n",
-		 (u32)t.tv_sec, (u32)t.tv_usec);
+	dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
 
 	kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
 
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 0f077aa..733de92 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1647,7 +1647,7 @@
 		timeout = (void *)(pos + config_len);
 		timeout->header.type = cpu_to_le16(TLV_TYPE_TDLS_IDLE_TIMEOUT);
 		timeout->header.len = cpu_to_le16(sizeof(timeout->value));
-		timeout->value = cpu_to_le16(MWIFIEX_TDLS_IDLE_TIMEOUT);
+		timeout->value = cpu_to_le16(MWIFIEX_TDLS_IDLE_TIMEOUT_IN_SEC);
 		config_len += sizeof(struct mwifiex_ie_types_tdls_idle_timeout);
 
 		break;
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 822357b..08b78ba 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -908,7 +908,7 @@
 		break;
 	default:
 		dev_err(priv->adapter->dev,
-			"Unknown TDLS command action respnse %d", action);
+			"Unknown TDLS command action response %d", action);
 		return -1;
 	}
 
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 1a03d4d..caae973 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -283,10 +283,6 @@
 	    priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
 		u8 config_bands;
 
-		ret = mwifiex_deauthenticate(priv, NULL);
-		if (ret)
-			goto done;
-
 		if (!bss_desc)
 			return -1;
 
@@ -345,12 +341,6 @@
 			goto done;
 		}
 
-		/* Exit Adhoc mode first */
-		dev_dbg(adapter->dev, "info: Sending Adhoc Stop\n");
-		ret = mwifiex_deauthenticate(priv, NULL);
-		if (ret)
-			goto done;
-
 		priv->adhoc_is_link_sensed = false;
 
 		ret = mwifiex_check_network_compatibility(priv, bss_desc);
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index a414161..4c5fd95 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -781,6 +781,7 @@
 	struct mwifiex_sta_node *sta_ptr;
 	u8 *peer, *pos, *end;
 	u8 i, action, basic;
+	__le16 cap = 0;
 	int ie_len = 0;
 
 	if (len < (sizeof(struct ethhdr) + 3))
@@ -792,18 +793,9 @@
 
 	peer = buf + ETH_ALEN;
 	action = *(buf + sizeof(struct ethhdr) + 2);
-
-	/* just handle TDLS setup request/response/confirm */
-	if (action > WLAN_TDLS_SETUP_CONFIRM)
-		return;
-
 	dev_dbg(priv->adapter->dev,
 		"rx:tdls action: peer=%pM, action=%d\n", peer, action);
 
-	sta_ptr = mwifiex_add_sta_entry(priv, peer);
-	if (!sta_ptr)
-		return;
-
 	switch (action) {
 	case WLAN_TDLS_SETUP_REQUEST:
 		if (len < (sizeof(struct ethhdr) + TDLS_REQ_FIX_LEN))
@@ -811,7 +803,7 @@
 
 		pos = buf + sizeof(struct ethhdr) + 4;
 		/* payload 1+ category 1 + action 1 + dialog 1 */
-		sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+		cap = cpu_to_le16(*(u16 *)pos);
 		ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
 		pos += 2;
 		break;
@@ -821,7 +813,7 @@
 			return;
 		/* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
 		pos = buf + sizeof(struct ethhdr) + 6;
-		sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+		cap = cpu_to_le16(*(u16 *)pos);
 		ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
 		pos += 2;
 		break;
@@ -833,10 +825,16 @@
 		ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
 		break;
 	default:
-		dev_warn(priv->adapter->dev, "Unknown TDLS frame type.\n");
+		dev_dbg(priv->adapter->dev, "Unknown TDLS frame type.\n");
 		return;
 	}
 
+	sta_ptr = mwifiex_add_sta_entry(priv, peer);
+	if (!sta_ptr)
+		return;
+
+	sta_ptr->tdls_cap.capab = cap;
+
 	for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
 		if (pos + 2 + pos[1] > end)
 			break;
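
The tdls change defers mwifiex_add_sta_entry() until the action frame has passed its length checks and the capability field has been parsed into a local, so truncated or unhandled frames no longer leave a freshly allocated station entry behind (and the unknown-frame message drops to dev_dbg). The resulting ordering, with placeholder names for the length bound:

	/* sketch of the new ordering; real fields are in the hunk above */
	switch (action) {
	case WLAN_TDLS_SETUP_REQUEST:
		if (len < minimum_len_for_request)	/* validate first */
			return;
		cap = cpu_to_le16(*(u16 *)pos);		/* stash in a local */
		break;
	default:
		return;			/* unknown frame: nothing allocated */
	}

	sta_ptr = mwifiex_add_sta_entry(priv, peer);	/* allocate last */
	if (!sta_ptr)
		return;
	sta_ptr->tdls_cap.capab = cap;			/* commit the parsed value */
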
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 832006b..573897b 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1284,6 +1284,8 @@
 	/* Arcadyan */
 	{ USB_DEVICE(0x043e, 0x7a12) },
 	{ USB_DEVICE(0x043e, 0x7a32) },
+	/* ASUS */
+	{ USB_DEVICE(0x0b05, 0x17e8) },
 	/* Azurewave */
 	{ USB_DEVICE(0x13d3, 0x3329) },
 	{ USB_DEVICE(0x13d3, 0x3365) },
@@ -1320,6 +1322,7 @@
 	{ USB_DEVICE(0x057c, 0x8501) },
 	/* Buffalo */
 	{ USB_DEVICE(0x0411, 0x0241) },
+	{ USB_DEVICE(0x0411, 0x0253) },
 	/* D-Link */
 	{ USB_DEVICE(0x2001, 0x3c1a) },
 	{ USB_DEVICE(0x2001, 0x3c21) },
@@ -1410,6 +1413,7 @@
 	{ USB_DEVICE(0x0df6, 0x0053) },
 	{ USB_DEVICE(0x0df6, 0x0069) },
 	{ USB_DEVICE(0x0df6, 0x006f) },
+	{ USB_DEVICE(0x0df6, 0x0078) },
 	/* SMC */
 	{ USB_DEVICE(0x083a, 0xa512) },
 	{ USB_DEVICE(0x083a, 0xc522) },
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index d50dfac..0bccf12 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1668,7 +1668,7 @@
 {
 	u8 thold;
 
-	if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map))
+	if (test_bit(hlid, &wl->fw_fast_lnk_map))
 		thold = wl->conf.tx.fast_link_thold;
 	else
 		thold = wl->conf.tx.slow_link_thold;
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
index 7649c75..44f0b20 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.c
+++ b/drivers/net/wireless/ti/wl18xx/cmd.c
@@ -78,3 +78,92 @@
 out:
 	return ret;
 }
+
+int wl18xx_cmd_smart_config_start(struct wl1271 *wl, u32 group_bitmap)
+{
+	struct wl18xx_cmd_smart_config_start *cmd;
+	int ret = 0;
+
+	wl1271_debug(DEBUG_CMD, "cmd smart config start group_bitmap=0x%x",
+		     group_bitmap);
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cmd->group_id_bitmask = cpu_to_le32(group_bitmap);
+
+	ret = wl1271_cmd_send(wl, CMD_SMART_CONFIG_START, cmd, sizeof(*cmd), 0);
+	if (ret < 0) {
+		wl1271_error("failed to send smart config start command");
+		goto out_free;
+	}
+
+out_free:
+	kfree(cmd);
+out:
+	return ret;
+}
+
+int wl18xx_cmd_smart_config_stop(struct wl1271 *wl)
+{
+	struct wl1271_cmd_header *cmd;
+	int ret = 0;
+
+	wl1271_debug(DEBUG_CMD, "cmd smart config stop");
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = wl1271_cmd_send(wl, CMD_SMART_CONFIG_STOP, cmd, sizeof(*cmd), 0);
+	if (ret < 0) {
+		wl1271_error("failed to send smart config stop command");
+		goto out_free;
+	}
+
+out_free:
+	kfree(cmd);
+out:
+	return ret;
+}
+
+int wl18xx_cmd_smart_config_set_group_key(struct wl1271 *wl, u16 group_id,
+					  u8 key_len, u8 *key)
+{
+	struct wl18xx_cmd_smart_config_set_group_key *cmd;
+	int ret = 0;
+
+	wl1271_debug(DEBUG_CMD, "cmd smart config set group key id=0x%x",
+		     group_id);
+
+	if (key_len != sizeof(cmd->key)) {
+		wl1271_error("invalid group key size: %d", key_len);
+		return -E2BIG;
+	}
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cmd->group_id = cpu_to_le32(group_id);
+	memcpy(cmd->key, key, key_len);
+
+	ret = wl1271_cmd_send(wl, CMD_SMART_CONFIG_SET_GROUP_KEY, cmd,
+			      sizeof(*cmd), 0);
+	if (ret < 0) {
+		wl1271_error("failed to send smart config set group key cmd");
+		goto out_free;
+	}
+
+out_free:
+	kfree(cmd);
+out:
+	return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.h b/drivers/net/wireless/ti/wl18xx/cmd.h
index 6687d10..92499e2 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.h
+++ b/drivers/net/wireless/ti/wl18xx/cmd.h
@@ -45,8 +45,25 @@
 	u8 padding[2];
 } __packed;
 
+struct wl18xx_cmd_smart_config_start {
+	struct wl1271_cmd_header header;
+
+	__le32 group_id_bitmask;
+} __packed;
+
+struct wl18xx_cmd_smart_config_set_group_key {
+	struct wl1271_cmd_header header;
+
+	__le32 group_id;
+
+	u8 key[16];
+} __packed;
+
 int wl18xx_cmd_channel_switch(struct wl1271 *wl,
 			      struct wl12xx_vif *wlvif,
 			      struct ieee80211_channel_switch *ch_switch);
-
+int wl18xx_cmd_smart_config_start(struct wl1271 *wl, u32 group_bitmap);
+int wl18xx_cmd_smart_config_stop(struct wl1271 *wl);
+int wl18xx_cmd_smart_config_set_group_key(struct wl1271 *wl, u16 group_id,
+					  u8 key_len, u8 *key);
 #endif
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index c9199d7..eb1848e 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -19,10 +19,12 @@
  *
  */
 
+#include <net/genetlink.h>
 #include "event.h"
 #include "scan.h"
 #include "../wlcore/cmd.h"
 #include "../wlcore/debug.h"
+#include "../wlcore/vendor_cmd.h"
 
 int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
 			  bool *timeout)
@@ -45,6 +47,58 @@
 	return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
 }
 
+static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel,
+					  u8 sync_band)
+{
+	struct sk_buff *skb;
+	enum ieee80211_band band;
+	int freq;
+
+	if (sync_band == WLCORE_BAND_5GHZ)
+		band = IEEE80211_BAND_5GHZ;
+	else
+		band = IEEE80211_BAND_2GHZ;
+
+	freq = ieee80211_channel_to_frequency(sync_channel, band);
+
+	wl1271_debug(DEBUG_EVENT,
+		     "SMART_CONFIG_SYNC_EVENT_ID, freq: %d (chan: %d band %d)",
+		     freq, sync_channel, sync_band);
+	skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, 20,
+					  WLCORE_VENDOR_EVENT_SC_SYNC,
+					  GFP_KERNEL);
+
+	if (nla_put_u32(skb, WLCORE_VENDOR_ATTR_FREQ, freq)) {
+		kfree_skb(skb);
+		return -EMSGSIZE;
+	}
+	cfg80211_vendor_event(skb, GFP_KERNEL);
+	return 0;
+}
+
+static int wlcore_smart_config_decode_event(struct wl1271 *wl,
+					    u8 ssid_len, u8 *ssid,
+					    u8 pwd_len, u8 *pwd)
+{
+	struct sk_buff *skb;
+
+	wl1271_debug(DEBUG_EVENT, "SMART_CONFIG_DECODE_EVENT_ID");
+	wl1271_dump_ascii(DEBUG_EVENT, "SSID:", ssid, ssid_len);
+
+	skb = cfg80211_vendor_event_alloc(wl->hw->wiphy,
+					  ssid_len + pwd_len + 20,
+					  WLCORE_VENDOR_EVENT_SC_DECODE,
+					  GFP_KERNEL);
+
+	if (nla_put(skb, WLCORE_VENDOR_ATTR_SSID, ssid_len, ssid) ||
+	    nla_put(skb, WLCORE_VENDOR_ATTR_PSK, pwd_len, pwd)) {
+		kfree_skb(skb);
+		return -EMSGSIZE;
+	}
+	cfg80211_vendor_event(skb, GFP_KERNEL);
+	return 0;
+}
+
 int wl18xx_process_mailbox_events(struct wl1271 *wl)
 {
 	struct wl18xx_event_mailbox *mbox = wl->mbox;
@@ -107,5 +161,16 @@
 	if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID)
 		wlcore_event_roc_complete(wl);
 
+	if (vector & SMART_CONFIG_SYNC_EVENT_ID)
+		wlcore_smart_config_sync_event(wl, mbox->sc_sync_channel,
+					       mbox->sc_sync_band);
+
+	if (vector & SMART_CONFIG_DECODE_EVENT_ID)
+		wlcore_smart_config_decode_event(wl,
+						 mbox->sc_ssid_len,
+						 mbox->sc_ssid,
+						 mbox->sc_pwd_len,
+						 mbox->sc_pwd);
+
 	return 0;
 }
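
The new event handlers allocate a vendor event skb, attach netlink attributes and hand the skb to cfg80211_vendor_event(). When reusing this pattern it is worth remembering that cfg80211_vendor_event_alloc() can return NULL under memory pressure, and the handlers above pass the result straight to nla_put*(). A slightly more defensive variant, using only calls already present in this hunk (sketch, not part of the patch):

	skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, 20,
					  WLCORE_VENDOR_EVENT_SC_SYNC,
					  GFP_KERNEL);
	if (!skb)
		return -ENOMEM;		/* allocation can fail; bail out early */

	if (nla_put_u32(skb, WLCORE_VENDOR_ATTR_FREQ, freq)) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	cfg80211_vendor_event(skb, GFP_KERNEL);
	return 0;
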
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index a76e98e..0680312 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -38,6 +38,8 @@
 	REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID      = BIT(18),
 	DFS_CHANNELS_CONFIG_COMPLETE_EVENT       = BIT(19),
 	PERIODIC_SCAN_REPORT_EVENT_ID            = BIT(20),
+	SMART_CONFIG_SYNC_EVENT_ID		 = BIT(22),
+	SMART_CONFIG_DECODE_EVENT_ID		 = BIT(23),
 };
 
 struct wl18xx_event_mailbox {
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index de5b4fa..7af1936 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -992,7 +992,10 @@
 		REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
 		INACTIVE_STA_EVENT_ID |
 		CHANNEL_SWITCH_COMPLETE_EVENT_ID |
-		DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
+		DFS_CHANNELS_CONFIG_COMPLETE_EVENT |
+		SMART_CONFIG_SYNC_EVENT_ID |
+		SMART_CONFIG_DECODE_EVENT_ID;
+;
 
 	wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
 
@@ -1606,15 +1609,20 @@
 	u8 thold;
 	struct wl18xx_fw_status_priv *status_priv =
 		(struct wl18xx_fw_status_priv *)wl->fw_status->priv;
-	u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
+	unsigned long suspend_bitmap;
+
+	/* if we don't have the link map yet, assume they all low prio */
+	if (!status_priv)
+		return false;
 
 	/* suspended links are never high priority */
-	if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
+	suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
+	if (test_bit(hlid, &suspend_bitmap))
 		return false;
 
 	/* the priority thresholds are taken from FW */
-	if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map) &&
-	    !test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map))
+	if (test_bit(hlid, &wl->fw_fast_lnk_map) &&
+	    !test_bit(hlid, &wl->ap_fw_ps_map))
 		thold = status_priv->tx_fast_link_prio_threshold;
 	else
 		thold = status_priv->tx_slow_link_prio_threshold;
@@ -1628,12 +1636,17 @@
 	u8 thold;
 	struct wl18xx_fw_status_priv *status_priv =
 		(struct wl18xx_fw_status_priv *)wl->fw_status->priv;
-	u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
+	unsigned long suspend_bitmap;
 
-	if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
+	/* if we don't have the link map yet, assume they all low prio */
+	if (!status_priv)
+		return true;
+
+	suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
+	if (test_bit(hlid, &suspend_bitmap))
 		thold = status_priv->tx_suspend_threshold;
-	else if (test_bit(hlid, (unsigned long *)&wl->fw_fast_lnk_map) &&
-		 !test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map))
+	else if (test_bit(hlid, &wl->fw_fast_lnk_map) &&
+		 !test_bit(hlid, &wl->ap_fw_ps_map))
 		thold = status_priv->tx_fast_stop_threshold;
 	else
 		thold = status_priv->tx_slow_stop_threshold;
@@ -1687,6 +1700,9 @@
 	.convert_hwaddr = wl18xx_convert_hwaddr,
 	.lnk_high_prio	= wl18xx_lnk_high_prio,
 	.lnk_low_prio	= wl18xx_lnk_low_prio,
+	.smart_config_start = wl18xx_cmd_smart_config_start,
+	.smart_config_stop  = wl18xx_cmd_smart_config_stop,
+	.smart_config_set_group_key = wl18xx_cmd_smart_config_set_group_key,
 };
 
 /* HT cap appropriate for wide channels in 2Ghz */
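
These hunks, together with the wlcore.h change further down that turns fw_fast_lnk_map and ap_fw_ps_map into unsigned long, let the driver call test_bit()/__clear_bit() without casting a 32-bit field to unsigned long *, a cast that is fragile on 64-bit and big-endian configurations; the new NULL check on status_priv also covers the window before the firmware status is available. For reference, a self-contained userspace stand-in showing the word-based bit addressing these helpers assume (illustration only, not kernel code):

	#include <stdio.h>
	#include <limits.h>

	#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

	/* userspace stand-in for the kernel's test_bit(): bit nr of an array
	 * of unsigned long words -- hence the new field type */
	static int test_bit(unsigned int nr, const unsigned long *addr)
	{
		return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
	}

	int main(void)
	{
		unsigned long fw_fast_lnk_map = 0;	/* mirrors the new field type */

		fw_fast_lnk_map |= 1UL << 5;		/* pretend hlid 5 is a fast link */

		printf("hlid 5 fast: %d\n", test_bit(5, &fw_fast_lnk_map));
		printf("hlid 9 fast: %d\n", test_bit(9, &fw_fast_lnk_map));
		return 0;
	}
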
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index be1ebd5..3406ffb 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -30,7 +30,7 @@
 
 static
 void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
-			     struct ieee80211_tx_rate *rate)
+			     u8 band, struct ieee80211_tx_rate *rate)
 {
 	u8 fw_rate = wl->fw_status->counters.tx_last_rate;
 
@@ -43,6 +43,8 @@
 
 	if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
 		rate->idx = fw_rate;
+		if (band == IEEE80211_BAND_5GHZ)
+			rate->idx -= CONF_HW_RATE_INDEX_6MBPS;
 		rate->flags = 0;
 	} else {
 		rate->flags = IEEE80211_TX_RC_MCS;
@@ -102,7 +104,8 @@
 	 * first pass info->control.vif while it's valid, and then fill out
 	 * the info->status structures
 	 */
-	wl18xx_get_last_tx_rate(wl, info->control.vif, &info->status.rates[0]);
+	wl18xx_get_last_tx_rate(wl, info->control.vif,
+				info->band, &info->status.rates[0]);
 
 	info->status.rates[0].count = 1; /* no data about retries */
 	info->status.ack_signal = -1;
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index eb7cfe8..6a2b880 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -38,7 +38,7 @@
 #define WL18XX_NUM_TX_DESCRIPTORS 32
 #define WL18XX_NUM_RX_DESCRIPTORS 32
 
-#define WL18XX_NUM_MAC_ADDRESSES 3
+#define WL18XX_NUM_MAC_ADDRESSES 2
 
 #define WL18XX_RX_BA_MAX_SESSIONS 13
 
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
index 4f23931..0a69c13 100644
--- a/drivers/net/wireless/ti/wlcore/Makefile
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -1,5 +1,5 @@
 wlcore-objs		= main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
-			  boot.o init.o debugfs.o scan.o sysfs.o
+			  boot.o init.o debugfs.o scan.o sysfs.o vendor_cmd.o
 
 wlcore_spi-objs 	= spi.o
 wlcore_sdio-objs	= sdio.o
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index e269c0a..05604ee 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -372,9 +372,9 @@
 	wl1271_tx_reset_link_queues(wl, *hlid);
 	wl->links[*hlid].wlvif = NULL;
 
-	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
-	    (wlvif->bss_type == BSS_TYPE_AP_BSS &&
-	     *hlid == wlvif->ap.bcast_hlid)) {
+	if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
+	    *hlid == wlvif->ap.bcast_hlid) {
+		u32 sqn_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
 		/*
 		 * save the total freed packets in the wlvif, in case this is
 		 * recovery or suspend
@@ -385,9 +385,11 @@
 		 * increment the initial seq number on recovery to account for
 		 * transmitted packets that we haven't yet got in the FW status
 		 */
+		if (wlvif->encryption_type == KEY_GEM)
+			sqn_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
+
 		if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
-			wlvif->total_freed_pkts +=
-					WL1271_TX_SQN_POST_RECOVERY_PADDING;
+			wlvif->total_freed_pkts += sqn_padding;
 	}
 
 	wl->links[*hlid].total_freed_pkts = 0;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 6788d73..ca6a28b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -170,6 +170,9 @@
 
 	/* start of 18xx specific commands */
 	CMD_DFS_CHANNEL_CONFIG		= 60,
+	CMD_SMART_CONFIG_START		= 61,
+	CMD_SMART_CONFIG_STOP		= 62,
+	CMD_SMART_CONFIG_SET_GROUP_KEY	= 63,
 
 	MAX_COMMAND_ID = 0xFFFF,
 };
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 89893c7..0be21f6 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -496,7 +496,7 @@
 	DRIVER_STATE_PRINT_INT(sg_enabled);
 	DRIVER_STATE_PRINT_INT(enable_11a);
 	DRIVER_STATE_PRINT_INT(noise);
-	DRIVER_STATE_PRINT_HEX(ap_fw_ps_map);
+	DRIVER_STATE_PRINT_LHEX(ap_fw_ps_map);
 	DRIVER_STATE_PRINT_LHEX(ap_ps_map);
 	DRIVER_STATE_PRINT_HEX(quirks);
 	DRIVER_STATE_PRINT_HEX(irq);
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 1555ff9..aa9f82c 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -260,4 +260,31 @@
 	return wl->ops->lnk_low_prio(wl, hlid, lnk);
 }
 
+static inline int
+wlcore_smart_config_start(struct wl1271 *wl, u32 group_bitmap)
+{
+	if (!wl->ops->smart_config_start)
+		return -EINVAL;
+
+	return wl->ops->smart_config_start(wl, group_bitmap);
+}
+
+static inline int
+wlcore_smart_config_stop(struct wl1271 *wl)
+{
+	if (!wl->ops->smart_config_stop)
+		return -EINVAL;
+
+	return wl->ops->smart_config_stop(wl);
+}
+
+static inline int
+wlcore_smart_config_set_group_key(struct wl1271 *wl, u16 group_id,
+				  u8 key_len, u8 *key)
+{
+	if (!wl->ops->smart_config_set_group_key)
+		return -EINVAL;
+
+	return wl->ops->smart_config_set_group_key(wl, group_id, key_len, key);
+}
 #endif
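
The wrappers follow the existing hw_ops convention: an op left NULL by a lower-level driver makes the wrapper return -EINVAL instead of being dereferenced, which keeps the smart-config vendor commands harmless on chips that do not implement them. A hypothetical caller (the function below and the group bitmap value are made up; wlcore_smart_config_start() and wl1271_error() are from this series) would look like:

	static int example_start_smart_config(struct wl1271 *wl)
	{
		int ret;

		ret = wlcore_smart_config_start(wl, BIT(0));	/* group 0, illustrative */
		if (ret < 0)
			wl1271_error("smart config start failed: %d", ret);

		return ret;
	}
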
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 48f8386..575c8f6 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -37,6 +37,7 @@
 #include "init.h"
 #include "debugfs.h"
 #include "testmode.h"
+#include "vendor_cmd.h"
 #include "scan.h"
 #include "hw_ops.h"
 #include "sysfs.h"
@@ -332,7 +333,7 @@
 {
 	bool fw_ps;
 
-	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
 
 	/*
 	 * Wake up from high level PS if the STA is asleep with too little
@@ -359,13 +360,13 @@
 					   struct wl12xx_vif *wlvif,
 					   struct wl_fw_status *status)
 {
-	u32 cur_fw_ps_map;
+	unsigned long cur_fw_ps_map;
 	u8 hlid;
 
 	cur_fw_ps_map = status->link_ps_bitmap;
 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
 		wl1271_debug(DEBUG_PSM,
-			     "link ps prev 0x%x cur 0x%x changed 0x%x",
+			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
 			     wl->ap_fw_ps_map, cur_fw_ps_map,
 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
 
@@ -898,6 +899,44 @@
 	wlcore_set_partition(wl, &old_part);
 }
 
+static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				   u8 hlid, struct ieee80211_sta *sta)
+{
+	struct wl1271_station *wl_sta;
+	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
+
+	wl_sta = (void *)sta->drv_priv;
+	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
+
+	/*
+	 * increment the initial seq number on recovery to account for
+	 * transmitted packets that we haven't yet got in the FW status
+	 */
+	if (wlvif->encryption_type == KEY_GEM)
+		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
+
+	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
+		wl_sta->total_freed_pkts += sqn_recovery_padding;
+}
+
+static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
+					struct wl12xx_vif *wlvif,
+					u8 hlid, const u8 *addr)
+{
+	struct ieee80211_sta *sta;
+	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
+	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
+		    is_zero_ether_addr(addr)))
+		return;
+
+	rcu_read_lock();
+	sta = ieee80211_find_sta(vif, addr);
+	if (sta)
+		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
+	rcu_read_unlock();
+}
+
 static void wlcore_print_recovery(struct wl1271 *wl)
 {
 	u32 pc = 0;
@@ -961,6 +1000,13 @@
 		wlvif = list_first_entry(&wl->wlvif_list,
 				       struct wl12xx_vif, list);
 		vif = wl12xx_wlvif_to_vif(wlvif);
+
+		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
+						    vif->bss_conf.bssid);
+		}
+
 		__wl1271_op_remove_interface(wl, vif, false);
 	}
 
@@ -4703,36 +4749,18 @@
 
 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
 {
-	struct wl1271_station *wl_sta;
-	struct ieee80211_sta *sta;
-	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
-
 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
 		return;
 
 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
 	__clear_bit(hlid, &wl->ap_ps_map);
-	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+	__clear_bit(hlid, &wl->ap_fw_ps_map);
 
 	/*
 	 * save the last used PN in the private part of iee80211_sta,
 	 * in case of recovery/suspend
 	 */
-	rcu_read_lock();
-	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
-	if (sta) {
-		wl_sta = (void *)sta->drv_priv;
-		wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
-
-		/*
-		 * increment the initial seq number on recovery to account for
-		 * transmitted packets that we haven't yet got in the FW status
-		 */
-		if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
-			wl_sta->total_freed_pkts +=
-					WL1271_TX_SQN_POST_RECOVERY_PADDING;
-	}
-	rcu_read_unlock();
+	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
 
 	wl12xx_free_link(wl, wlvif, &hlid);
 	wl->active_sta_count--;
@@ -4915,6 +4943,21 @@
 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
 	}
 
+	/* save seq number on disassoc (suspend) */
+	if (is_sta &&
+	    old_state == IEEE80211_STA_ASSOC &&
+	    new_state == IEEE80211_STA_AUTH) {
+		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
+		wlvif->total_freed_pkts = 0;
+	}
+
+	/* restore seq number on assoc (resume) */
+	if (is_sta &&
+	    old_state == IEEE80211_STA_AUTH &&
+	    new_state == IEEE80211_STA_ASSOC) {
+		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
+	}
+
 	/* clear ROCs on failure or authorization */
 	if (is_sta &&
 	    (new_state == IEEE80211_STA_AUTHORIZED ||
@@ -5149,6 +5192,10 @@
 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
 		wl12xx_for_each_wlvif_sta(wl, wlvif) {
 			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
+			if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+				continue;
+
 			ieee80211_chswitch_done(vif, false);
 		}
 		goto out;
@@ -5164,6 +5211,9 @@
 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
 		unsigned long delay_usec;
 
+		if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+			continue;
+
 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
 		if (ret)
 			goto out_sleep;
@@ -5619,7 +5669,7 @@
 		memcpy(&wl->addresses[idx], &wl->addresses[0],
 		       sizeof(wl->addresses[0]));
 		/* LAA bit */
-		wl->addresses[idx].addr[2] |= BIT(1);
+		wl->addresses[idx].addr[0] |= BIT(1);
 	}
 
 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
@@ -5764,7 +5814,7 @@
 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
 		sizeof(struct ieee80211_header);
 
-	wl->hw->wiphy->max_remain_on_channel_duration = 5000;
+	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
 
 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
@@ -5833,6 +5883,9 @@
 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
 
+	/* register vendor commands */
+	wlcore_set_vendor_commands(wl->hw->wiphy);
+
 	SET_IEEE80211_DEV(wl->hw, wl->dev);
 
 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 40b4311..f0ac361 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -126,7 +126,7 @@
 	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
 		return;
 
-	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
 	tx_pkts = wl->links[hlid].allocated_pkts;
 
 	/*
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
new file mode 100644
index 0000000..ad86a48
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -0,0 +1,197 @@
+/*
+ * This file is part of wlcore
+ *
+ * Copyright (C) 2014 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+#include <net/netlink.h>
+
+#include "wlcore.h"
+#include "debug.h"
+#include "ps.h"
+#include "hw_ops.h"
+#include "vendor_cmd.h"
+
+static const
+struct nla_policy wlcore_vendor_attr_policy[NUM_WLCORE_VENDOR_ATTR] = {
+	[WLCORE_VENDOR_ATTR_FREQ]		= { .type = NLA_U32 },
+	[WLCORE_VENDOR_ATTR_GROUP_ID]		= { .type = NLA_U32 },
+	[WLCORE_VENDOR_ATTR_GROUP_KEY]		= { .type = NLA_U32,
+						    .len = WLAN_MAX_KEY_LEN },
+};
+
+static int
+wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
+				     struct wireless_dev *wdev,
+				     const void *data, int data_len)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct wl1271 *wl = hw->priv;
+	struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR];
+	int ret;
+
+	wl1271_debug(DEBUG_CMD, "vendor cmd smart config start");
+
+	if (!data)
+		return -EINVAL;
+
+	ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
+			wlcore_vendor_attr_policy);
+	if (ret)
+		return ret;
+
+	if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID])
+		return -EINVAL;
+
+	mutex_lock(&wl->mutex);
+
+	if (unlikely(wl->state != WLCORE_STATE_ON)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		goto out;
+
+	ret = wlcore_smart_config_start(wl,
+			nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]));
+
+	wl1271_ps_elp_sleep(wl);
+out:
+	mutex_unlock(&wl->mutex);
+
+	return 0;
+}
+
+static int
+wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy,
+				    struct wireless_dev *wdev,
+				    const void *data, int data_len)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct wl1271 *wl = hw->priv;
+	int ret;
+
+	wl1271_debug(DEBUG_CMD, "testmode cmd smart config stop");
+
+	mutex_lock(&wl->mutex);
+
+	if (unlikely(wl->state != WLCORE_STATE_ON)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		goto out;
+
+	ret = wlcore_smart_config_stop(wl);
+
+	wl1271_ps_elp_sleep(wl);
+out:
+	mutex_unlock(&wl->mutex);
+
+	return ret;
+}
+
+static int
+wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
+					     struct wireless_dev *wdev,
+					     const void *data, int data_len)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct wl1271 *wl = hw->priv;
+	struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR];
+	int ret;
+
+	wl1271_debug(DEBUG_CMD, "testmode cmd smart config set group key");
+
+	if (!data)
+		return -EINVAL;
+
+	ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
+			wlcore_vendor_attr_policy);
+	if (ret)
+		return ret;
+
+	if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID] ||
+	    !tb[WLCORE_VENDOR_ATTR_GROUP_KEY])
+		return -EINVAL;
+
+	mutex_lock(&wl->mutex);
+
+	if (unlikely(wl->state != WLCORE_STATE_ON)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		goto out;
+
+	ret = wlcore_smart_config_set_group_key(wl,
+			nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]),
+			nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]),
+			nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]));
+
+	wl1271_ps_elp_sleep(wl);
+out:
+	mutex_unlock(&wl->mutex);
+
+	return ret;
+}
+
+static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
+	{
+		.info = {
+			.vendor_id = TI_OUI,
+			.subcmd = WLCORE_VENDOR_CMD_SMART_CONFIG_START,
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wlcore_vendor_cmd_smart_config_start,
+	},
+	{
+		.info = {
+			.vendor_id = TI_OUI,
+			.subcmd = WLCORE_VENDOR_CMD_SMART_CONFIG_STOP,
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wlcore_vendor_cmd_smart_config_stop,
+	},
+	{
+		.info = {
+			.vendor_id = TI_OUI,
+			.subcmd = WLCORE_VENDOR_CMD_SMART_CONFIG_SET_GROUP_KEY,
+		},
+		.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wlcore_vendor_cmd_smart_config_set_group_key,
+	},
+};
+
+static const struct nl80211_vendor_cmd_info wlcore_vendor_events[] = {
+	{
+		.vendor_id = TI_OUI,
+		.subcmd = WLCORE_VENDOR_EVENT_SC_SYNC,
+	},
+	{
+		.vendor_id = TI_OUI,
+		.subcmd = WLCORE_VENDOR_EVENT_SC_DECODE,
+	},
+};
+
+void wlcore_set_vendor_commands(struct wiphy *wiphy)
+{
+	wiphy->vendor_commands = wlcore_vendor_commands;
+	wiphy->n_vendor_commands = ARRAY_SIZE(wlcore_vendor_commands);
+	wiphy->vendor_events = wlcore_vendor_events;
+	wiphy->n_vendor_events = ARRAY_SIZE(wlcore_vendor_events);
+}
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.h b/drivers/net/wireless/ti/wlcore/vendor_cmd.h
new file mode 100644
index 0000000..6e0c15e
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.h
@@ -0,0 +1,45 @@
+/*
+ * This file is part of wlcore
+ *
+ * Copyright (C) 2014 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef __WLCORE_VENDOR_H__
+#define __WLCORE_VENDOR_H__
+
+#ifdef __KERNEL__
+void wlcore_set_vendor_commands(struct wiphy *wiphy);
+#endif
+
+#define TI_OUI	0x080028
+
+enum wlcore_vendor_commands {
+	WLCORE_VENDOR_CMD_SMART_CONFIG_START,
+	WLCORE_VENDOR_CMD_SMART_CONFIG_STOP,
+	WLCORE_VENDOR_CMD_SMART_CONFIG_SET_GROUP_KEY,
+
+	NUM_WLCORE_VENDOR_CMD,
+	MAX_WLCORE_VENDOR_CMD = NUM_WLCORE_VENDOR_CMD - 1
+};
+
+enum wlcore_vendor_attributes {
+	WLCORE_VENDOR_ATTR_FREQ,
+	WLCORE_VENDOR_ATTR_PSK,
+	WLCORE_VENDOR_ATTR_SSID,
+	WLCORE_VENDOR_ATTR_GROUP_ID,
+	WLCORE_VENDOR_ATTR_GROUP_KEY,
+
+	NUM_WLCORE_VENDOR_ATTR,
+	MAX_WLCORE_VENDOR_ATTR = NUM_WLCORE_VENDOR_ATTR - 1
+};
+
+enum wlcore_vendor_events {
+	WLCORE_VENDOR_EVENT_SC_SYNC,
+	WLCORE_VENDOR_EVENT_SC_DECODE,
+};
+
+#endif /* __WLCORE_VENDOR_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 7132050..df78cf1 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -117,6 +117,10 @@
 			      struct wl1271_link *lnk);
 	bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid,
 			     struct wl1271_link *lnk);
+	int (*smart_config_start)(struct wl1271 *wl, u32 group_bitmap);
+	int (*smart_config_stop)(struct wl1271 *wl);
+	int (*smart_config_set_group_key)(struct wl1271 *wl, u16 group_id,
+					  u8 key_len, u8 *key);
 };
 
 enum wlcore_partitions {
@@ -384,10 +388,10 @@
 	int active_link_count;
 
 	/* Fast/slow links bitmap according to FW */
-	u32 fw_fast_lnk_map;
+	unsigned long fw_fast_lnk_map;
 
 	/* AP-mode - a bitmap of links currently in PS mode according to FW */
-	u32 ap_fw_ps_map;
+	unsigned long ap_fw_ps_map;
 
 	/* AP-mode - a bitmap of links currently in PS mode in mac80211 */
 	unsigned long ap_ps_map;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index c2c34a8..0e52556 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -45,6 +45,9 @@
 #define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
 #define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
 #define WL1271_TX_SQN_POST_RECOVERY_PADDING 0xff
+/* Use smaller padding for GEM, as some  APs have issues when it's too big */
+#define WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM 0x20
+
 
 #define WL1271_CIPHER_SUITE_GEM 0x00147201
 
@@ -324,6 +327,7 @@
 	 * total freed FW packets on the link to the STA - used for tracking the
 	 * AES/TKIP PN across recoveries. Re-initialized each time from the
 	 * wl1271_station structure.
+	 * Used in both AP and STA mode.
 	 */
 	u64 total_freed_pkts;
 };
@@ -460,21 +464,19 @@
 	struct delayed_work pending_auth_complete_work;
 
 	/*
+	 * total freed FW packets on the link.
+	 * For STA this holds the PN of the link to the AP.
+	 * For AP this holds the PN of the broadcast link.
+	 */
+	u64 total_freed_pkts;
+
+	/*
 	 * This struct must be last!
 	 * data that has to be saved acrossed reconfigs (e.g. recovery)
 	 * should be declared in this struct.
 	 */
 	struct {
 		u8 persistent[0];
-
-		/*
-		 * total freed FW packets on the link - used for
-		 * storing the AES/TKIP PN during recovery, as this
-		 * structure is not zeroed out.
-		 * For STA this holds the PN of the link to the AP.
-		 * For AP this holds the PN of the broadcast link.
-		 */
-		u64 total_freed_pkts;
 	};
 };
 
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig
index 96c8e1d..95920581 100644
--- a/drivers/net/wireless/zd1211rw/Kconfig
+++ b/drivers/net/wireless/zd1211rw/Kconfig
@@ -3,11 +3,11 @@
 	depends on USB && MAC80211
 	select FW_LOADER
 	---help---
-	  This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless
+	  This is a driver for the ZyDAS ZD1211/ZD1211B wireless
 	  chip, present in many USB-wireless adapters.
 
 	  Device firmware is required alongside this driver. You can download
-	  the firmware distribution from http://zd1211.ath.cx/get-firmware
+	  the firmware distribution from http://sf.net/projects/zd1211/files/
 
 config ZD1211RW_DEBUG
 	bool "ZyDAS ZD1211 debugging"
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b777d8f..9aa012e 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,6 +26,54 @@
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #include <asm/page.h>
 
+/*
+ * of_fdt_limit_memory - limit the number of regions in the /memory node
+ * @limit: maximum entries
+ *
+ * Adjust the flattened device tree to have at most 'limit' number of
+ * memory entries in the /memory node. This function may be called
+ * any time after initial_boot_param is set.
+ */
+void of_fdt_limit_memory(int limit)
+{
+	int memory;
+	int len;
+	const void *val;
+	int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
+	int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+	const uint32_t *addr_prop;
+	const uint32_t *size_prop;
+	int root_offset;
+	int cell_size;
+
+	root_offset = fdt_path_offset(initial_boot_params, "/");
+	if (root_offset < 0)
+		return;
+
+	addr_prop = fdt_getprop(initial_boot_params, root_offset,
+				"#address-cells", NULL);
+	if (addr_prop)
+		nr_address_cells = fdt32_to_cpu(*addr_prop);
+
+	size_prop = fdt_getprop(initial_boot_params, root_offset,
+				"#size-cells", NULL);
+	if (size_prop)
+		nr_size_cells = fdt32_to_cpu(*size_prop);
+
+	cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
+
+	memory = fdt_path_offset(initial_boot_params, "/memory");
+	if (memory > 0) {
+		val = fdt_getprop(initial_boot_params, memory, "reg", &len);
+		if (len > limit*cell_size) {
+			len = limit*cell_size;
+			pr_debug("Limiting number of entries to %d\n", limit);
+			fdt_setprop(initial_boot_params, memory, "reg", val,
+					len);
+		}
+	}
+}
+
 /**
  * of_fdt_is_compatible - Return true if given node from the given blob has
  * compat in its compatible list
@@ -937,7 +985,7 @@
 }
 #endif
 
-bool __init early_init_dt_scan(void *params)
+bool __init early_init_dt_verify(void *params)
 {
 	if (!params)
 		return false;
@@ -951,6 +999,12 @@
 		return false;
 	}
 
+	return true;
+}
+
+
+void __init early_init_dt_scan_nodes(void)
+{
 	/* Retrieve various information from the /chosen node */
 	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
 
@@ -959,7 +1013,17 @@
 
 	/* Setup memory, calling early_init_dt_add_memory_arch */
 	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
 
+bool __init early_init_dt_scan(void *params)
+{
+	bool status;
+
+	status = early_init_dt_verify(params);
+	if (!status)
+		return false;
+
+	early_init_dt_scan_nodes();
 	return true;
 }
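
Splitting early_init_dt_scan() into early_init_dt_verify() plus early_init_dt_scan_nodes() gives architecture code a window between header validation and the generic /chosen and /memory scans, and of_fdt_limit_memory() above is the kind of fixup meant to run in that window. A hedged sketch of how an architecture might use the split (the wrapper function and the limit of 4 entries are illustrative, not from this patch):

	/* hypothetical arch boot-path sketch, not part of this patch */
	void __init example_arch_setup_fdt(void *dt)
	{
		if (!early_init_dt_verify(dt))
			panic("invalid device tree blob");

		/* tree is mapped and sane; arch-specific trimming can happen
		 * here, e.g. capping the number of /memory "reg" entries */
		of_fdt_limit_memory(4);

		early_init_dt_scan_nodes();
	}
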
 
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 2872ece..44333bd 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -5,6 +5,12 @@
 # Parport configuration.
 #
 
+config ARCH_MIGHT_HAVE_PC_PARPORT
+	bool
+	help
+	  Select this config option from the architecture Kconfig if
+	  the architecture might have PC parallel port hardware.
+
 menuconfig PARPORT
 	tristate "Parallel port support"
 	depends on HAS_IOMEM
@@ -31,12 +37,6 @@
 
 	  If unsure, say Y.
 
-config ARCH_MIGHT_HAVE_PC_PARPORT
-	bool
-	help
-	  Select this config option from the architecture Kconfig if
-	  the architecture might have PC parallel port hardware.
-
 if PARPORT
 
 config PARPORT_PC
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1bd6363bc9..9f43916 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1431,7 +1431,7 @@
 
 	status = readl(info->irqmux_base);
 
-	for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+	for_each_set_bit(n, &status, info->nbanks)
 		__gpio_irq_handler(&info->banks[n]);
 
 	chained_irq_exit(chip, desc);
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index a8dc95e..0f28c08 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -326,13 +326,13 @@
 	return err;
 }
 
-static s8 r123_extract_antgain(u8 sprom_revision, const u16 *in,
-			       u16 mask, u16 shift)
+static s8 sprom_extract_antgain(u8 sprom_revision, const u16 *in, u16 offset,
+				u16 mask, u16 shift)
 {
 	u16 v;
 	u8 gain;
 
-	v = in[SPOFF(SSB_SPROM1_AGAIN)];
+	v = in[SPOFF(offset)];
 	gain = (v & mask) >> shift;
 	if (gain == 0xFF)
 		gain = 2; /* If unset use 2dBm */
@@ -416,12 +416,14 @@
 	SPEX(alpha2[1], SSB_SPROM1_CCODE, 0x00ff, 0);
 
 	/* Extract the antenna gain values. */
-	out->antenna_gain.a0 = r123_extract_antgain(out->revision, in,
-						    SSB_SPROM1_AGAIN_BG,
-						    SSB_SPROM1_AGAIN_BG_SHIFT);
-	out->antenna_gain.a1 = r123_extract_antgain(out->revision, in,
-						    SSB_SPROM1_AGAIN_A,
-						    SSB_SPROM1_AGAIN_A_SHIFT);
+	out->antenna_gain.a0 = sprom_extract_antgain(out->revision, in,
+						     SSB_SPROM1_AGAIN,
+						     SSB_SPROM1_AGAIN_BG,
+						     SSB_SPROM1_AGAIN_BG_SHIFT);
+	out->antenna_gain.a1 = sprom_extract_antgain(out->revision, in,
+						     SSB_SPROM1_AGAIN,
+						     SSB_SPROM1_AGAIN_A,
+						     SSB_SPROM1_AGAIN_A_SHIFT);
 	if (out->revision >= 2)
 		sprom_extract_r23(out, in);
 }
@@ -468,7 +470,15 @@
 
 static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
 {
+	static const u16 pwr_info_offset[] = {
+		SSB_SPROM4_PWR_INFO_CORE0, SSB_SPROM4_PWR_INFO_CORE1,
+		SSB_SPROM4_PWR_INFO_CORE2, SSB_SPROM4_PWR_INFO_CORE3
+	};
 	u16 il0mac_offset;
+	int i;
+
+	BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) !=
+		     ARRAY_SIZE(out->core_pwr_info));
 
 	if (out->revision == 4)
 		il0mac_offset = SSB_SPROM4_IL0MAC;
@@ -524,14 +534,59 @@
 	}
 
 	/* Extract the antenna gain values. */
-	SPEX(antenna_gain.a0, SSB_SPROM4_AGAIN01,
-	     SSB_SPROM4_AGAIN0, SSB_SPROM4_AGAIN0_SHIFT);
-	SPEX(antenna_gain.a1, SSB_SPROM4_AGAIN01,
-	     SSB_SPROM4_AGAIN1, SSB_SPROM4_AGAIN1_SHIFT);
-	SPEX(antenna_gain.a2, SSB_SPROM4_AGAIN23,
-	     SSB_SPROM4_AGAIN2, SSB_SPROM4_AGAIN2_SHIFT);
-	SPEX(antenna_gain.a3, SSB_SPROM4_AGAIN23,
-	     SSB_SPROM4_AGAIN3, SSB_SPROM4_AGAIN3_SHIFT);
+	out->antenna_gain.a0 = sprom_extract_antgain(out->revision, in,
+						     SSB_SPROM4_AGAIN01,
+						     SSB_SPROM4_AGAIN0,
+						     SSB_SPROM4_AGAIN0_SHIFT);
+	out->antenna_gain.a1 = sprom_extract_antgain(out->revision, in,
+						     SSB_SPROM4_AGAIN01,
+						     SSB_SPROM4_AGAIN1,
+						     SSB_SPROM4_AGAIN1_SHIFT);
+	out->antenna_gain.a2 = sprom_extract_antgain(out->revision, in,
+						     SSB_SPROM4_AGAIN23,
+						     SSB_SPROM4_AGAIN2,
+						     SSB_SPROM4_AGAIN2_SHIFT);
+	out->antenna_gain.a3 = sprom_extract_antgain(out->revision, in,
+						     SSB_SPROM4_AGAIN23,
+						     SSB_SPROM4_AGAIN3,
+						     SSB_SPROM4_AGAIN3_SHIFT);
+
+	/* Extract cores power info info */
+	for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
+		u16 o = pwr_info_offset[i];
+
+		SPEX(core_pwr_info[i].itssi_2g, o + SSB_SPROM4_2G_MAXP_ITSSI,
+			SSB_SPROM4_2G_ITSSI, SSB_SPROM4_2G_ITSSI_SHIFT);
+		SPEX(core_pwr_info[i].maxpwr_2g, o + SSB_SPROM4_2G_MAXP_ITSSI,
+			SSB_SPROM4_2G_MAXP, 0);
+
+		SPEX(core_pwr_info[i].pa_2g[0], o + SSB_SPROM4_2G_PA_0, ~0, 0);
+		SPEX(core_pwr_info[i].pa_2g[1], o + SSB_SPROM4_2G_PA_1, ~0, 0);
+		SPEX(core_pwr_info[i].pa_2g[2], o + SSB_SPROM4_2G_PA_2, ~0, 0);
+		SPEX(core_pwr_info[i].pa_2g[3], o + SSB_SPROM4_2G_PA_3, ~0, 0);
+
+		SPEX(core_pwr_info[i].itssi_5g, o + SSB_SPROM4_5G_MAXP_ITSSI,
+			SSB_SPROM4_5G_ITSSI, SSB_SPROM4_5G_ITSSI_SHIFT);
+		SPEX(core_pwr_info[i].maxpwr_5g, o + SSB_SPROM4_5G_MAXP_ITSSI,
+			SSB_SPROM4_5G_MAXP, 0);
+		SPEX(core_pwr_info[i].maxpwr_5gh, o + SSB_SPROM4_5GHL_MAXP,
+			SSB_SPROM4_5GH_MAXP, 0);
+		SPEX(core_pwr_info[i].maxpwr_5gl, o + SSB_SPROM4_5GHL_MAXP,
+			SSB_SPROM4_5GL_MAXP, SSB_SPROM4_5GL_MAXP_SHIFT);
+
+		SPEX(core_pwr_info[i].pa_5gl[0], o + SSB_SPROM4_5GL_PA_0, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gl[1], o + SSB_SPROM4_5GL_PA_1, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gl[2], o + SSB_SPROM4_5GL_PA_2, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gl[3], o + SSB_SPROM4_5GL_PA_3, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5g[0], o + SSB_SPROM4_5G_PA_0, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5g[1], o + SSB_SPROM4_5G_PA_1, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5g[2], o + SSB_SPROM4_5G_PA_2, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5g[3], o + SSB_SPROM4_5G_PA_3, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gh[0], o + SSB_SPROM4_5GH_PA_0, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gh[1], o + SSB_SPROM4_5GH_PA_1, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gh[2], o + SSB_SPROM4_5GH_PA_2, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gh[3], o + SSB_SPROM4_5GH_PA_3, ~0, 0);
+	}
 
 	sprom_extract_r458(out, in);
 
@@ -621,14 +676,22 @@
 	SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, 0xFFFFFFFF, 0);
 
 	/* Extract the antenna gain values. */
-	SPEX(antenna_gain.a0, SSB_SPROM8_AGAIN01,
-	     SSB_SPROM8_AGAIN0, SSB_SPROM8_AGAIN0_SHIFT);
-	SPEX(antenna_gain.a1, SSB_SPROM8_AGAIN01,
-	     SSB_SPROM8_AGAIN1, SSB_SPROM8_AGAIN1_SHIFT);
-	SPEX(antenna_gain.a2, SSB_SPROM8_AGAIN23,
-	     SSB_SPROM8_AGAIN2, SSB_SPROM8_AGAIN2_SHIFT);
-	SPEX(antenna_gain.a3, SSB_SPROM8_AGAIN23,
-	     SSB_SPROM8_AGAIN3, SSB_SPROM8_AGAIN3_SHIFT);
+	out->antenna_gain.a0 = sprom_extract_antgain(out->revision, in,
+						     SSB_SPROM8_AGAIN01,
+						     SSB_SPROM8_AGAIN0,
+						     SSB_SPROM8_AGAIN0_SHIFT);
+	out->antenna_gain.a1 = sprom_extract_antgain(out->revision, in,
+						     SSB_SPROM8_AGAIN01,
+						     SSB_SPROM8_AGAIN1,
+						     SSB_SPROM8_AGAIN1_SHIFT);
+	out->antenna_gain.a2 = sprom_extract_antgain(out->revision, in,
+						     SSB_SPROM8_AGAIN23,
+						     SSB_SPROM8_AGAIN2,
+						     SSB_SPROM8_AGAIN2_SHIFT);
+	out->antenna_gain.a3 = sprom_extract_antgain(out->revision, in,
+						     SSB_SPROM8_AGAIN23,
+						     SSB_SPROM8_AGAIN3,
+						     SSB_SPROM8_AGAIN3_SHIFT);
 
 	/* Extract cores power info info */
 	for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
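
Renaming r123_extract_antgain() to sprom_extract_antgain() and passing the word offset explicitly lets SPROM revisions 4/5 and 8 reuse the same helper, including its 0xFF-means-unprogrammed default, instead of the raw SPEX() extractions they used before. Stripped of the SPOFF() addressing and the revision handling not visible in this hunk, the extraction is a mask-and-shift with a sentinel, e.g. (userspace illustration with made-up SPROM words):

	#include <stdio.h>
	#include <stdint.h>

	/* simplified illustration only; the in-kernel helper also takes the
	 * SPROM revision and uses SPOFF() to turn byte offsets into indices */
	static int8_t extract_antgain(const uint16_t *in, unsigned int word,
				      uint16_t mask, unsigned int shift)
	{
		uint8_t gain = (in[word] & mask) >> shift;

		if (gain == 0xFF)	/* 0xFF means "not programmed" */
			gain = 2;	/* default to 2 dBm, as in the driver */

		return (int8_t)gain;
	}

	int main(void)
	{
		uint16_t sprom[1] = { 0x05FF };		/* made-up SPROM word */

		printf("a0 gain: %d\n", extract_antgain(sprom, 0, 0x00FF, 0)); /* unset -> 2 */
		printf("a1 gain: %d\n", extract_antgain(sprom, 0, 0xFF00, 8)); /* 5 */
		return 0;
	}
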
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 5d4de88..eeba754 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1195,18 +1195,20 @@
 int gnttab_init(void)
 {
 	int i;
+	unsigned long max_nr_grant_frames;
 	unsigned int max_nr_glist_frames, nr_glist_frames;
 	unsigned int nr_init_grefs;
 	int ret;
 
 	gnttab_request_version();
+	max_nr_grant_frames = gnttab_max_grant_frames();
 	nr_grant_frames = 1;
 
 	/* Determine the maximum number of frames required for the
 	 * grant reference free list on the current hypervisor.
 	 */
 	BUG_ON(grefs_per_grant_frame == 0);
-	max_nr_glist_frames = (gnttab_max_grant_frames() *
+	max_nr_glist_frames = (max_nr_grant_frames *
 			       grefs_per_grant_frame / RPP);
 
 	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1223,6 +1225,11 @@
 		}
 	}
 
+	ret = arch_gnttab_init(max_nr_grant_frames,
+			       nr_status_frames(max_nr_grant_frames));
+	if (ret < 0)
+		goto ini_nomem;
+
 	if (gnttab_setup() < 0) {
 		ret = -ENODEV;
 		goto ini_nomem;
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 42dd2e4..35de0c0 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -55,13 +55,13 @@
 	afs_uuid.time_low = uuidtime;
 	afs_uuid.time_mid = uuidtime >> 32;
 	afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
-	afs_uuid.time_hi_and_version = AFS_UUID_VERSION_TIME;
+	afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
 
 	get_random_bytes(&clockseq, 2);
 	afs_uuid.clock_seq_low = clockseq;
 	afs_uuid.clock_seq_hi_and_reserved =
 		(clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
-	afs_uuid.clock_seq_hi_and_reserved = AFS_UUID_VARIANT_STD;
+	afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
 
 	_debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
 	       afs_uuid.time_low,
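
The afs fix replaces two plain assignments with '|=' so that AFS_UUID_VERSION_TIME and AFS_UUID_VARIANT_STD are OR-ed on top of the timestamp and clock-sequence bits instead of overwriting them. A stand-alone reproduction of the difference (constant value and field width chosen only for illustration):

	#include <stdio.h>
	#include <stdint.h>

	#define VERSION_TIME	0x1000	/* illustrative stand-in for AFS_UUID_VERSION_TIME */

	int main(void)
	{
		uint16_t time_hi = 0x0abc;	/* pretend high timestamp bits */
		uint16_t wrong, right;

		wrong = time_hi;
		wrong = VERSION_TIME;		/* '=' throws the timestamp away */

		right = time_hi;
		right |= VERSION_TIME;		/* '|=' keeps it and adds the version */

		printf("wrong: 0x%04x  right: 0x%04x\n", wrong, right);
		return 0;
	}
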
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index e822890..afec645 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -59,7 +59,7 @@
 #include <linux/gfp.h>
 
 #include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_sock.h>
 #include <net/bluetooth/rfcomm.h>
 
 #include <linux/capi.h>
diff --git a/fs/coredump.c b/fs/coredump.c
index 0b2528f..a93f7e6 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -306,7 +306,7 @@
 	if (unlikely(nr < 0))
 		return nr;
 
-	tsk->flags = PF_DUMPCORE;
+	tsk->flags |= PF_DUMPCORE;
 	if (atomic_read(&mm->mm_users) == nr + 1)
 		goto done;
 	/*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 98040ba..194d0d1 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -198,9 +198,8 @@
  * L1 cache.
  */
 static inline struct page *dio_get_page(struct dio *dio,
-		struct dio_submit *sdio, size_t *from, size_t *to)
+					struct dio_submit *sdio)
 {
-	int n;
 	if (dio_pages_present(sdio) == 0) {
 		int ret;
 
@@ -209,10 +208,7 @@
 			return ERR_PTR(ret);
 		BUG_ON(dio_pages_present(sdio) == 0);
 	}
-	n = sdio->head++;
-	*from = n ? 0 : sdio->from;
-	*to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
-	return dio->pages[n];
+	return dio->pages[sdio->head];
 }
 
 /**
@@ -911,11 +907,15 @@
 	while (sdio->block_in_file < sdio->final_block_in_request) {
 		struct page *page;
 		size_t from, to;
-		page = dio_get_page(dio, sdio, &from, &to);
+
+		page = dio_get_page(dio, sdio);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			goto out;
 		}
+		from = sdio->head ? 0 : sdio->from;
+		to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
+		sdio->head++;
 
 		while (from < to) {
 			unsigned this_chunk_bytes;	/* # of bytes mapped */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 8474028..03246cd 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -907,9 +907,6 @@
 				fc->writeback_cache = 1;
 			if (arg->time_gran && arg->time_gran <= 1000000000)
 				fc->sb->s_time_gran = arg->time_gran;
-			else
-				fc->sb->s_time_gran = 1000000000;
-
 		} else {
 			ra_pages = fc->max_read / PAGE_CACHE_SIZE;
 			fc->no_lock = 1;
@@ -938,7 +935,7 @@
 		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
 		FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
 		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
-		FUSE_WRITEBACK_CACHE;
+		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
 	req->in.h.opcode = FUSE_INIT;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(*arg);
diff --git a/fs/namei.c b/fs/namei.c
index 985c6f3..9eb787e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2256,9 +2256,10 @@
 		goto out;
 	}
 	path->dentry = dentry;
-	path->mnt = mntget(nd->path.mnt);
+	path->mnt = nd->path.mnt;
 	if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
 		return 1;
+	mntget(path->mnt);
 	follow_mount(path);
 	error = 0;
 out:
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b56b1cc0..944275c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2879,6 +2879,7 @@
 		 * return the conflicting open:
 		 */
 		if (conf->len) {
+			kfree(conf->data);
 			conf->len = 0;
 			conf->data = NULL;
 			goto again;
@@ -2891,6 +2892,7 @@
 	if (conf->len) {
 		p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
 		p = xdr_encode_opaque(p, conf->data, conf->len);
+		kfree(conf->data);
 	}  else {  /* non - nfsv4 lock in conflict, no clientid nor owner */
 		p = xdr_encode_hyper(p, (u64)0); /* clientid */
 		*p++ = cpu_to_be32(0); /* length of owner name */
@@ -2907,7 +2909,7 @@
 		nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
 	else if (nfserr == nfserr_denied)
 		nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
-	kfree(lock->lk_denied.ld_owner.data);
+
 	return nfserr;
 }
 
diff --git a/fs/xattr.c b/fs/xattr.c
index 3377dff..c69e6d4 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -843,7 +843,7 @@
 
 	/* wrap around? */
 	len = sizeof(*new_xattr) + size;
-	if (len <= sizeof(*new_xattr))
+	if (len < sizeof(*new_xattr))
 		return NULL;
 
 	new_xattr = kmalloc(len, GFP_KERNEL);
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 002a285..3d33794 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -30,7 +30,8 @@
 #define MUX_MODE14	0xe
 #define MUX_MODE15	0xf
 
-#define PULL_ENA		(1 << 16)
+#define PULL_ENA		(0 << 16)
+#define PULL_DIS		(1 << 16)
 #define PULL_UP			(1 << 17)
 #define INPUT_EN		(1 << 18)
 #define SLEWCONTROL		(1 << 19)
@@ -38,10 +39,10 @@
 #define WAKEUP_EVENT		(1 << 25)
 
 /* Active pin states */
-#define PIN_OUTPUT		0
+#define PIN_OUTPUT		(0 | PULL_DIS)
 #define PIN_OUTPUT_PULLUP	(PIN_OUTPUT | PULL_ENA | PULL_UP)
 #define PIN_OUTPUT_PULLDOWN	(PIN_OUTPUT | PULL_ENA)
-#define PIN_INPUT		INPUT_EN
+#define PIN_INPUT		(INPUT_EN | PULL_DIS)
 #define PIN_INPUT_SLEW		(INPUT_EN | SLEWCONTROL)
 #define PIN_INPUT_PULLUP	(PULL_ENA | INPUT_EN | PULL_UP)
 #define PIN_INPUT_PULLDOWN	(PULL_ENA | INPUT_EN)
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 452286a..969af0f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -158,6 +158,8 @@
 /* Chip IDs of PCIe devices */
 #define BCMA_CHIP_ID_BCM4313	0x4313
 #define BCMA_CHIP_ID_BCM43142	43142
+#define BCMA_CHIP_ID_BCM43217	43217
+#define BCMA_CHIP_ID_BCM43222	43222
 #define BCMA_CHIP_ID_BCM43224	43224
 #define  BCMA_PKG_ID_BCM43224_FAB_CSM	0x8
 #define  BCMA_PKG_ID_BCM43224_FAB_SMIC	0xa
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c43c825..20dd50e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -82,7 +82,7 @@
 /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
 
 #define BPF_ALU64_REG(OP, DST, SRC)				\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
@@ -90,7 +90,7 @@
 		.imm   = 0 })
 
 #define BPF_ALU32_REG(OP, DST, SRC)				\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
@@ -100,7 +100,7 @@
 /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
 
 #define BPF_ALU64_IMM(OP, DST, IMM)				\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
 		.dst_reg = DST,					\
 		.src_reg = 0,					\
@@ -108,7 +108,7 @@
 		.imm   = IMM })
 
 #define BPF_ALU32_IMM(OP, DST, IMM)				\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
 		.dst_reg = DST,					\
 		.src_reg = 0,					\
@@ -118,7 +118,7 @@
 /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
 
 #define BPF_ENDIAN(TYPE, DST, LEN)				\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
 		.dst_reg = DST,					\
 		.src_reg = 0,					\
@@ -128,7 +128,7 @@
 /* Short form of mov, dst_reg = src_reg */
 
 #define BPF_MOV64_REG(DST, SRC)					\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
@@ -136,7 +136,7 @@
 		.imm   = 0 })
 
 #define BPF_MOV32_REG(DST, SRC)					\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
@@ -146,7 +146,7 @@
 /* Short form of mov, dst_reg = imm32 */
 
 #define BPF_MOV64_IMM(DST, IMM)					\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
 		.dst_reg = DST,					\
 		.src_reg = 0,					\
@@ -154,7 +154,7 @@
 		.imm   = IMM })
 
 #define BPF_MOV32_IMM(DST, IMM)					\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
 		.dst_reg = DST,					\
 		.src_reg = 0,					\
@@ -164,7 +164,7 @@
 /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
 
 #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
@@ -172,7 +172,7 @@
 		.imm   = IMM })
 
 #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
@@ -182,7 +182,7 @@
 /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
 
 #define BPF_LD_ABS(SIZE, IMM)					\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
 		.dst_reg = 0,					\
 		.src_reg = 0,					\
@@ -192,7 +192,7 @@
 /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
 
 #define BPF_LD_IND(SIZE, SRC, IMM)				\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
 		.dst_reg = 0,					\
 		.src_reg = SRC,					\
@@ -202,7 +202,7 @@
 /* Memory load, dst_reg = *(uint *) (src_reg + off16) */
 
 #define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
@@ -212,7 +212,7 @@
 /* Memory store, *(uint *) (dst_reg + off16) = src_reg */
 
 #define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
@@ -222,7 +222,7 @@
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
 		.dst_reg = DST,					\
 		.src_reg = 0,					\
@@ -232,7 +232,7 @@
 /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
 
 #define BPF_JMP_REG(OP, DST, SRC, OFF)				\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
@@ -242,7 +242,7 @@
 /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
 
 #define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
 		.dst_reg = DST,					\
 		.src_reg = 0,					\
@@ -252,7 +252,7 @@
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)					\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_JMP | BPF_CALL,			\
 		.dst_reg = 0,					\
 		.src_reg = 0,					\
@@ -262,7 +262,7 @@
 /* Raw code statement block */
 
 #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = CODE,					\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
@@ -272,7 +272,7 @@
 /* Program exit */
 
 #define BPF_EXIT_INSN()						\
-	((struct sock_filter_int) {				\
+	((struct bpf_insn) {					\
 		.code  = BPF_JMP | BPF_EXIT,			\
 		.dst_reg = 0,					\
 		.src_reg = 0,					\
@@ -298,7 +298,7 @@
 /* Macro to invoke filter function. */
 #define SK_RUN_FILTER(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
 
-struct sock_filter_int {
+struct bpf_insn {
 	__u8	code;		/* opcode */
 	__u8	dst_reg:4;	/* dest register */
 	__u8	src_reg:4;	/* source register */
@@ -330,10 +330,10 @@
 	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
 	struct rcu_head		rcu;
 	unsigned int		(*bpf_func)(const struct sk_buff *skb,
-					    const struct sock_filter_int *filter);
+					    const struct bpf_insn *filter);
 	union {
 		struct sock_filter	insns[0];
-		struct sock_filter_int	insnsi[0];
+		struct bpf_insn		insnsi[0];
 		struct work_struct	work;
 	};
 };
@@ -353,7 +353,7 @@
 void sk_filter_free(struct sk_filter *fp);
 
 int sk_convert_filter(struct sock_filter *prog, int len,
-		      struct sock_filter_int *new_prog, int *new_len);
+		      struct bpf_insn *new_prog, int *new_len);
 
 int sk_unattached_filter_create(struct sk_filter **pfp,
 				struct sock_fprog_kern *fprog);
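
With the rename in place, the macros above expand to struct bpf_insn initializers, so a
hand-assembled internal program can be written directly as an array. A minimal, hedged
sketch (BPF_REG_0 and the macros come from this same header; the program just loads 1
into R0 and exits):

	/* Illustrative only: r0 = 1; return r0 */
	static const struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};
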
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 75d17e1..63ab3873 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1001,6 +1001,26 @@
 	u8 oui_type;
 } __packed;
 
+struct ieee80211_wmm_ac_param {
+	u8 aci_aifsn; /* AIFSN, ACM, ACI */
+	u8 cw; /* ECWmin, ECWmax (CW = 2^ECW - 1) */
+	__le16 txop_limit;
+} __packed;
+
+struct ieee80211_wmm_param_ie {
+	u8 element_id; /* Element ID: 221 (0xdd); */
+	u8 len; /* Length: 24 */
+	/* required fields for WMM version 1 */
+	u8 oui[3]; /* 00:50:f2 */
+	u8 oui_type; /* 2 */
+	u8 oui_subtype; /* 1 */
+	u8 version; /* 1 for WMM version 1.0 */
+	u8 qos_info; /* AP/STA specific QoS info */
+	u8 reserved; /* 0 */
+	/* AC_BE, AC_BK, AC_VI, AC_VO */
+	struct ieee80211_wmm_ac_param ac[4];
+} __packed;
+
 /* Control frames */
 struct ieee80211_rts {
 	__le16 frame_control;
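
The new ieee80211_wmm_param_ie/ieee80211_wmm_ac_param structures mirror the on-air WMM
Parameter element; assuming the standard layout, the low nibble of the cw byte carries
ECWmin and the high nibble ECWmax, with CW = 2^ECW - 1 as noted in the field comment.
A hypothetical decoding helper, not part of this patch, might look like:

	/* Hypothetical helper: contention window bounds for one AC entry. */
	static inline u16 wmm_ac_cw_min(const struct ieee80211_wmm_ac_param *ac)
	{
		return (1 << (ac->cw & 0x0f)) - 1;
	}

	static inline u16 wmm_ac_cw_max(const struct ieee80211_wmm_ac_param *ac)
	{
		return (1 << (ac->cw >> 4)) - 1;
	}
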
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5ab4e3a..92abb49 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -593,6 +593,7 @@
 	struct device 		*dev;
 	void __iomem * const	*iomap;
 	unsigned int		n_ports;
+	unsigned int		n_tags;			/* nr of NCQ tags */
 	void			*private_data;
 	struct ata_port_operations *ops;
 	unsigned long		flags;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fa660ae..e15b154 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1254,4 +1254,11 @@
 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
 				 int enable);
+
+/* Returns true if running in low memory profile (kdump kernel) */
+static inline bool mlx4_low_memory_profile(void)
+{
+	return reset_devices;
+}
+
 #endif /* MLX4_DEVICE_H */
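
mlx4_low_memory_profile() simply reports whether the kernel was booted with
reset_devices (the usual kdump case), so callers can pick smaller resource defaults.
An illustrative caller, with made-up numbers:

	/* Sketch only: shrink a table when running in a crash kernel. */
	int num_entries = 1024;

	if (mlx4_low_memory_profile())
		num_entries /= 8;
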
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 3406cfb..3349471 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -456,9 +456,6 @@
 	u8	syndrome;
 };
 
-struct mlx5_eqe_dropped_packet {
-};
-
 struct mlx5_eqe_port_state {
 	u8	reserved0[8];
 	u8	port;
@@ -498,7 +495,6 @@
 	struct mlx5_eqe_comp		comp;
 	struct mlx5_eqe_qp_srq		qp_srq;
 	struct mlx5_eqe_cq_err		cq_err;
-	struct mlx5_eqe_dropped_packet	dp;
 	struct mlx5_eqe_port_state	port;
 	struct mlx5_eqe_gpio		gpio;
 	struct mlx5_eqe_congestion	cong;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2bce4aa..9f3a547 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -381,8 +381,8 @@
 	struct mlx5_buf_list   *page_list;
 	int			nbufs;
 	int			npages;
-	int			page_shift;
 	int			size;
+	u8			page_shift;
 };
 
 struct mlx5_eq {
@@ -543,6 +543,10 @@
 	/* protect mkey key part */
 	spinlock_t		mkey_lock;
 	u8			mkey_key;
+
+	struct list_head        dev_list;
+	struct list_head        ctx_list;
+	spinlock_t              ctx_lock;
 };
 
 struct mlx5_core_dev {
@@ -555,7 +559,7 @@
 	struct mlx5_init_seg __iomem *iseg;
 	void			(*event) (struct mlx5_core_dev *dev,
 					  enum mlx5_dev_event event,
-					  void *data);
+					  unsigned long param);
 	struct mlx5_priv	priv;
 	struct mlx5_profile	*profile;
 	atomic_t		num_qps;
@@ -686,8 +690,6 @@
 	return key & 0xffffff00u;
 }
 
-int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
-void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
@@ -734,7 +736,7 @@
 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
 int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
-		      u16 opmod, int port);
+		      u16 opmod, u8 port);
 void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
@@ -767,7 +769,7 @@
 int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
 			 int size_in, void *data_out, int size_out,
 			 u16 reg_num, int arg, int write);
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps);
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
 
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -811,9 +813,20 @@
 	MAX_MR_CACHE_ENTRIES    = 16,
 };
 
+struct mlx5_interface {
+	void *			(*add)(struct mlx5_core_dev *dev);
+	void			(*remove)(struct mlx5_core_dev *dev, void *context);
+	void			(*event)(struct mlx5_core_dev *dev, void *context,
+					 enum mlx5_dev_event event, unsigned long param);
+	struct list_head	list;
+};
+
+int mlx5_register_interface(struct mlx5_interface *intf);
+void mlx5_unregister_interface(struct mlx5_interface *intf);
+
 struct mlx5_profile {
 	u64	mask;
-	u32	log_max_qp;
+	u8	log_max_qp;
 	struct {
 		int	size;
 		int	limit;
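
The new mlx5_register_interface()/mlx5_unregister_interface() pair lets client drivers
attach per-device contexts through the add/remove callbacks of struct mlx5_interface.
A minimal registration sketch with hypothetical names; the .event callback is assumed
optional here and left unset:

	#include <linux/module.h>
	#include <linux/mlx5/driver.h>

	static void *my_add(struct mlx5_core_dev *dev)
	{
		/* Allocate and return a per-device context (NULL on failure). */
		return dev;
	}

	static void my_remove(struct mlx5_core_dev *dev, void *context)
	{
		/* Release whatever my_add() returned. */
	}

	static struct mlx5_interface my_intf = {
		.add    = my_add,
		.remove = my_remove,
	};

	static int __init my_init(void)
	{
		return mlx5_register_interface(&my_intf);
	}
	module_init(my_init);

	static void __exit my_exit(void)
	{
		mlx5_unregister_interface(&my_intf);
	}
	module_exit(my_exit);
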
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index d99800c..dcfdecb 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -176,4 +176,12 @@
 				 NETIF_F_HW_VLAN_STAG_RX | \
 				 NETIF_F_HW_VLAN_STAG_TX)
 
+#define NETIF_F_GSO_ENCAP_ALL	(NETIF_F_GSO_GRE |			\
+				 NETIF_F_GSO_GRE_CSUM |			\
+				 NETIF_F_GSO_IPIP |			\
+				 NETIF_F_GSO_SIT |			\
+				 NETIF_F_GSO_UDP_TUNNEL |		\
+				 NETIF_F_GSO_UDP_TUNNEL_CSUM |		\
+				 NETIF_F_GSO_MPLS)
+
 #endif	/* _LINUX_NETDEV_FEATURES_H */
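
NETIF_F_GSO_ENCAP_ALL bundles every tunnel GSO type into one mask so drivers do not
have to list them individually. A hedged sketch of how a driver that can segment all
of these encapsulations might advertise them (the surrounding netdev setup is omitted):

	/* Illustrative only. */
	netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_ENCAP_ALL;
	netdev->features        |= NETIF_F_GSO_ENCAP_ALL;
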
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 0511789..0ff360d 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -73,6 +73,8 @@
 				   int depth, void *data);
 
 extern bool early_init_dt_scan(void *params);
+extern bool early_init_dt_verify(void *params);
+extern void early_init_dt_scan_nodes(void);
 
 extern const char *of_flat_dt_get_machine_name(void);
 extern const void *of_flat_dt_match_machine(const void *default_match,
@@ -84,6 +86,7 @@
 extern void early_init_devtree(void *);
 extern void early_get_first_memblock_info(void *, phys_addr_t *);
 extern u64 fdt_translate_address(const void *blob, int node_offset);
+extern void of_fdt_limit_memory(int limit);
 #else /* CONFIG_OF_FLATTREE */
 static inline void early_init_fdt_scan_reserved_mem(void) {}
 static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0a97b58..e1474ae 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,6 +399,18 @@
 }
 
 /*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+	if (unlikely(PageHeadHuge(page)))
+		return page->index << compound_order(page);
+	else
+		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
+/*
  * Return byte-offset into filesystem object for page.
  */
 static inline loff_t page_offset(struct page *page)
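
page_to_pgoff() yields a page-cache offset in PAGE_SIZE units whether or not the page
heads a huge page, so callers can compare offsets without special-casing hugetlb. A
hypothetical helper built on top of it, for illustration only:

	/* Does @page fall inside the pgoff range [start, end) of a mapping? */
	static inline bool page_in_pgoff_range(struct page *page,
					       pgoff_t start, pgoff_t end)
	{
		pgoff_t pgoff = page_to_pgoff(page);

		return pgoff >= start && pgoff < end;
	}
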
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6804144..ed39956 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -545,6 +545,24 @@
 	 */
 	void (*link_change_notify)(struct phy_device *dev);
 
+	/* A function provided by a PHY-specific driver to override the
+	 * PHY driver framework support for reading an MMD register
+	 * from the PHY. If not supported, return -1. This function is
+	 * optional for PHY-specific drivers; if not provided, the
+	 * default MMD read function is used by the PHY framework.
+	 */
+	int (*read_mmd_indirect)(struct phy_device *dev, int ptrad,
+				 int devnum, int regnum);
+
+	/* A function provided by a PHY-specific driver to override the
+	 * PHY driver framework support for writing an MMD register
+	 * to the PHY. This function is optional for PHY-specific drivers;
+	 * if not provided, the default MMD write function is used by
+	 * the PHY framework.
+	 */
+	void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
+				   int devnum, int regnum, u32 val);
+
 	struct device_driver driver;
 };
 #define to_phy_driver(d) container_of(d, struct phy_driver, driver)
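
A PHY driver that needs non-standard MMD access can now plug its own accessors into
struct phy_driver. A hedged sketch with hypothetical names; the fall-through to plain
phy_read()/phy_write() stands in for whatever vendor-specific access the hardware
actually requires, and the remaining mandatory phy_driver fields are omitted:

	static int my_phy_read_mmd_indirect(struct phy_device *dev, int ptrad,
					    int devnum, int regnum)
	{
		/* e.g. map the (devnum, regnum) pair onto vendor registers */
		return phy_read(dev, regnum);
	}

	static void my_phy_write_mmd_indirect(struct phy_device *dev, int ptrad,
					      int devnum, int regnum, u32 val)
	{
		phy_write(dev, regnum, val);
	}

	static struct phy_driver my_phy_driver = {
		/* ... name, phy_id, features, etc. omitted in this sketch ... */
		.read_mmd_indirect  = my_phy_read_mmd_indirect,
		.write_mmd_indirect = my_phy_write_mmd_indirect,
	};
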
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3694303..281dece 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -112,8 +112,7 @@
 #define CHECKSUM_COMPLETE	2
 #define CHECKSUM_PARTIAL	3
 
-#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
-				 ~(SMP_CACHE_BYTES - 1))
+#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
 #define SKB_WITH_OVERHEAD(X)	\
 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 #define SKB_MAX_ORDER(X, ORDER) \
@@ -211,20 +210,9 @@
  * struct skb_shared_hwtstamps - hardware time stamps
  * @hwtstamp:	hardware time stamp transformed into duration
  *		since arbitrary point in time
- * @syststamp:	hwtstamp transformed to system time base (deprecated)
  *
  * Software time stamps generated by ktime_get_real() are stored in
- * skb->tstamp. The relation between the different kinds of time
- * stamps is as follows:
- *
- * syststamp and tstamp can be compared against each other in
- * arbitrary combinations.  The accuracy of a
- * syststamp/tstamp/"syststamp from other device" comparison is
- * limited by the accuracy of the transformation into system time
- * base. This depends on the device driver and its underlying
- * hardware. The syststamp implementation is deprecated in favor
- * of hwtstamps and hw PTP clock sources exposed directly to
- * userspace.
+ * skb->tstamp.
  *
  * hwtstamps can only be compared against other hwtstamps from
  * the same device.
@@ -234,7 +222,6 @@
  */
 struct skb_shared_hwtstamps {
 	ktime_t	hwtstamp;
-	ktime_t	syststamp;
 };
 
 /* Definitions for tx_flags in struct skb_shared_info */
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index f9f931c..f7b9100 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -345,6 +345,43 @@
 #define  SSB_SPROM4_TXPID5GH2_SHIFT	0
 #define  SSB_SPROM4_TXPID5GH3		0xFF00
 #define  SSB_SPROM4_TXPID5GH3_SHIFT	8
+
+/* There are 4 blocks with power info sharing the same layout */
+#define SSB_SPROM4_PWR_INFO_CORE0	0x0080
+#define SSB_SPROM4_PWR_INFO_CORE1	0x00AE
+#define SSB_SPROM4_PWR_INFO_CORE2	0x00DC
+#define SSB_SPROM4_PWR_INFO_CORE3	0x010A
+
+#define SSB_SPROM4_2G_MAXP_ITSSI	0x00	/* 2 GHz ITSSI and 2 GHz Max Power */
+#define  SSB_SPROM4_2G_MAXP		0x00FF
+#define  SSB_SPROM4_2G_ITSSI		0xFF00
+#define  SSB_SPROM4_2G_ITSSI_SHIFT	8
+#define SSB_SPROM4_2G_PA_0		0x02	/* 2 GHz power amp */
+#define SSB_SPROM4_2G_PA_1		0x04
+#define SSB_SPROM4_2G_PA_2		0x06
+#define SSB_SPROM4_2G_PA_3		0x08
+#define SSB_SPROM4_5G_MAXP_ITSSI	0x0A	/* 5 GHz ITSSI and 5.3 GHz Max Power */
+#define  SSB_SPROM4_5G_MAXP		0x00FF
+#define  SSB_SPROM4_5G_ITSSI		0xFF00
+#define  SSB_SPROM4_5G_ITSSI_SHIFT	8
+#define SSB_SPROM4_5GHL_MAXP		0x0C	/* 5.2 GHz and 5.8 GHz Max Power */
+#define  SSB_SPROM4_5GH_MAXP		0x00FF
+#define  SSB_SPROM4_5GL_MAXP		0xFF00
+#define  SSB_SPROM4_5GL_MAXP_SHIFT	8
+#define SSB_SPROM4_5G_PA_0		0x0E	/* 5.3 GHz power amp */
+#define SSB_SPROM4_5G_PA_1		0x10
+#define SSB_SPROM4_5G_PA_2		0x12
+#define SSB_SPROM4_5G_PA_3		0x14
+#define SSB_SPROM4_5GL_PA_0		0x16	/* 5.2 GHz power amp */
+#define SSB_SPROM4_5GL_PA_1		0x18
+#define SSB_SPROM4_5GL_PA_2		0x1A
+#define SSB_SPROM4_5GL_PA_3		0x1C
+#define SSB_SPROM4_5GH_PA_0		0x1E	/* 5.8 GHz power amp */
+#define SSB_SPROM4_5GH_PA_1		0x20
+#define SSB_SPROM4_5GH_PA_2		0x22
+#define SSB_SPROM4_5GH_PA_3		0x24
+
+/* TODO: Make it deprecated */
 #define SSB_SPROM4_MAXP_BG		0x0080  /* Max Power BG in path 1 */
 #define  SSB_SPROM4_MAXP_BG_MASK	0x00FF  /* Mask for Max Power BG */
 #define  SSB_SPROM4_ITSSI_BG		0xFF00	/* Mask for path 1 itssi_bg */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index a01236e..f0a3d88 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -202,33 +202,6 @@
 #define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \
 			      BIT(HCI_FAST_CONNECTABLE) | BIT(HCI_LE_ADV))
 
-/* HCI ioctl defines */
-#define HCIDEVUP	_IOW('H', 201, int)
-#define HCIDEVDOWN	_IOW('H', 202, int)
-#define HCIDEVRESET	_IOW('H', 203, int)
-#define HCIDEVRESTAT	_IOW('H', 204, int)
-
-#define HCIGETDEVLIST	_IOR('H', 210, int)
-#define HCIGETDEVINFO	_IOR('H', 211, int)
-#define HCIGETCONNLIST	_IOR('H', 212, int)
-#define HCIGETCONNINFO	_IOR('H', 213, int)
-#define HCIGETAUTHINFO	_IOR('H', 215, int)
-
-#define HCISETRAW	_IOW('H', 220, int)
-#define HCISETSCAN	_IOW('H', 221, int)
-#define HCISETAUTH	_IOW('H', 222, int)
-#define HCISETENCRYPT	_IOW('H', 223, int)
-#define HCISETPTYPE	_IOW('H', 224, int)
-#define HCISETLINKPOL	_IOW('H', 225, int)
-#define HCISETLINKMODE	_IOW('H', 226, int)
-#define HCISETACLMTU	_IOW('H', 227, int)
-#define HCISETSCOMTU	_IOW('H', 228, int)
-
-#define HCIBLOCKADDR	_IOW('H', 230, int)
-#define HCIUNBLOCKADDR	_IOW('H', 231, int)
-
-#define HCIINQUIRY	_IOR('H', 240, int)
-
 /* HCI timeouts */
 #define HCI_DISCONN_TIMEOUT	msecs_to_jiffies(2000)	/* 2 seconds */
 #define HCI_PAIRING_TIMEOUT	msecs_to_jiffies(60000)	/* 60 seconds */
@@ -356,6 +329,7 @@
 #define LMP_HOST_SC		0x08
 
 /* LE features */
+#define HCI_LE_ENCRYPTION		0x01
 #define HCI_LE_CONN_PARAM_REQ_PROC	0x02
 #define HCI_LE_PING			0x10
 
@@ -427,6 +401,9 @@
 /* The core spec defines 127 as the "not available" value */
 #define HCI_TX_POWER_INVALID	127
 
+#define HCI_ROLE_MASTER		0x00
+#define HCI_ROLE_SLAVE		0x01
+
 /* Extended Inquiry Response field types */
 #define EIR_FLAGS		0x01 /* flags */
 #define EIR_UUID16_SOME		0x02 /* 16-bit UUID, more available */
@@ -1739,9 +1716,6 @@
 
 #define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT	0x54
 
-/* Low energy meta events */
-#define LE_CONN_ROLE_MASTER	0x00
-
 #define HCI_EV_LE_CONN_COMPLETE		0x01
 struct hci_ev_le_conn_complete {
 	__u8     status;
@@ -1871,126 +1845,4 @@
 #define hci_handle(h)		(h & 0x0fff)
 #define hci_flags(h)		(h >> 12)
 
-/* ---- HCI Sockets ---- */
-
-/* Socket options */
-#define HCI_DATA_DIR	1
-#define HCI_FILTER	2
-#define HCI_TIME_STAMP	3
-
-/* CMSG flags */
-#define HCI_CMSG_DIR	0x0001
-#define HCI_CMSG_TSTAMP	0x0002
-
-struct sockaddr_hci {
-	sa_family_t    hci_family;
-	unsigned short hci_dev;
-	unsigned short hci_channel;
-};
-#define HCI_DEV_NONE	0xffff
-
-#define HCI_CHANNEL_RAW		0
-#define HCI_CHANNEL_USER	1
-#define HCI_CHANNEL_MONITOR	2
-#define HCI_CHANNEL_CONTROL	3
-
-struct hci_filter {
-	unsigned long type_mask;
-	unsigned long event_mask[2];
-	__le16 opcode;
-};
-
-struct hci_ufilter {
-	__u32  type_mask;
-	__u32  event_mask[2];
-	__le16 opcode;
-};
-
-#define HCI_FLT_TYPE_BITS	31
-#define HCI_FLT_EVENT_BITS	63
-#define HCI_FLT_OGF_BITS	63
-#define HCI_FLT_OCF_BITS	127
-
-/* ---- HCI Ioctl requests structures ---- */
-struct hci_dev_stats {
-	__u32 err_rx;
-	__u32 err_tx;
-	__u32 cmd_tx;
-	__u32 evt_rx;
-	__u32 acl_tx;
-	__u32 acl_rx;
-	__u32 sco_tx;
-	__u32 sco_rx;
-	__u32 byte_rx;
-	__u32 byte_tx;
-};
-
-struct hci_dev_info {
-	__u16 dev_id;
-	char  name[8];
-
-	bdaddr_t bdaddr;
-
-	__u32 flags;
-	__u8  type;
-
-	__u8  features[8];
-
-	__u32 pkt_type;
-	__u32 link_policy;
-	__u32 link_mode;
-
-	__u16 acl_mtu;
-	__u16 acl_pkts;
-	__u16 sco_mtu;
-	__u16 sco_pkts;
-
-	struct hci_dev_stats stat;
-};
-
-struct hci_conn_info {
-	__u16    handle;
-	bdaddr_t bdaddr;
-	__u8     type;
-	__u8     out;
-	__u16    state;
-	__u32    link_mode;
-};
-
-struct hci_dev_req {
-	__u16  dev_id;
-	__u32  dev_opt;
-};
-
-struct hci_dev_list_req {
-	__u16  dev_num;
-	struct hci_dev_req dev_req[0];	/* hci_dev_req structures */
-};
-
-struct hci_conn_list_req {
-	__u16  dev_id;
-	__u16  conn_num;
-	struct hci_conn_info conn_info[0];
-};
-
-struct hci_conn_info_req {
-	bdaddr_t bdaddr;
-	__u8     type;
-	struct   hci_conn_info conn_info[0];
-};
-
-struct hci_auth_info_req {
-	bdaddr_t bdaddr;
-	__u8     type;
-};
-
-struct hci_inquiry_req {
-	__u16 dev_id;
-	__u16 flags;
-	__u8  lap[3];
-	__u8  length;
-	__u8  num_rsp;
-};
-#define IREQ_CACHE_FLUSH 0x0001
-
 #endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index e69c2b0..996ed06 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -26,6 +26,7 @@
 #define __HCI_CORE_H
 
 #include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_sock.h>
 
 /* HCI priority */
 #define HCI_PRIO_MAX	7
@@ -82,6 +83,7 @@
 	unsigned int     amp_num;
 	unsigned int     sco_num;
 	unsigned int     le_num;
+	unsigned int     le_num_slave;
 };
 
 struct bdaddr_list {
@@ -370,6 +372,7 @@
 	__u16		state;
 	__u8		mode;
 	__u8		type;
+	__u8		role;
 	bool		out;
 	__u8		attempt;
 	__u8		dev_class[3];
@@ -539,12 +542,12 @@
 	HCI_CONN_POWER_SAVE,
 	HCI_CONN_REMOTE_OOB,
 	HCI_CONN_FLUSH_KEY,
-	HCI_CONN_MASTER,
 	HCI_CONN_ENCRYPT,
 	HCI_CONN_AUTH,
 	HCI_CONN_SECURE,
 	HCI_CONN_FIPS,
 	HCI_CONN_STK_ENCRYPT,
+	HCI_CONN_AUTH_INITIATOR,
 };
 
 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -574,6 +577,8 @@
 		break;
 	case LE_LINK:
 		h->le_num++;
+		if (c->role == HCI_ROLE_SLAVE)
+			h->le_num_slave++;
 		break;
 	case SCO_LINK:
 	case ESCO_LINK:
@@ -598,6 +603,8 @@
 		break;
 	case LE_LINK:
 		h->le_num--;
+		if (c->role == HCI_ROLE_SLAVE)
+			h->le_num_slave--;
 		break;
 	case SCO_LINK:
 	case ESCO_LINK:
@@ -694,7 +701,8 @@
 bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
 
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+			      u8 role);
 int hci_conn_del(struct hci_conn *conn);
 void hci_conn_hash_flush(struct hci_dev *hdev);
 void hci_conn_check_pending(struct hci_dev *hdev);
@@ -706,14 +714,15 @@
 
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				u8 dst_type, u8 sec_level, u16 conn_timeout,
-				bool master);
+				u8 role);
 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
 				 u8 sec_level, u8 auth_type);
 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
 				 __u16 setting);
 int hci_conn_check_link_mode(struct hci_conn *conn);
 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
-int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
+int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
+		      bool initiator);
 int hci_conn_change_link_key(struct hci_conn *conn);
 int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
 
@@ -880,12 +889,12 @@
 				  bdaddr_t *bdaddr, u8 *val, u8 type,
 				  u8 pin_len, bool *persistent);
 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
-			     bool master);
+			     u8 role);
 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
 			    u8 addr_type, u8 type, u8 authenticated,
 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
-				     u8 addr_type, bool master);
+				     u8 addr_type, u8 role);
 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
 void hci_smp_ltks_clear(struct hci_dev *hdev);
 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
@@ -1239,6 +1248,7 @@
 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
 		    const void *param, u8 event);
 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
+bool hci_req_pending(struct hci_dev *hdev);
 
 void hci_req_add_le_scan_disable(struct hci_request *req);
 void hci_req_add_le_passive_scan(struct hci_request *req);
@@ -1286,10 +1296,8 @@
 void mgmt_index_removed(struct hci_dev *hdev);
 void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
 int mgmt_powered(struct hci_dev *hdev, u8 powered);
+int mgmt_update_adv_data(struct hci_dev *hdev);
 void mgmt_discoverable_timeout(struct hci_dev *hdev);
-void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
-void mgmt_connectable(struct hci_dev *hdev, u8 connectable);
-void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
 		       bool persistent);
 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -1350,34 +1358,6 @@
 void mgmt_reenable_advertising(struct hci_dev *hdev);
 void mgmt_smp_complete(struct hci_conn *conn, bool complete);
 
-/* HCI info for socket */
-#define hci_pi(sk) ((struct hci_pinfo *) sk)
-
-struct hci_pinfo {
-	struct bt_sock    bt;
-	struct hci_dev    *hdev;
-	struct hci_filter filter;
-	__u32             cmsg_mask;
-	unsigned short   channel;
-};
-
-/* HCI security filter */
-#define HCI_SFLT_MAX_OGF  5
-
-struct hci_sec_filter {
-	__u32 type_mask;
-	__u32 event_mask[2];
-	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
-};
-
-/* ----- HCI requests ----- */
-#define HCI_REQ_DONE	  0
-#define HCI_REQ_PEND	  1
-#define HCI_REQ_CANCELED  2
-
-#define hci_req_lock(d)		mutex_lock(&d->req_lock)
-#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
-
 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
 		      u16 to_multiplier);
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
new file mode 100644
index 0000000..9a46d66
--- /dev/null
+++ b/include/net/bluetooth/hci_sock.h
@@ -0,0 +1,175 @@
+/*
+   BlueZ - Bluetooth protocol stack for Linux
+   Copyright (C) 2000-2001 Qualcomm Incorporated
+
+   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License version 2 as
+   published by the Free Software Foundation;
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+   SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __HCI_SOCK_H
+#define __HCI_SOCK_H
+
+/* Socket options */
+#define HCI_DATA_DIR	1
+#define HCI_FILTER	2
+#define HCI_TIME_STAMP	3
+
+/* CMSG flags */
+#define HCI_CMSG_DIR	0x0001
+#define HCI_CMSG_TSTAMP	0x0002
+
+struct sockaddr_hci {
+	sa_family_t    hci_family;
+	unsigned short hci_dev;
+	unsigned short hci_channel;
+};
+#define HCI_DEV_NONE	0xffff
+
+#define HCI_CHANNEL_RAW		0
+#define HCI_CHANNEL_USER	1
+#define HCI_CHANNEL_MONITOR	2
+#define HCI_CHANNEL_CONTROL	3
+
+struct hci_filter {
+	unsigned long type_mask;
+	unsigned long event_mask[2];
+	__le16 opcode;
+};
+
+struct hci_ufilter {
+	__u32  type_mask;
+	__u32  event_mask[2];
+	__le16 opcode;
+};
+
+#define HCI_FLT_TYPE_BITS	31
+#define HCI_FLT_EVENT_BITS	63
+#define HCI_FLT_OGF_BITS	63
+#define HCI_FLT_OCF_BITS	127
+
+/* Ioctl defines */
+#define HCIDEVUP	_IOW('H', 201, int)
+#define HCIDEVDOWN	_IOW('H', 202, int)
+#define HCIDEVRESET	_IOW('H', 203, int)
+#define HCIDEVRESTAT	_IOW('H', 204, int)
+
+#define HCIGETDEVLIST	_IOR('H', 210, int)
+#define HCIGETDEVINFO	_IOR('H', 211, int)
+#define HCIGETCONNLIST	_IOR('H', 212, int)
+#define HCIGETCONNINFO	_IOR('H', 213, int)
+#define HCIGETAUTHINFO	_IOR('H', 215, int)
+
+#define HCISETRAW	_IOW('H', 220, int)
+#define HCISETSCAN	_IOW('H', 221, int)
+#define HCISETAUTH	_IOW('H', 222, int)
+#define HCISETENCRYPT	_IOW('H', 223, int)
+#define HCISETPTYPE	_IOW('H', 224, int)
+#define HCISETLINKPOL	_IOW('H', 225, int)
+#define HCISETLINKMODE	_IOW('H', 226, int)
+#define HCISETACLMTU	_IOW('H', 227, int)
+#define HCISETSCOMTU	_IOW('H', 228, int)
+
+#define HCIBLOCKADDR	_IOW('H', 230, int)
+#define HCIUNBLOCKADDR	_IOW('H', 231, int)
+
+#define HCIINQUIRY	_IOR('H', 240, int)
+
+/* Ioctl requests structures */
+struct hci_dev_stats {
+	__u32 err_rx;
+	__u32 err_tx;
+	__u32 cmd_tx;
+	__u32 evt_rx;
+	__u32 acl_tx;
+	__u32 acl_rx;
+	__u32 sco_tx;
+	__u32 sco_rx;
+	__u32 byte_rx;
+	__u32 byte_tx;
+};
+
+struct hci_dev_info {
+	__u16 dev_id;
+	char  name[8];
+
+	bdaddr_t bdaddr;
+
+	__u32 flags;
+	__u8  type;
+
+	__u8  features[8];
+
+	__u32 pkt_type;
+	__u32 link_policy;
+	__u32 link_mode;
+
+	__u16 acl_mtu;
+	__u16 acl_pkts;
+	__u16 sco_mtu;
+	__u16 sco_pkts;
+
+	struct hci_dev_stats stat;
+};
+
+struct hci_conn_info {
+	__u16    handle;
+	bdaddr_t bdaddr;
+	__u8     type;
+	__u8     out;
+	__u16    state;
+	__u32    link_mode;
+};
+
+struct hci_dev_req {
+	__u16  dev_id;
+	__u32  dev_opt;
+};
+
+struct hci_dev_list_req {
+	__u16  dev_num;
+	struct hci_dev_req dev_req[0];	/* hci_dev_req structures */
+};
+
+struct hci_conn_list_req {
+	__u16  dev_id;
+	__u16  conn_num;
+	struct hci_conn_info conn_info[0];
+};
+
+struct hci_conn_info_req {
+	bdaddr_t bdaddr;
+	__u8     type;
+	struct   hci_conn_info conn_info[0];
+};
+
+struct hci_auth_info_req {
+	bdaddr_t bdaddr;
+	__u8     type;
+};
+
+struct hci_inquiry_req {
+	__u16 dev_id;
+	__u16 flags;
+	__u8  lap[3];
+	__u8  length;
+	__u8  num_rsp;
+};
+#define IREQ_CACHE_FLUSH 0x0001
+
+#endif /* __HCI_SOCK_H */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index e0c6a9a..8df15ad 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -134,9 +134,12 @@
 #define L2CAP_FCS_CRC16		0x01
 
 /* L2CAP fixed channels */
-#define L2CAP_FC_L2CAP		0x02
+#define L2CAP_FC_SIG_BREDR	0x02
 #define L2CAP_FC_CONNLESS	0x04
 #define L2CAP_FC_A2MP		0x08
+#define L2CAP_FC_ATT		0x10
+#define L2CAP_FC_SIG_LE		0x20
+#define L2CAP_FC_SMP_LE		0x40
 
 /* L2CAP Control Field bit masks */
 #define L2CAP_CTRL_SAR			0xC000
@@ -622,11 +625,10 @@
 
 	struct delayed_work	info_timer;
 
-	spinlock_t		lock;
-
 	struct sk_buff		*rx_skb;
 	__u32			rx_len;
 	__u8			tx_ident;
+	struct mutex		ident_lock;
 
 	struct sk_buff_head	pending_rx;
 	struct work_struct	pending_rx_work;
@@ -903,7 +905,7 @@
 		       bdaddr_t *dst, u8 dst_type);
 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
 void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
-int l2cap_chan_check_security(struct l2cap_chan *chan);
+int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
 void l2cap_chan_set_defaults(struct l2cap_chan *chan);
 int l2cap_ertm_init(struct l2cap_chan *chan);
 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 2019d1a..f40ddb4 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -27,11 +27,6 @@
 
 /* SCO defaults */
 #define SCO_DEFAULT_MTU		500
-#define SCO_DEFAULT_FLUSH_TO	0xFFFF
-
-#define SCO_CONN_TIMEOUT	(HZ * 40)
-#define SCO_DISCONN_TIMEOUT	(HZ * 2)
-#define SCO_CONN_IDLE_TIMEOUT	(HZ * 60)
 
 /* SCO socket address */
 struct sockaddr_sco {
@@ -51,29 +46,4 @@
 	__u8  dev_class[3];
 };
 
-/* ---- SCO connections ---- */
-struct sco_conn {
-	struct hci_conn	*hcon;
-
-	spinlock_t	lock;
-	struct sock	*sk;
-
-	unsigned int    mtu;
-};
-
-#define sco_conn_lock(c)	spin_lock(&c->lock);
-#define sco_conn_unlock(c)	spin_unlock(&c->lock);
-
-/* ----- SCO socket info ----- */
-#define sco_pi(sk) ((struct sco_pinfo *) sk)
-
-struct sco_pinfo {
-	struct bt_sock	bt;
-	bdaddr_t	src;
-	bdaddr_t	dst;
-	__u32		flags;
-	__u16		setting;
-	struct sco_conn	*conn;
-};
-
 #endif /* __SCO_H */
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 6f59de9..6f4930a 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -4,10 +4,6 @@
 #include <linux/percpu_counter.h>
 
 struct netns_frags {
-	int			nqueues;
-	struct list_head	lru_list;
-	spinlock_t		lru_lock;
-
 	/* The percpu_counter "mem" need to be cacheline aligned.
 	 *  mem.count must not share cacheline with other writers
 	 */
@@ -22,7 +18,6 @@
 struct inet_frag_queue {
 	spinlock_t		lock;
 	struct timer_list	timer;      /* when will this queue expire? */
-	struct list_head	lru_list;   /* lru list member */
 	struct hlist_node	list;
 	atomic_t		refcnt;
 	struct sk_buff		*fragments; /* list of received fragments */
@@ -32,6 +27,7 @@
 	int			meat;
 	__u8			last_in;    /* first/last segment arrived? */
 
+#define INET_FRAG_EVICTED	8
 #define INET_FRAG_COMPLETE	4
 #define INET_FRAG_FIRST_IN	2
 #define INET_FRAG_LAST_IN	1
@@ -48,7 +44,7 @@
  *	       rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
  *	       struct frag_queue))
  */
-#define INETFRAGS_MAXDEPTH		128
+#define INETFRAGS_MAXDEPTH	128
 
 struct inet_frag_bucket {
 	struct hlist_head	chain;
@@ -57,24 +53,27 @@
 
 struct inet_frags {
 	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
-	/* This rwlock is a global lock (seperate per IPv4, IPv6 and
-	 * netfilter). Important to keep this on a seperate cacheline.
-	 * Its primarily a rebuild protection rwlock.
-	 */
-	rwlock_t		lock ____cacheline_aligned_in_smp;
-	int			secret_interval;
-	struct timer_list	secret_timer;
+
+	struct work_struct	frags_work;
+	unsigned int next_bucket;
+	unsigned long last_rebuild_jiffies;
+	bool rebuild;
 
 	/* The first call to hashfn is responsible to initialize
 	 * rnd. This is best done with net_get_random_once.
+	 *
+	 * rnd_seqlock is used to let hash insertion detect
+	 * when it needs to re-lookup the hash chain to use.
 	 */
 	u32			rnd;
+	seqlock_t		rnd_seqlock;
 	int			qsize;
 
-	unsigned int		(*hashfn)(struct inet_frag_queue *);
-	bool			(*match)(struct inet_frag_queue *q, void *arg);
+	unsigned int		(*hashfn)(const struct inet_frag_queue *);
+	bool			(*match)(const struct inet_frag_queue *q,
+					 const void *arg);
 	void			(*constructor)(struct inet_frag_queue *q,
-						void *arg);
+					       const void *arg);
 	void			(*destructor)(struct inet_frag_queue *);
 	void			(*skb_free)(struct sk_buff *);
 	void			(*frag_expire)(unsigned long data);
@@ -87,19 +86,17 @@
 void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
 
 void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q,
-				struct inet_frags *f, int *work);
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
+void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
-		struct inet_frags *f, void *key, unsigned int hash)
-	__releases(&f->lock);
+		struct inet_frags *f, void *key, unsigned int hash);
+
 void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
 				   const char *prefix);
 
 static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
 {
 	if (atomic_dec_and_test(&q->refcnt))
-		inet_frag_destroy(q, f, NULL);
+		inet_frag_destroy(q, f);
 }
 
 /* Memory Tracking Functions. */
@@ -131,9 +128,9 @@
 	percpu_counter_init(&nf->mem, 0);
 }
 
-static inline int sum_frag_mem_limit(struct netns_frags *nf)
+static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
 {
-	int res;
+	unsigned int res;
 
 	local_bh_disable();
 	res = percpu_counter_sum_positive(&nf->mem);
@@ -142,31 +139,6 @@
 	return res;
 }
 
-static inline void inet_frag_lru_move(struct inet_frag_queue *q)
-{
-	spin_lock(&q->net->lru_lock);
-	if (!list_empty(&q->lru_list))
-		list_move_tail(&q->lru_list, &q->net->lru_list);
-	spin_unlock(&q->net->lru_lock);
-}
-
-static inline void inet_frag_lru_del(struct inet_frag_queue *q)
-{
-	spin_lock(&q->net->lru_lock);
-	list_del_init(&q->lru_list);
-	q->net->nqueues--;
-	spin_unlock(&q->net->lru_lock);
-}
-
-static inline void inet_frag_lru_add(struct netns_frags *nf,
-				     struct inet_frag_queue *q)
-{
-	spin_lock(&nf->lru_lock);
-	list_add_tail(&q->lru_list, &nf->lru_list);
-	q->net->nqueues++;
-	spin_unlock(&nf->lru_lock);
-}
-
 /* RFC 3168 support :
  * We want to check ECN values of all fragments, to detect invalid combinations.
  * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
diff --git a/include/net/ip.h b/include/net/ip.h
index 2e8f055..db4a771 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -216,6 +216,12 @@
 		return 0;
 	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
 }
+
+static inline bool sysctl_dev_name_is_allowed(const char *name)
+{
+	return strcmp(name, "default") != 0  && strcmp(name, "all") != 0;
+}
+
 #else
 static inline int inet_is_local_reserved_port(struct net *net, int port)
 {
@@ -310,16 +316,7 @@
 	}
 }
 
-#define IP_IDENTS_SZ 2048u
-extern atomic_t *ip_idents;
-
-static inline u32 ip_idents_reserve(u32 hash, int segs)
-{
-	atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;
-
-	return atomic_add_return(segs, id_ptr) - segs;
-}
-
+u32 ip_idents_reserve(u32 hash, int segs);
 void __ip_select_ident(struct iphdr *iph, int segs);
 
 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
@@ -495,7 +492,6 @@
 }
 #endif
 int ip_frag_mem(struct net *net);
-int ip_frag_nqueues(struct net *net);
 
 /*
  *	Functions provided by ip_forward.c
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index a250172..a2db816 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -299,11 +299,6 @@
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static inline int ip6_frag_nqueues(struct net *net)
-{
-	return net->ipv6.frags.nqueues;
-}
-
 static inline int ip6_frag_mem(struct net *net)
 {
 	return sum_frag_mem_limit(&net->ipv6.frags);
@@ -496,8 +491,8 @@
 	u8 ecn;
 };
 
-void ip6_frag_init(struct inet_frag_queue *q, void *a);
-bool ip6_frag_match(struct inet_frag_queue *q, void *a);
+void ip6_frag_init(struct inet_frag_queue *q, const void *a);
+bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
 
 /*
  *	Equivalent of ipv4 struct ip
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 9ce5cb1..dae2e24 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4552,6 +4552,40 @@
  */
 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
 
+/**
+ * ieee80211_start_rx_ba_session_offl - start a Rx BA session
+ *
+ * Some device drivers may offload part of the Rx aggregation flow including
+ * AddBa/DelBa negotiation but may otherwise be incapable of full Rx
+ * reordering.
+ *
+ * Device drivers call this function when they complete AddBa negotiation so
+ * that mac80211 can create the structures responsible for reordering.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
+					const u8 *addr, u16 tid);
+
+/**
+ * ieee80211_stop_rx_ba_session_offl - stop a Rx BA session
+ *
+ * Some device drivers may offload part of the Rx aggregation flow including
+ * AddBa/DelBa negotiation but may otherwise be incapable of full Rx
+ * reordering.
+ *
+ * Device drivers call this function when they complete DelBa negotiation so
+ * that mac80211 can destroy the structures responsible for reordering.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
+				       const u8 *addr, u16 tid);
+
 /* Rate control API */
 
 /**
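
The two exports above give drivers that offload AddBa/DelBa negotiation a way to have
mac80211 set up and tear down its Rx reorder buffers. A hedged driver-side sketch with
a hypothetical firmware-notification handler:

	/* Called from a (hypothetical) firmware event: BA session state changed. */
	static void my_fw_ba_notify(struct ieee80211_vif *vif, const u8 *sta_addr,
				    u16 tid, bool started)
	{
		if (started)
			ieee80211_start_rx_ba_session_offl(vif, sta_addr, tid);
		else
			ieee80211_stop_rx_ba_session_offl(vif, sta_addr, tid);
	}
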
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 2b47eaa..6c10762 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -949,12 +949,12 @@
  * nla_put_msecs - Add a msecs netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
- * @jiffies: number of msecs in jiffies
+ * @njiffies: number of jiffies to convert to msecs
  */
 static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
-				unsigned long jiffies)
+				unsigned long njiffies)
 {
-	u64 tmp = jiffies_to_msecs(jiffies);
+	u64 tmp = jiffies_to_msecs(njiffies);
 	return nla_put(skb, attrtype, sizeof(u64), &tmp);
 }
 
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 307728f..8c337cd 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -311,7 +311,7 @@
 	SCTP_XMIT_OK,
 	SCTP_XMIT_PMTU_FULL,
 	SCTP_XMIT_RWND_FULL,
-	SCTP_XMIT_NAGLE_DELAY,
+	SCTP_XMIT_DELAY,
 } sctp_xmit_t;
 
 /* These are the commands for manipulating transports.  */
diff --git a/include/net/sock.h b/include/net/sock.h
index 28f7346..b91c886 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -707,7 +707,6 @@
 	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
 	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
 	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
-	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
 	SOCK_FASYNC, /* fasync() active */
 	SOCK_RXQ_OVFL,
 	SOCK_ZEROCOPY, /* buffers from userspace */
@@ -810,8 +809,7 @@
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
-				     unsigned int limit)
+static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
@@ -822,7 +820,7 @@
 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
 					      unsigned int limit)
 {
-	if (sk_rcvqueues_full(sk, skb, limit))
+	if (sk_rcvqueues_full(sk, limit))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
@@ -2167,16 +2165,13 @@
 	 * - software time stamp available and wanted
 	 *   (SOCK_TIMESTAMPING_SOFTWARE)
 	 * - hardware time stamps available and wanted
-	 *   (SOCK_TIMESTAMPING_SYS_HARDWARE or
-	 *   SOCK_TIMESTAMPING_RAW_HARDWARE)
+	 *   SOCK_TIMESTAMPING_RAW_HARDWARE
 	 */
 	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
 	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
 	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
 	    (hwtstamps->hwtstamp.tv64 &&
-	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
-	    (hwtstamps->syststamp.tv64 &&
-	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
+	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)))
 		__sock_recv_timestamp(msg, sk, skb);
 	else
 		sk->sk_stamp = kt;
@@ -2194,8 +2189,7 @@
 #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
 			   (1UL << SOCK_RCVTSTAMP)			| \
 			   (1UL << SOCK_TIMESTAMPING_SOFTWARE)		| \
-			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE)	| \
-			   (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
+			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE))
 
 	if (sk->sk_flags & FLAGS_TS_OR_DROPS)
 		__sock_recv_ts_and_drops(msg, sk, skb);
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 6bb4338..e711f20 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -148,7 +148,8 @@
  *
  * @selector: protocol identifier type
  * @protocol: protocol of type indicated
- * @priority: 3-bit unsigned integer indicating priority
+ * @priority: 3-bit unsigned integer indicating priority for IEEE, or an
+ *            8-bit 802.1p user priority bitmap for CEE
  *
  * ----
  *  Selector field values
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 40b5ca8..25084a0 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -101,6 +101,7 @@
  *  - add FATTR_CTIME
  *  - add ctime and ctimensec to fuse_setattr_in
  *  - add FUSE_RENAME2 request
+ *  - add FUSE_NO_OPEN_SUPPORT flag
  */
 
 #ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@
  * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
  * FUSE_ASYNC_DIO: asynchronous direct I/O submission
  * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
+ * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
  */
 #define FUSE_ASYNC_READ		(1 << 0)
 #define FUSE_POSIX_LOCKS	(1 << 1)
@@ -247,6 +249,7 @@
 #define FUSE_READDIRPLUS_AUTO	(1 << 14)
 #define FUSE_ASYNC_DIO		(1 << 15)
 #define FUSE_WRITEBACK_CACHE	(1 << 16)
+#define FUSE_NO_OPEN_SUPPORT	(1 << 17)
 
 /**
  * CUSE INIT request/reply flags
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index bac27fa..da2d668 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -108,7 +108,7 @@
 
 /* Rx and Tx ring - header status */
 #define TP_STATUS_TS_SOFTWARE		(1 << 29)
-#define TP_STATUS_TS_SYS_HARDWARE	(1 << 30)
+#define TP_STATUS_TS_SYS_HARDWARE	(1 << 30) /* deprecated, never set */
 #define TP_STATUS_TS_RAW_HARDWARE	(1 << 31)
 
 /* Rx ring - feature request bits */
diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h
index 5dda450..2ec9fbc 100644
--- a/include/uapi/linux/netfilter/xt_bpf.h
+++ b/include/uapi/linux/netfilter/xt_bpf.h
@@ -6,6 +6,8 @@
 
 #define XT_BPF_MAX_NUM_INSTR	64
 
+struct sk_filter;
+
 struct xt_bpf_info {
 	__u16 bpf_program_num_elem;
 	struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR];
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 0b979ee..a794d1d 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -118,6 +118,9 @@
 /* Allow last Netlink attribute to be unaligned */
 #define OVS_DP_F_UNALIGNED	(1 << 0)
 
+/* Allow datapath to associate multiple Netlink PIDs to each vport */
+#define OVS_DP_F_VPORT_PIDS	(1 << 1)
+
 /* Fixed logical ports. */
 #define OVSP_LOCAL      ((__u32)0)
 
@@ -203,9 +206,10 @@
  * this is the name of the network device.  Maximum length %IFNAMSIZ-1 bytes
  * plus a null terminator.
  * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information.
- * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that
- * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on
- * this port.  A value of zero indicates that upcalls should not be sent.
+ * @OVS_VPORT_ATTR_UPCALL_PID: The array of Netlink socket pids in userspace
+ * among which OVS_PACKET_CMD_MISS upcalls will be distributed for packets
+ * received on this port.  If this is a single-element array of value 0,
+ * upcalls should not be sent.
  * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for
  * packets sent or received through the vport.
  *
@@ -228,7 +232,8 @@
 	OVS_VPORT_ATTR_TYPE,	/* u32 OVS_VPORT_TYPE_* constant. */
 	OVS_VPORT_ATTR_NAME,	/* string name, up to IFNAMSIZ bytes long */
 	OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */
-	OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */
+	OVS_VPORT_ATTR_UPCALL_PID, /* array of u32 Netlink socket PIDs for */
+				/* receiving upcalls */
 	OVS_VPORT_ATTR_STATS,	/* struct ovs_vport_stats */
 	__OVS_VPORT_ATTR_MAX
 };
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index a5af2a2..5c1aba1 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -170,6 +170,7 @@
 	unmap->dev_bus_addr = 0;
 }
 
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
 int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
 			   unsigned long max_nr_gframes,
 			   void **__shared);
diff --git a/kernel/Makefile b/kernel/Makefile
index f2a8b62..e7360b7 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -87,6 +87,7 @@
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
+obj-$(CONFIG_NET) += bpf/
 
 obj-$(CONFIG_PERF_EVENTS) += events/
 
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
new file mode 100644
index 0000000..6a71145
--- /dev/null
+++ b/kernel/bpf/Makefile
@@ -0,0 +1 @@
+obj-y := core.o
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
new file mode 100644
index 0000000..265a02c
--- /dev/null
+++ b/kernel/bpf/core.c
@@ -0,0 +1,536 @@
+/*
+ * Linux Socket Filter - Kernel level socket filtering
+ *
+ * Based on the design of the Berkeley Packet Filter. The new
+ * internal format has been designed by PLUMgrid:
+ *
+ *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
+ *
+ * Authors:
+ *
+ *	Jay Schulist <jschlst@samba.org>
+ *	Alexei Starovoitov <ast@plumgrid.com>
+ *	Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Andi Kleen - Fix a few bad bugs and races.
+ * Kris Katterjohn - Added many additional checks in sk_chk_filter()
+ */
+#include <linux/filter.h>
+#include <linux/skbuff.h>
+#include <asm/unaligned.h>
+
+/* Registers */
+#define BPF_R0	regs[BPF_REG_0]
+#define BPF_R1	regs[BPF_REG_1]
+#define BPF_R2	regs[BPF_REG_2]
+#define BPF_R3	regs[BPF_REG_3]
+#define BPF_R4	regs[BPF_REG_4]
+#define BPF_R5	regs[BPF_REG_5]
+#define BPF_R6	regs[BPF_REG_6]
+#define BPF_R7	regs[BPF_REG_7]
+#define BPF_R8	regs[BPF_REG_8]
+#define BPF_R9	regs[BPF_REG_9]
+#define BPF_R10	regs[BPF_REG_10]
+
+/* Named registers */
+#define DST	regs[insn->dst_reg]
+#define SRC	regs[insn->src_reg]
+#define FP	regs[BPF_REG_FP]
+#define ARG1	regs[BPF_REG_ARG1]
+#define CTX	regs[BPF_REG_CTX]
+#define IMM	insn->imm
+
+/* No hurry in this branch
+ *
+ * Exported for the bpf jit load helper.
+ */
+void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
+{
+	u8 *ptr = NULL;
+
+	if (k >= SKF_NET_OFF)
+		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
+	else if (k >= SKF_LL_OFF)
+		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
+	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
+		return ptr;
+
+	return NULL;
+}
+
+/* Base function for offset calculation. Needs to go into .text section,
+ * therefore keeping it non-static as well; will also be used by JITs
+ * anyway later on, so do not let the compiler omit it.
+ */
+noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	return 0;
+}
+
+/**
+ *	__sk_run_filter - run a filter on a given context
+ *	@ctx: buffer to run the filter on
+ *	@insn: filter to apply
+ *
+ * Decode and apply filter instructions to the skb->data. Return length to
+ * keep, 0 for none. @ctx is the data we are operating on, @insn is the
+ * array of filter instructions.
+ */
+static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn)
+{
+	u64 stack[MAX_BPF_STACK / sizeof(u64)];
+	u64 regs[MAX_BPF_REG], tmp;
+	static const void *jumptable[256] = {
+		[0 ... 255] = &&default_label,
+		/* Now overwrite non-defaults ... */
+		/* 32 bit ALU operations */
+		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
+		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
+		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
+		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
+		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
+		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
+		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
+		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
+		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
+		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
+		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
+		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
+		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
+		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
+		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
+		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
+		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
+		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
+		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
+		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
+		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
+		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
+		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
+		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
+		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
+		/* 64 bit ALU operations */
+		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
+		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
+		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
+		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
+		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
+		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
+		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
+		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
+		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
+		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
+		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
+		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
+		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
+		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
+		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
+		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
+		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
+		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
+		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
+		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
+		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
+		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
+		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
+		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
+		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
+		/* Call instruction */
+		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+		/* Jumps */
+		[BPF_JMP | BPF_JA] = &&JMP_JA,
+		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
+		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
+		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
+		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
+		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
+		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
+		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
+		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
+		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
+		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
+		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
+		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
+		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
+		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
+		/* Program return */
+		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
+		/* Store instructions */
+		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
+		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
+		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
+		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
+		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
+		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
+		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
+		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
+		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
+		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
+		/* Load instructions */
+		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
+		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
+		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
+		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
+		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
+		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
+		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
+		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
+		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
+		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
+	};
+	void *ptr;
+	int off;
+
+#define CONT	 ({ insn++; goto select_insn; })
+#define CONT_JMP ({ insn++; goto select_insn; })
+
+	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
+	ARG1 = (u64) (unsigned long) ctx;
+
+	/* Registers used in classic BPF programs need to be reset first. */
+	regs[BPF_REG_A] = 0;
+	regs[BPF_REG_X] = 0;
+
+select_insn:
+	goto *jumptable[insn->code];
+
+	/* ALU */
+#define ALU(OPCODE, OP)			\
+	ALU64_##OPCODE##_X:		\
+		DST = DST OP SRC;	\
+		CONT;			\
+	ALU_##OPCODE##_X:		\
+		DST = (u32) DST OP (u32) SRC;	\
+		CONT;			\
+	ALU64_##OPCODE##_K:		\
+		DST = DST OP IMM;		\
+		CONT;			\
+	ALU_##OPCODE##_K:		\
+		DST = (u32) DST OP (u32) IMM;	\
+		CONT;
+
+	ALU(ADD,  +)
+	ALU(SUB,  -)
+	ALU(AND,  &)
+	ALU(OR,   |)
+	ALU(LSH, <<)
+	ALU(RSH, >>)
+	ALU(XOR,  ^)
+	ALU(MUL,  *)
+#undef ALU
+	ALU_NEG:
+		DST = (u32) -DST;
+		CONT;
+	ALU64_NEG:
+		DST = -DST;
+		CONT;
+	ALU_MOV_X:
+		DST = (u32) SRC;
+		CONT;
+	ALU_MOV_K:
+		DST = (u32) IMM;
+		CONT;
+	ALU64_MOV_X:
+		DST = SRC;
+		CONT;
+	ALU64_MOV_K:
+		DST = IMM;
+		CONT;
+	ALU64_ARSH_X:
+		(*(s64 *) &DST) >>= SRC;
+		CONT;
+	ALU64_ARSH_K:
+		(*(s64 *) &DST) >>= IMM;
+		CONT;
+	ALU64_MOD_X:
+		if (unlikely(SRC == 0))
+			return 0;
+		tmp = DST;
+		DST = do_div(tmp, SRC);
+		CONT;
+	ALU_MOD_X:
+		if (unlikely(SRC == 0))
+			return 0;
+		tmp = (u32) DST;
+		DST = do_div(tmp, (u32) SRC);
+		CONT;
+	ALU64_MOD_K:
+		tmp = DST;
+		DST = do_div(tmp, IMM);
+		CONT;
+	ALU_MOD_K:
+		tmp = (u32) DST;
+		DST = do_div(tmp, (u32) IMM);
+		CONT;
+	ALU64_DIV_X:
+		if (unlikely(SRC == 0))
+			return 0;
+		do_div(DST, SRC);
+		CONT;
+	ALU_DIV_X:
+		if (unlikely(SRC == 0))
+			return 0;
+		tmp = (u32) DST;
+		do_div(tmp, (u32) SRC);
+		DST = (u32) tmp;
+		CONT;
+	ALU64_DIV_K:
+		do_div(DST, IMM);
+		CONT;
+	ALU_DIV_K:
+		tmp = (u32) DST;
+		do_div(tmp, (u32) IMM);
+		DST = (u32) tmp;
+		CONT;
+	ALU_END_TO_BE:
+		switch (IMM) {
+		case 16:
+			DST = (__force u16) cpu_to_be16(DST);
+			break;
+		case 32:
+			DST = (__force u32) cpu_to_be32(DST);
+			break;
+		case 64:
+			DST = (__force u64) cpu_to_be64(DST);
+			break;
+		}
+		CONT;
+	ALU_END_TO_LE:
+		switch (IMM) {
+		case 16:
+			DST = (__force u16) cpu_to_le16(DST);
+			break;
+		case 32:
+			DST = (__force u32) cpu_to_le32(DST);
+			break;
+		case 64:
+			DST = (__force u64) cpu_to_le64(DST);
+			break;
+		}
+		CONT;
+
+	/* CALL */
+	JMP_CALL:
+		/* Function call scratches BPF_R1-BPF_R5 registers,
+		 * preserves BPF_R6-BPF_R9, and stores return value
+		 * into BPF_R0.
+		 */
+		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
+						       BPF_R4, BPF_R5);
+		CONT;
+
+	/* JMP */
+	JMP_JA:
+		insn += insn->off;
+		CONT;
+	JMP_JEQ_X:
+		if (DST == SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JEQ_K:
+		if (DST == IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JNE_X:
+		if (DST != SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JNE_K:
+		if (DST != IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JGT_X:
+		if (DST > SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JGT_K:
+		if (DST > IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JGE_X:
+		if (DST >= SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JGE_K:
+		if (DST >= IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSGT_X:
+		if (((s64) DST) > ((s64) SRC)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSGT_K:
+		if (((s64) DST) > ((s64) IMM)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSGE_X:
+		if (((s64) DST) >= ((s64) SRC)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSGE_K:
+		if (((s64) DST) >= ((s64) IMM)) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSET_X:
+		if (DST & SRC) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_JSET_K:
+		if (DST & IMM) {
+			insn += insn->off;
+			CONT_JMP;
+		}
+		CONT;
+	JMP_EXIT:
+		return BPF_R0;
+
+	/* STX and ST and LDX */
+#define LDST(SIZEOP, SIZE)						\
+	STX_MEM_##SIZEOP:						\
+		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
+		CONT;							\
+	ST_MEM_##SIZEOP:						\
+		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
+		CONT;							\
+	LDX_MEM_##SIZEOP:						\
+		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
+		CONT;
+
+	LDST(B,   u8)
+	LDST(H,  u16)
+	LDST(W,  u32)
+	LDST(DW, u64)
+#undef LDST
+	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
+		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
+			   (DST + insn->off));
+		CONT;
+	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
+		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
+			     (DST + insn->off));
+		CONT;
+	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
+		off = IMM;
+load_word:
+		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
+		 * appear in programs where ctx == skb. All programs
+		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6;
+		 * sk_convert_filter() saves it in BPF_R6 and the
+		 * internal BPF verifier will check that BPF_R6 ==
+		 * ctx.
+		 *
+		 * BPF_ABS and BPF_IND are wrappers of function calls,
+		 * so they scratch BPF_R1-BPF_R5 registers, preserve
+		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
+		 *
+		 * Implicit input:
+		 *   ctx == skb == BPF_R6 == CTX
+		 *
+		 * Explicit input:
+		 *   SRC == any register
+		 *   IMM == 32-bit immediate
+		 *
+		 * Output:
+		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
+		 */
+
+		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
+		if (likely(ptr != NULL)) {
+			BPF_R0 = get_unaligned_be32(ptr);
+			CONT;
+		}
+
+		return 0;
+	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
+		off = IMM;
+load_half:
+		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
+		if (likely(ptr != NULL)) {
+			BPF_R0 = get_unaligned_be16(ptr);
+			CONT;
+		}
+
+		return 0;
+	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
+		off = IMM;
+load_byte:
+		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
+		if (likely(ptr != NULL)) {
+			BPF_R0 = *(u8 *)ptr;
+			CONT;
+		}
+
+		return 0;
+	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
+		off = IMM + SRC;
+		goto load_word;
+	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
+		off = IMM + SRC;
+		goto load_half;
+	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
+		off = IMM + SRC;
+		goto load_byte;
+
+	default_label:
+		/* If we ever reach this, we have a bug somewhere. */
+		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
+		return 0;
+}
+
+void __weak bpf_int_jit_compile(struct sk_filter *prog)
+{
+}
+
+/**
+ *	sk_filter_select_runtime - select execution runtime for BPF program
+ *	@fp: sk_filter populated with internal BPF program
+ *
+ * Try to JIT the internal BPF program; if no JIT is available, select the
+ * interpreter. The BPF program will be executed via the SK_RUN_FILTER() macro.
+ */
+void sk_filter_select_runtime(struct sk_filter *fp)
+{
+	fp->bpf_func = (void *) __sk_run_filter;
+
+	/* Probe if internal BPF can be JITed */
+	bpf_int_jit_compile(fp);
+}
+EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
+
+/* free internal BPF program */
+void sk_filter_free(struct sk_filter *fp)
+{
+	bpf_jit_free(fp);
+}
+EXPORT_SYMBOL_GPL(sk_filter_free);
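
The interpreter added above decodes insn->code through a computed-goto jump
table and returns BPF_R0 when it reaches BPF_JMP | BPF_EXIT. As a rough,
self-contained illustration of that decode/execute shape, here is a toy
userspace model that runs a two-instruction program ("r0 = 42; exit"); the
struct mirrors the bpf_insn field names and the two opcode encodings, but
everything else is a simplification, not the kernel interpreter.

#include <stdint.h>
#include <stdio.h>

/* Cut-down model of the instruction layout used by __sk_run_filter();
 * field names follow struct bpf_insn, opcode values are the real
 * BPF_ALU64|BPF_MOV|BPF_K (0xb7) and BPF_JMP|BPF_EXIT (0x95) encodings.
 */
struct insn {
	uint8_t  code;
	uint8_t  dst_reg:4;
	uint8_t  src_reg:4;
	int16_t  off;
	int32_t  imm;
};

#define MOV64_K	0xb7	/* BPF_ALU64 | BPF_MOV | BPF_K */
#define EXIT	0x95	/* BPF_JMP   | BPF_EXIT */

/* Toy interpreter: only enough of the dispatch loop to run the program
 * below.  The real interpreter uses a computed-goto jump table instead
 * of a switch, but the decode/execute structure is the same.
 */
static uint64_t run(const struct insn *insn)
{
	uint64_t regs[11] = { 0 };

	for (;; insn++) {
		switch (insn->code) {
		case MOV64_K:
			regs[insn->dst_reg] = insn->imm;
			break;
		case EXIT:
			return regs[0];	/* program result lives in R0 */
		default:
			return 0;	/* unknown opcode: bail out */
		}
	}
}

int main(void)
{
	/* r0 = 42; exit */
	struct insn prog[] = {
		{ .code = MOV64_K, .dst_reg = 0, .imm = 42 },
		{ .code = EXIT },
	};

	printf("program returned %llu\n", (unsigned long long)run(prog));
	return 0;
}
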
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b0c95f0..6b17ac1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7458,7 +7458,19 @@
 			 struct perf_event_context *child_ctx,
 			 struct task_struct *child)
 {
-	perf_remove_from_context(child_event, true);
+	/*
+	 * Do not destroy the 'original' grouping; because of the context
+	 * switch optimization the original events could've ended up in a
+	 * random child task.
+	 *
+	 * If we were to destroy the original group, all group related
+	 * operations would cease to function properly after this random
+	 * child dies.
+	 *
+	 * Do destroy all inherited groups; we don't care about those,
+	 * and being thorough is better.
+	 */
+	perf_remove_from_context(child_event, !!child_event->parent);
 
 	/*
 	 * It can happen that the parent exits first, and has events
@@ -7474,7 +7486,7 @@
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
 	struct perf_event *child_event, *next;
-	struct perf_event_context *child_ctx;
+	struct perf_event_context *child_ctx, *parent_ctx;
 	unsigned long flags;
 
 	if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7499,6 +7511,15 @@
 	raw_spin_lock(&child_ctx->lock);
 	task_ctx_sched_out(child_ctx);
 	child->perf_event_ctxp[ctxn] = NULL;
+
+	/*
+	 * In order to avoid freeing: child_ctx->parent_ctx->task
+	 * under perf_event_context::lock, grab another reference.
+	 */
+	parent_ctx = child_ctx->parent_ctx;
+	if (parent_ctx)
+		get_ctx(parent_ctx);
+
 	/*
 	 * If this context is a clone; unclone it so it can't get
 	 * swapped to another process while we're removing all
@@ -7509,6 +7530,13 @@
 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
 	/*
+	 * Now that we no longer hold perf_event_context::lock, drop
+	 * our extra child_ctx->parent_ctx reference.
+	 */
+	if (parent_ctx)
+		put_ctx(parent_ctx);
+
+	/*
 	 * Report the task dead after unscheduling the events so that we
 	 * won't get any samples after PERF_RECORD_EXIT. We can however still
 	 * get a few PERF_RECORD_READ events.
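
The perf hunk above pins child_ctx->parent_ctx with get_ctx() while
perf_event_context::lock is still held, so that the final put_ctx(), which may
free the context and its task, only runs after the lock has been dropped. The
following is a generic sketch of that pin-under-lock, release-after-unlock
pattern; the types, helpers and the commented-out lock calls are stand-ins,
not the perf code itself.

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object; names are illustrative only. */
struct ctx {
	int refcount;
};

static void get_ctx(struct ctx *c)  { c->refcount++; }

static void put_ctx(struct ctx *c)
{
	if (--c->refcount == 0) {
		/* Freeing may do work that must not happen under the
		 * spinlock protecting the parent pointer, which is why
		 * the last put has to run after the unlock.
		 */
		free(c);
		printf("ctx freed\n");
	}
}

int main(void)
{
	struct ctx *parent = calloc(1, sizeof(*parent));
	struct ctx *pinned;

	parent->refcount = 1;		/* reference owned by the child ctx */

	/* lock(child_ctx->lock); -- pretend we hold the spinlock here */
	pinned = parent;
	if (pinned)
		get_ctx(pinned);	/* pin it while still locked */
	put_ctx(parent);		/* "unclone" drops the child's ref; */
	parent = NULL;			/* object survives thanks to 'pinned' */
	/* unlock(child_ctx->lock); */

	put_ctx(pinned);		/* the potentially-final put runs here */
	return 0;
}
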
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3214289..734e9a7 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2037,19 +2037,23 @@
 {
 	unsigned long *iter;
 	struct kprobe_blacklist_entry *ent;
-	unsigned long offset = 0, size = 0;
+	unsigned long entry, offset = 0, size = 0;
 
 	for (iter = start; iter < end; iter++) {
-		if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
-			pr_err("Failed to find blacklist %p\n", (void *)*iter);
+		entry = arch_deref_entry_point((void *)*iter);
+
+		if (!kernel_text_address(entry) ||
+		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+			pr_err("Failed to find blacklist at %p\n",
+				(void *)entry);
 			continue;
 		}
 
 		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
 		if (!ent)
 			return -ENOMEM;
-		ent->start_addr = *iter;
-		ent->end_addr = *iter + size;
+		ent->start_addr = entry;
+		ent->end_addr = entry + size;
 		INIT_LIST_HEAD(&ent->list);
 		list_add_tail(&ent->list, &kprobe_blacklist);
 	}
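
The kprobes change above passes every blacklist entry through
arch_deref_entry_point() and checks kernel_text_address() before the kallsyms
lookup, because on some architectures a function symbol points at a function
descriptor rather than at the first instruction. A hedged sketch of that
distinction follows; the descriptor layout is only an illustrative stand-in
for descriptor-based ABIs such as ppc64 ABIv1, not a copy of any arch
implementation.

#include <stdio.h>

/* On most architectures a function pointer already is the entry point;
 * this mirrors the __weak default of arch_deref_entry_point().
 */
static unsigned long deref_entry_point_default(void *entry)
{
	return (unsigned long)entry;
}

/* Illustrative descriptor-based ABI: the "function pointer" points at a
 * small descriptor whose first word holds the real code address.
 */
struct func_desc {
	unsigned long entry;	/* address of the first instruction */
	unsigned long toc;	/* TOC/GOT pointer on such ABIs */
};

static unsigned long deref_entry_point_descriptor(void *entry)
{
	return ((struct func_desc *)entry)->entry;
}

int main(void)
{
	struct func_desc desc = { .entry = 0x123456UL, .toc = 0 };

	printf("plain ABI:      %#lx\n",
	       deref_entry_point_default((void *)0x654321UL));
	printf("descriptor ABI: %#lx\n",
	       deref_entry_point_descriptor(&desc));
	return 0;
}
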
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 301bbc2..565743d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -248,7 +248,7 @@
 	if (ret)
 		goto free_prog;
 
-	/* Convert 'sock_filter' insns to 'sock_filter_int' insns */
+	/* Convert 'sock_filter' insns to 'bpf_insn' insns */
 	ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
 	if (ret)
 		goto free_prog;
diff --git a/lib/random32.c b/lib/random32.c
index fa5da61..c9b6bf3 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -40,6 +40,10 @@
 
 #ifdef CONFIG_RANDOM32_SELFTEST
 static void __init prandom_state_selftest(void);
+#else
+static inline void prandom_state_selftest(void)
+{
+}
 #endif
 
 static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
@@ -53,8 +57,7 @@
  */
 u32 prandom_u32_state(struct rnd_state *state)
 {
-#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
-
+#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
 	state->s1 = TAUSWORTHE(state->s1,  6U, 13U, 4294967294U, 18U);
 	state->s2 = TAUSWORTHE(state->s2,  2U, 27U, 4294967288U,  2U);
 	state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U,  7U);
@@ -147,21 +150,25 @@
 	prandom_u32_state(state);
 }
 
-static void prandom_seed_very_weak(struct rnd_state *state, u32 seed)
+static u32 __extract_hwseed(void)
 {
-	/* Note: This sort of seeding is ONLY used in test cases and
-	 * during boot at the time from core_initcall until late_initcall
-	 * as we don't have a stronger entropy source available yet.
-	 * After late_initcall, we reseed entire state, we have to (!),
-	 * otherwise an attacker just needs to search 32 bit space to
-	 * probe for our internal 128 bit state if he knows a couple
-	 * of prandom32 outputs!
-	 */
-#define LCG(x)	((x) * 69069U)	/* super-duper LCG */
-	state->s1 = __seed(LCG(seed),        2U);
-	state->s2 = __seed(LCG(state->s1),   8U);
-	state->s3 = __seed(LCG(state->s2),  16U);
-	state->s4 = __seed(LCG(state->s3), 128U);
+	u32 val = 0;
+
+	(void)(arch_get_random_seed_int(&val) ||
+	       arch_get_random_int(&val));
+
+	return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+			       bool mix_with_hwseed)
+{
+#define LCG(x)	 ((x) * 69069U)	/* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+	state->s1 = __seed(HWSEED() ^ LCG(seed),        2U);
+	state->s2 = __seed(HWSEED() ^ LCG(state->s1),   8U);
+	state->s3 = __seed(HWSEED() ^ LCG(state->s2),  16U);
+	state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
 }
 
 /**
@@ -194,14 +201,13 @@
 {
 	int i;
 
-#ifdef CONFIG_RANDOM32_SELFTEST
 	prandom_state_selftest();
-#endif
 
 	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state,i);
+		u32 weak_seed = (i + jiffies) ^ random_get_entropy();
 
-		prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
+		prandom_seed_early(state, weak_seed, true);
 		prandom_warmup(state);
 	}
 
@@ -210,6 +216,7 @@
 core_initcall(prandom_init);
 
 static void __prandom_timer(unsigned long dontcare);
+
 static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
 
 static void __prandom_timer(unsigned long dontcare)
@@ -419,7 +426,7 @@
 	for (i = 0; i < ARRAY_SIZE(test1); i++) {
 		struct rnd_state state;
 
-		prandom_seed_very_weak(&state, test1[i].seed);
+		prandom_seed_early(&state, test1[i].seed, false);
 		prandom_warmup(&state);
 
 		if (test1[i].result != prandom_u32_state(&state))
@@ -434,7 +441,7 @@
 	for (i = 0; i < ARRAY_SIZE(test2); i++) {
 		struct rnd_state state;
 
-		prandom_seed_very_weak(&state, test2[i].seed);
+		prandom_seed_early(&state, test2[i].seed, false);
 		prandom_warmup(&state);
 
 		for (j = 0; j < test2[i].iteration - 1; j++)
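
prandom_seed_early() above keeps the old LCG-based chain but optionally XORs
each step with a value from the CPU's hardware random source, while the
selftests pass mix_with_hwseed = false to stay deterministic. Here is a small
userspace model of that chain under the same assumptions; hw_seed() is a stub
for arch_get_random_seed_int()/arch_get_random_int() and seed_min() mimics
__seed()'s minimum-value clamp.

#include <stdint.h>
#include <stdio.h>

#define LCG(x)	((x) * 69069U)	/* same "super-duper" LCG as above */

/* Stub for the hardware seed source; pretend no hardware RNG exists,
 * which is also what the kernel helpers fall back to (a value of 0).
 */
static uint32_t hw_seed(void)
{
	return 0;
}

/* Mimic __seed(): keep each state word above the Tausworthe minimum. */
static uint32_t seed_min(uint32_t x, uint32_t m)
{
	return (x < m) ? x + m : x;
}

static void seed_early(uint32_t s[4], uint32_t seed, int mix_with_hwseed)
{
#define HWSEED() (mix_with_hwseed ? hw_seed() : 0)
	s[0] = seed_min(HWSEED() ^ LCG(seed),   2U);
	s[1] = seed_min(HWSEED() ^ LCG(s[0]),   8U);
	s[2] = seed_min(HWSEED() ^ LCG(s[1]),  16U);
	s[3] = seed_min(HWSEED() ^ LCG(s[2]), 128U);
#undef HWSEED
}

int main(void)
{
	uint32_t s[4];

	seed_early(s, 0xdeadbeefU, 0);	/* deterministic, like the selftest */
	printf("s1=%u s2=%u s3=%u s4=%u\n", s[0], s[1], s[2], s[3]);
	return 0;
}
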
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index c579e0f..5f48623 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -66,7 +66,7 @@
 	const char *descr;
 	union {
 		struct sock_filter insns[MAX_INSNS];
-		struct sock_filter_int insns_int[MAX_INSNS];
+		struct bpf_insn insns_int[MAX_INSNS];
 	} u;
 	__u8 aux;
 	__u8 data[MAX_DATA];
@@ -1807,7 +1807,7 @@
 
 		fp->len = flen;
 		memcpy(fp->insnsi, tests[which].u.insns_int,
-		       fp->len * sizeof(struct sock_filter_int));
+		       fp->len * sizeof(struct bpf_insn));
 
 		sk_filter_select_runtime(fp);
 		break;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2024bbd..9221c02 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2604,6 +2604,7 @@
 		} else {
 			if (cow)
 				huge_ptep_set_wrprotect(src, addr, src_pte);
+			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
 			page_dup_rmap(ptepage);
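
The hugetlb hunk above re-reads the source PTE after huge_ptep_set_wrprotect(),
since the entry value read earlier is a local snapshot and no longer matches
the PTE that is about to be shared. The toy program below illustrates that
stale-snapshot pitfall with made-up pte helpers; it is only a model of the
pattern, not of hugetlb itself.

#include <stdio.h>

/* Toy "PTE": the low bit is a write-permission flag. */
typedef unsigned long pte_t;
#define PTE_WRITE 0x1UL

static pte_t ptep_get(const pte_t *ptep)     { return *ptep; }
static void  ptep_set_wrprotect(pte_t *ptep) { *ptep &= ~PTE_WRITE; }

int main(void)
{
	pte_t pte = 0x1000UL | PTE_WRITE;	/* page frame + writable */
	pte_t entry = ptep_get(&pte);		/* local snapshot */

	ptep_set_wrprotect(&pte);		/* modifies the real PTE... */

	/* ...so the snapshot is stale and must be re-read before use. */
	printf("stale entry:   %#lx\n", entry);
	entry = ptep_get(&pte);
	printf("re-read entry: %#lx\n", entry);
	return 0;
}
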
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c6399e3..7211a73 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -435,7 +435,7 @@
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page_to_pgoff(page);
 	read_lock(&tasklist_lock);
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@
 	mutex_lock(&mapping->i_mmap_mutex);
 	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
-		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		pgoff_t pgoff = page_to_pgoff(page);
 		struct task_struct *t = task_early_kill(tsk, force_early);
 
 		if (!t)
diff --git a/mm/memory.c b/mm/memory.c
index d67fd9f..7e8d820 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2882,7 +2882,8 @@
 	 * if page by the offset is not ready to be mapped (cold cache or
 	 * something).
 	 */
-	if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
+	if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
+	    fault_around_pages() > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 		do_fault_around(vma, address, pte, pgoff, flags);
 		if (!pte_same(*pte, orig_pte))
diff --git a/mm/migrate.c b/mm/migrate.c
index 9e0beaa..be6dbf9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -988,9 +988,10 @@
 	 * it.  Otherwise, putback_lru_page() will drop the reference grabbed
 	 * during isolation.
 	 */
-	if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+	if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+		ClearPageSwapBacked(newpage);
 		put_new_page(newpage, private);
-	else
+	} else
 		putback_lru_page(newpage);
 
 	if (result) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ea758b..8bcfe3a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6062,11 +6062,13 @@
 }
 
 /**
- * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
+ * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest to retrieve
- * @end_bitidx: The last bit of interest
- * returns pageblock_bits flags
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest to retrieve
+ * @mask: mask of bits that the caller is interested in
+ *
+ * Return: pageblock_bits flags
  */
 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 					unsigned long end_bitidx,
@@ -6091,9 +6093,10 @@
 /**
  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest
- * @end_bitidx: The last bit of interest
  * @flags: The flags to set
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest
+ * @mask: mask of bits that the caller is interested in
  */
 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 					unsigned long pfn,
diff --git a/mm/rmap.c b/mm/rmap.c
index b7e94eb..22a4a76 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@
 static inline unsigned long
 __vma_address(struct page *page, struct vm_area_struct *vma)
 {
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-	if (unlikely(is_vm_hugetlb_page(vma)))
-		pgoff = page->index << huge_page_order(page_hstate(page));
-
+	pgoff_t pgoff = page_to_pgoff(page);
 	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 }
 
@@ -1639,7 +1635,7 @@
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff_t pgoff = page_to_pgoff(page);
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
@@ -1680,7 +1676,7 @@
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
-	pgoff_t pgoff = page->index << compound_order(page);
+	pgoff_t pgoff = page_to_pgoff(page);
 	struct vm_area_struct *vma;
 	int ret = SWAP_AGAIN;
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 1140f49..af68b15 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
-	int	mode;		/* FALLOC_FL mode currently operating */
+	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
 	pgoff_t start;		/* start of range currently being fallocated */
 	pgoff_t next;		/* the next page offset to be fallocated */
 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
@@ -468,23 +468,20 @@
 		return;
 
 	index = start;
-	for ( ; ; ) {
+	while (index < end) {
 		cond_resched();
 
 		pvec.nr = find_get_entries(mapping, index,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 				pvec.pages, indices);
 		if (!pvec.nr) {
-			if (index == start || unfalloc)
+			/* If all gone or hole-punch or unfalloc, we're done */
+			if (index == start || end != -1)
 				break;
+			/* But if truncating, restart to make sure all gone */
 			index = start;
 			continue;
 		}
-		if ((index == start || unfalloc) && indices[0] >= end) {
-			pagevec_remove_exceptionals(&pvec);
-			pagevec_release(&pvec);
-			break;
-		}
 		mem_cgroup_uncharge_start();
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@
 			if (radix_tree_exceptional_entry(page)) {
 				if (unfalloc)
 					continue;
-				nr_swaps_freed += !shmem_free_swap(mapping,
-								index, page);
+				if (shmem_free_swap(mapping, index, page)) {
+					/* Swap was replaced by page: retry */
+					index--;
+					break;
+				}
+				nr_swaps_freed++;
 				continue;
 			}
 
@@ -506,6 +507,11 @@
 				if (page->mapping == mapping) {
 					VM_BUG_ON_PAGE(PageWriteback(page), page);
 					truncate_inode_page(mapping, page);
+				} else {
+					/* Page was replaced by swap: retry */
+					unlock_page(page);
+					index--;
+					break;
 				}
 			}
 			unlock_page(page);
@@ -760,7 +766,7 @@
 			spin_lock(&inode->i_lock);
 			shmem_falloc = inode->i_private;
 			if (shmem_falloc &&
-			    !shmem_falloc->mode &&
+			    !shmem_falloc->waitq &&
 			    index >= shmem_falloc->start &&
 			    index < shmem_falloc->next)
 				shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@
 	 * Trinity finds that probing a hole which tmpfs is punching can
 	 * prevent the hole-punch from ever completing: which in turn
 	 * locks writers out with its hold on i_mutex.  So refrain from
-	 * faulting pages into the hole while it's being punched, and
-	 * wait on i_mutex to be released if vmf->flags permits.
+	 * faulting pages into the hole while it's being punched.  Although
+	 * shmem_undo_range() does remove the additions, it may be unable to
+	 * keep up, as each new page needs its own unmap_mapping_range() call,
+	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
+	 *
+	 * It does not matter if we sometimes reach this check just before the
+	 * hole-punch begins, so that one fault then races with the punch:
+	 * we just need to make racing faults a rare case.
+	 *
+	 * The implementation below would be much simpler if we just used a
+	 * standard mutex or completion: but we cannot take i_mutex in fault,
+	 * and bloating every shmem inode for this unlikely case would be sad.
 	 */
 	if (unlikely(inode->i_private)) {
 		struct shmem_falloc *shmem_falloc;
 
 		spin_lock(&inode->i_lock);
 		shmem_falloc = inode->i_private;
-		if (!shmem_falloc ||
-		    shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
-		    vmf->pgoff < shmem_falloc->start ||
-		    vmf->pgoff >= shmem_falloc->next)
-			shmem_falloc = NULL;
-		spin_unlock(&inode->i_lock);
-		/*
-		 * i_lock has protected us from taking shmem_falloc seriously
-		 * once return from shmem_fallocate() went back up that stack.
-		 * i_lock does not serialize with i_mutex at all, but it does
-		 * not matter if sometimes we wait unnecessarily, or sometimes
-		 * miss out on waiting: we just need to make those cases rare.
-		 */
-		if (shmem_falloc) {
+		if (shmem_falloc &&
+		    shmem_falloc->waitq &&
+		    vmf->pgoff >= shmem_falloc->start &&
+		    vmf->pgoff < shmem_falloc->next) {
+			wait_queue_head_t *shmem_falloc_waitq;
+			DEFINE_WAIT(shmem_fault_wait);
+
+			ret = VM_FAULT_NOPAGE;
 			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
 			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				/* It's polite to up mmap_sem if we can */
 				up_read(&vma->vm_mm->mmap_sem);
-				mutex_lock(&inode->i_mutex);
-				mutex_unlock(&inode->i_mutex);
-				return VM_FAULT_RETRY;
+				ret = VM_FAULT_RETRY;
 			}
-			/* cond_resched? Leave that to GUP or return to user */
-			return VM_FAULT_NOPAGE;
+
+			shmem_falloc_waitq = shmem_falloc->waitq;
+			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock(&inode->i_lock);
+			schedule();
+
+			/*
+			 * shmem_falloc_waitq points into the shmem_fallocate()
+			 * stack of the hole-punching task: shmem_falloc_waitq
+			 * is usually invalid by the time we reach here, but
+			 * finish_wait() does not dereference it in that case;
+			 * though i_lock is still needed lest we race with wake_up_all().
+			 */
+			spin_lock(&inode->i_lock);
+			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+			spin_unlock(&inode->i_lock);
+			return ret;
 		}
+		spin_unlock(&inode->i_lock);
 	}
 
 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@
 
 	mutex_lock(&inode->i_mutex);
 
-	shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
 	if (mode & FALLOC_FL_PUNCH_HOLE) {
 		struct address_space *mapping = file->f_mapping;
 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
+		shmem_falloc.waitq = &shmem_falloc_waitq;
 		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
 		spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@
 					    1 + unmap_end - unmap_start, 0);
 		shmem_truncate_range(inode, offset, offset + len - 1);
 		/* No need to unmap again: hole-punching leaves COWed pages */
+
+		spin_lock(&inode->i_lock);
+		inode->i_private = NULL;
+		wake_up_all(&shmem_falloc_waitq);
+		spin_unlock(&inode->i_lock);
 		error = 0;
-		goto undone;
+		goto out;
 	}
 
 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@
 		goto out;
 	}
 
+	shmem_falloc.waitq = NULL;
 	shmem_falloc.start = start;
 	shmem_falloc.next  = start;
 	shmem_falloc.nr_falloced = 0;
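
The shmem changes above replace the old i_mutex trick with an on-stack wait
queue: shmem_fallocate() publishes a pointer to a waitqueue living on its own
stack, faulting tasks sleep on it, and after the punch it clears
inode->i_private and calls wake_up_all() under i_lock. The pthreads sketch
below models that handshake; the condition variable and all names stand in for
the kernel's waitqueue/finish_wait machinery and are assumptions of the sketch.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-ins for inode->i_lock and inode->i_private. */
static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t *punch_waitq;	/* NULL: no hole-punch in progress */

static void *fault_path(void *arg)
{
	pthread_mutex_lock(&i_lock);
	while (punch_waitq)		/* refrain from faulting into the hole */
		pthread_cond_wait(punch_waitq, &i_lock);
	pthread_mutex_unlock(&i_lock);
	printf("fault proceeds after the punch finished\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* "shmem_fallocate": publish an on-stack waitqueue... */
	pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;

	pthread_mutex_lock(&i_lock);
	punch_waitq = &waitq;
	pthread_mutex_unlock(&i_lock);

	pthread_create(&t, NULL, fault_path, NULL);
	usleep(100 * 1000);		/* ...do the actual hole punching... */

	/* ...then retract it and wake all waiters under the lock. */
	pthread_mutex_lock(&i_lock);
	punch_waitq = NULL;
	pthread_cond_broadcast(&waitq);
	pthread_mutex_unlock(&i_lock);

	pthread_join(t, NULL);
	return 0;
}
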
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 735e01a..d31c4ba 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -55,7 +55,7 @@
 			continue;
 		}
 
-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
+#if !defined(CONFIG_SLUB)
 		if (!strcmp(s->name, name)) {
 			pr_err("%s (%s): Cache name already exists.\n",
 			       __func__, name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 6a78c81..eda2473 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -355,14 +355,16 @@
 	for ( ; ; ) {
 		cond_resched();
 		if (!pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE),
-			indices)) {
+			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+			/* If all gone from start onwards, we're done */
 			if (index == start)
 				break;
+			/* Otherwise restart to make sure all gone */
 			index = start;
 			continue;
 		}
 		if (index == start && indices[0] >= end) {
+			/* All gone out of hole to be punched, we're done */
 			pagevec_remove_exceptionals(&pvec);
 			pagevec_release(&pvec);
 			break;
@@ -373,8 +375,11 @@
 
 			/* We rely upon deletion not changing page->index */
 			index = indices[i];
-			if (index >= end)
+			if (index >= end) {
+				/* Restart punch to make sure all gone */
+				index = start - 1;
 				break;
+			}
 
 			if (radix_tree_exceptional_entry(page)) {
 				clear_exceptional_entry(mapping, index, page);
diff --git a/net/6lowpan/Kconfig b/net/6lowpan/Kconfig
new file mode 100644
index 0000000..028a5c6
--- /dev/null
+++ b/net/6lowpan/Kconfig
@@ -0,0 +1,6 @@
+config 6LOWPAN
+	bool "6LoWPAN Support"
+	depends on IPV6
+	---help---
+	  This enables IPv6 over Low power Wireless Personal Area Networks
+	  ("6LoWPAN"), which is supported by the IEEE 802.15.4 and Bluetooth stacks.
diff --git a/net/6lowpan/Makefile b/net/6lowpan/Makefile
new file mode 100644
index 0000000..415886b
--- /dev/null
+++ b/net/6lowpan/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_6LOWPAN) := 6lowpan.o
+
+6lowpan-y := iphc.o
diff --git a/net/ieee802154/6lowpan_iphc.c b/net/6lowpan/iphc.c
similarity index 100%
rename from net/ieee802154/6lowpan_iphc.c
rename to net/6lowpan/iphc.c
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index cba9c21..64c6bed 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -325,23 +325,24 @@
 	netdev_update_features(vlandev);
 }
 
-static void __vlan_device_event(struct net_device *dev, unsigned long event)
+static int __vlan_device_event(struct net_device *dev, unsigned long event)
 {
+	int err = 0;
+
 	switch (event) {
 	case NETDEV_CHANGENAME:
 		vlan_proc_rem_dev(dev);
-		if (vlan_proc_add_dev(dev) < 0)
-			pr_warn("failed to change proc name for %s\n",
-				dev->name);
+		err = vlan_proc_add_dev(dev);
 		break;
 	case NETDEV_REGISTER:
-		if (vlan_proc_add_dev(dev) < 0)
-			pr_warn("failed to add proc entry for %s\n", dev->name);
+		err = vlan_proc_add_dev(dev);
 		break;
 	case NETDEV_UNREGISTER:
 		vlan_proc_rem_dev(dev);
 		break;
 	}
+
+	return err;
 }
 
 static int vlan_device_event(struct notifier_block *unused, unsigned long event,
@@ -356,8 +357,12 @@
 	bool last = false;
 	LIST_HEAD(list);
 
-	if (is_vlan_dev(dev))
-		__vlan_device_event(dev, event);
+	if (is_vlan_dev(dev)) {
+		int err = __vlan_device_event(dev, event);
+
+		if (err)
+			return notifier_from_errno(err);
+	}
 
 	if ((event == NETDEV_UP) &&
 	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
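
With the change above, __vlan_device_event() propagates the
vlan_proc_add_dev() error and vlan_device_event() converts it with
notifier_from_errno(), letting the notifier chain veto the event instead of
merely logging a warning. The sketch below shows, with simplified constants
and invented stand-in functions, how such an errno round-trips through a
notifier return value; it is a model of the convention, not the kernel's
notifier implementation.

#include <stdio.h>

/* Simplified model of the notifier return-value convention: a STOP bit
 * plus an encoded errno.  Constants here are illustrative, not copied
 * from include/linux/notifier.h.
 */
#define NOTIFY_OK	 0x0001
#define NOTIFY_STOP_MASK 0x8000

static int notifier_from_errno(int err)
{
	return err ? (NOTIFY_STOP_MASK | (NOTIFY_OK - err)) : NOTIFY_OK;
}

static int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

/* Stand-in for vlan_proc_add_dev() failing with -ENOMEM (-12). */
static int fake_proc_add_dev(void)
{
	return -12;
}

static int fake_vlan_device_event(void)
{
	int err = fake_proc_add_dev();

	if (err)
		return notifier_from_errno(err);	/* veto the event */
	return NOTIFY_OK;
}

int main(void)
{
	int ret = fake_vlan_device_event();

	/* The notifier-chain caller unpacks the errno again. */
	printf("notifier returned %#x -> errno %d\n",
	       ret, notifier_to_errno(ret));
	return 0;
}
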
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 1d0e8921..ae63cf7 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -171,6 +171,8 @@
 	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 	struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
 
+	if (!strcmp(vlandev->name, name_conf))
+		return -EINVAL;
 	vlan->dent =
 		proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
 				 vn->proc_vlan_dir, &vlandev_fops, vlandev);
diff --git a/net/Kconfig b/net/Kconfig
index d92afe4..4051fdf 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -214,6 +214,7 @@
 source "net/x25/Kconfig"
 source "net/lapb/Kconfig"
 source "net/phonet/Kconfig"
+source "net/6lowpan/Kconfig"
 source "net/ieee802154/Kconfig"
 source "net/mac802154/Kconfig"
 source "net/sched/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index cbbbe6d..7ed1970 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -57,7 +57,8 @@
 ifneq ($(CONFIG_DCB),)
 obj-y				+= dcb/
 endif
-obj-y				+= ieee802154/
+obj-$(CONFIG_6LOWPAN)		+= 6lowpan/
+obj-$(CONFIG_IEEE802154)	+= ieee802154/
 obj-$(CONFIG_MAC802154)		+= mac802154/
 
 ifeq ($(CONFIG_NET),y)
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index f5afaa2..600fb29 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -41,8 +41,7 @@
 
 config BT_6LOWPAN
 	tristate "Bluetooth 6LoWPAN support"
-	depends on BT && IPV6
-	select 6LOWPAN_IPHC if BT_6LOWPAN
+	depends on BT && 6LOWPAN
 	help
 	  IPv6 compression over Bluetooth Low Energy.
 
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index bb39509..016cdb6 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -113,8 +113,9 @@
 {
 	bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
 	struct hci_conn *hcon;
+	u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
 
-	hcon = hci_conn_add(hdev, AMP_LINK, dst);
+	hcon = hci_conn_add(hdev, AMP_LINK, dst, role);
 	if (!hcon)
 		return NULL;
 
@@ -125,7 +126,6 @@
 	hcon->handle = __next_handle(mgr);
 	hcon->remote_id = remote_id;
 	hcon->amp_mgr = amp_mgr_get(mgr);
-	hcon->out = out;
 
 	return hcon;
 }
@@ -133,8 +133,8 @@
 /* AMP crypto key generation interface */
 static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
 {
-	int ret = 0;
 	struct crypto_shash *tfm;
+	int ret;
 
 	if (!ksize)
 		return -EINVAL;
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index cd75e4d..1ca8a87 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -362,12 +362,6 @@
 		CAPIMSG_SETCONTROL(skb->data, contr);
 	}
 
-	if (!ctrl) {
-		BT_ERR("Can't find controller %d for message", session->num);
-		kfree_skb(skb);
-		return;
-	}
-
 	capi_ctr_handle_message(ctrl, appl, skb);
 }
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 490ee88..b50dabb 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -66,8 +66,7 @@
 
 	conn->state = BT_CONNECT;
 	conn->out = true;
-
-	set_bit(HCI_CONN_MASTER, &conn->flags);
+	conn->role = HCI_ROLE_MASTER;
 
 	conn->attempt++;
 
@@ -335,7 +334,7 @@
 			 * event handling and hci_clock_offset_evt function.
 			 */
 			if (conn->type == ACL_LINK &&
-			    test_bit(HCI_CONN_MASTER, &conn->flags)) {
+			    conn->role == HCI_ROLE_MASTER) {
 				struct hci_dev *hdev = conn->hdev;
 				struct hci_cp_read_clock_offset cp;
 
@@ -422,13 +421,14 @@
 	hci_le_create_connection_cancel(conn);
 }
 
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+			      u8 role)
 {
 	struct hci_conn *conn;
 
 	BT_DBG("%s dst %pMR", hdev->name, dst);
 
-	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
+	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
 	if (!conn)
 		return NULL;
 
@@ -436,6 +436,7 @@
 	bacpy(&conn->src, &hdev->bdaddr);
 	conn->hdev  = hdev;
 	conn->type  = type;
+	conn->role  = role;
 	conn->mode  = HCI_CM_ACTIVE;
 	conn->state = BT_OPEN;
 	conn->auth_type = HCI_AT_GENERAL_BONDING;
@@ -448,6 +449,9 @@
 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
 
+	if (conn->role == HCI_ROLE_MASTER)
+		conn->out = true;
+
 	switch (type) {
 	case ACL_LINK:
 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
@@ -698,7 +702,7 @@
 
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				u8 dst_type, u8 sec_level, u16 conn_timeout,
-				bool master)
+				u8 role)
 {
 	struct hci_conn_params *params;
 	struct hci_conn *conn;
@@ -747,7 +751,7 @@
 		dst_type = ADDR_LE_DEV_RANDOM;
 	}
 
-	conn = hci_conn_add(hdev, LE_LINK, dst);
+	conn = hci_conn_add(hdev, LE_LINK, dst, role);
 	if (!conn)
 		return ERR_PTR(-ENOMEM);
 
@@ -771,7 +775,7 @@
 	}
 
 	/* If requested to connect as slave use directed advertising */
-	if (!master) {
+	if (conn->role == HCI_ROLE_SLAVE) {
 		/* If we're active scanning most controllers are unable
 		 * to initiate advertising. Simply reject the attempt.
 		 */
@@ -786,9 +790,6 @@
 		goto create_conn;
 	}
 
-	conn->out = true;
-	set_bit(HCI_CONN_MASTER, &conn->flags);
-
 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
 	if (params) {
 		conn->le_conn_min_interval = params->conn_min_interval;
@@ -833,11 +834,11 @@
 	struct hci_conn *acl;
 
 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
-		return ERR_PTR(-ENOTSUPP);
+		return ERR_PTR(-EOPNOTSUPP);
 
 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
 	if (!acl) {
-		acl = hci_conn_add(hdev, ACL_LINK, dst);
+		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
 		if (!acl)
 			return ERR_PTR(-ENOMEM);
 	}
@@ -866,7 +867,7 @@
 
 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
 	if (!sco) {
-		sco = hci_conn_add(hdev, type, dst);
+		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
 		if (!sco) {
 			hci_conn_drop(acl);
 			return ERR_PTR(-ENOMEM);
@@ -972,7 +973,8 @@
 }
 
 /* Enable security */
-int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
+		      bool initiator)
 {
 	BT_DBG("hcon %p", conn);
 
@@ -1025,6 +1027,9 @@
 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
 		return 0;
 
+	if (initiator)
+		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
+
 	if (!hci_conn_auth(conn, sec_level, auth_type))
 		return 0;
 
@@ -1076,7 +1081,7 @@
 {
 	BT_DBG("hcon %p", conn);
 
-	if (!role && test_bit(HCI_CONN_MASTER, &conn->flags))
+	if (role == conn->role)
 		return 1;
 
 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
@@ -1151,7 +1156,7 @@
 {
 	u32 link_mode = 0;
 
-	if (test_bit(HCI_CONN_MASTER, &conn->flags))
+	if (conn->role == HCI_ROLE_MASTER)
 		link_mode |= HCI_LM_MASTER;
 
 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
@@ -1277,7 +1282,7 @@
 
 	BT_DBG("%s hcon %p", hdev->name, conn);
 
-	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 	if (!chan)
 		return NULL;
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 84431b8..cfcb6055 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -54,6 +54,15 @@
 /* HCI ID Numbering */
 static DEFINE_IDA(hci_index_ida);
 
+/* ----- HCI requests ----- */
+
+#define HCI_REQ_DONE	  0
+#define HCI_REQ_PEND	  1
+#define HCI_REQ_CANCELED  2
+
+#define hci_req_lock(d)		mutex_lock(&d->req_lock)
+#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
+
 /* ---- HCI notifications ---- */
 
 static void hci_notify(struct hci_dev *hdev, int event)
@@ -1339,9 +1348,6 @@
 	/* Read LE Supported States */
 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
 
-	/* Read LE Advertising Channel TX Power */
-	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
-
 	/* Read LE White List Size */
 	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
 
@@ -1416,14 +1422,17 @@
 		/* Use a different default for LE-only devices */
 		memset(events, 0, sizeof(events));
 		events[0] |= 0x10; /* Disconnection Complete */
-		events[0] |= 0x80; /* Encryption Change */
 		events[1] |= 0x08; /* Read Remote Version Information Complete */
 		events[1] |= 0x20; /* Command Complete */
 		events[1] |= 0x40; /* Command Status */
 		events[1] |= 0x80; /* Hardware Error */
 		events[2] |= 0x04; /* Number of Completed Packets */
 		events[3] |= 0x02; /* Data Buffer Overflow */
-		events[5] |= 0x80; /* Encryption Key Refresh Complete */
+
+		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
+			events[0] |= 0x80; /* Encryption Change */
+			events[5] |= 0x80; /* Encryption Key Refresh Complete */
+		}
 	}
 
 	if (lmp_inq_rssi_capable(hdev))
@@ -1476,8 +1485,6 @@
 	if (lmp_le_capable(hdev))
 		le_setup(req);
 
-	hci_setup_event_mask(req);
-
 	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
 	 * local supported commands HCI command.
 	 */
@@ -1605,6 +1612,8 @@
 	struct hci_dev *hdev = req->hdev;
 	u8 p;
 
+	hci_setup_event_mask(req);
+
 	/* Some Broadcom based Bluetooth controllers do not support the
 	 * Delete Stored Link Key command. They are clearly indicating its
 	 * absence in the bit mask of supported commands.
@@ -1635,7 +1644,10 @@
 		u8 events[8];
 
 		memset(events, 0, sizeof(events));
-		events[0] = 0x1f;
+		events[0] = 0x0f;
+
+		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
+			events[0] |= 0x10;	/* LE Long Term Key Request */
 
 		/* If controller supports the Connection Parameters Request
 		 * Link Layer Procedure, enable the corresponding event.
@@ -1648,6 +1660,11 @@
 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
 			    events);
 
+		if (hdev->commands[25] & 0x40) {
+			/* Read LE Advertising Channel TX Power */
+			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
+		}
+
 		hci_set_le_support(req);
 	}
 
@@ -2071,7 +2088,7 @@
 	}
 
 	/* Entry not in the cache. Add new one. */
-	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
+	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
 	if (!ie) {
 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
 		goto done;
@@ -2435,6 +2452,16 @@
 	 */
 	flush_workqueue(hdev->req_workqueue);
 
+	/* For controllers that are not using the management interface
+	 * and are brought up via the legacy ioctl, set the HCI_PAIRABLE
+	 * bit so that pairing works for them. Once the management interface
+	 * is in use this bit will be cleared again and userspace has
+	 * to explicitly enable it.
+	 */
+	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+	    !test_bit(HCI_MGMT, &hdev->dev_flags))
+		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+
 	err = hci_dev_do_open(hdev);
 
 done:
@@ -2655,6 +2682,42 @@
 	return ret;
 }
 
+static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
+{
+	bool conn_changed, discov_changed;
+
+	BT_DBG("%s scan 0x%02x", hdev->name, scan);
+
+	if ((scan & SCAN_PAGE))
+		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
+						 &hdev->dev_flags);
+	else
+		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
+						  &hdev->dev_flags);
+
+	if ((scan & SCAN_INQUIRY)) {
+		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
+						   &hdev->dev_flags);
+	} else {
+		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
+						    &hdev->dev_flags);
+	}
+
+	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+		return;
+
+	if (conn_changed || discov_changed) {
+		/* In case this was disabled through mgmt */
+		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+			mgmt_update_adv_data(hdev);
+
+		mgmt_new_settings(hdev);
+	}
+}
+
 int hci_dev_cmd(unsigned int cmd, void __user *arg)
 {
 	struct hci_dev *hdev;
@@ -2716,22 +2779,11 @@
 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
 				   HCI_INIT_TIMEOUT);
 
-		/* Ensure that the connectable state gets correctly
-		 * notified if the whitelist is in use.
+		/* Ensure that the connectable and discoverable states
+		 * get correctly modified as this was a non-mgmt change.
 		 */
-		if (!err && !list_empty(&hdev->whitelist)) {
-			bool changed;
-
-			if ((dr.dev_opt & SCAN_PAGE))
-				changed = !test_and_set_bit(HCI_CONNECTABLE,
-							    &hdev->dev_flags);
-			else
-				changed = test_and_set_bit(HCI_CONNECTABLE,
-							   &hdev->dev_flags);
-
-			if (changed)
-				mgmt_new_settings(hdev);
-		}
+		if (!err)
+			hci_update_scan_state(hdev, dr.dev_opt);
 		break;
 
 	case HCISETLINKPOL:
@@ -2792,14 +2844,17 @@
 
 	read_lock(&hci_dev_list_lock);
 	list_for_each_entry(hdev, &hci_dev_list, list) {
-		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-			cancel_delayed_work(&hdev->power_off);
+		unsigned long flags = hdev->flags;
 
-		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
-			set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+		/* When the auto-off is configured it means the transport
+		 * is running, but in that case still indicate that the
+		 * device is actually down.
+		 */
+		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+			flags &= ~BIT(HCI_UP);
 
 		(dr + n)->dev_id  = hdev->id;
-		(dr + n)->dev_opt = hdev->flags;
+		(dr + n)->dev_opt = flags;
 
 		if (++n >= dev_num)
 			break;
@@ -2819,6 +2874,7 @@
 {
 	struct hci_dev *hdev;
 	struct hci_dev_info di;
+	unsigned long flags;
 	int err = 0;
 
 	if (copy_from_user(&di, arg, sizeof(di)))
@@ -2828,16 +2884,19 @@
 	if (!hdev)
 		return -ENODEV;
 
-	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-		cancel_delayed_work_sync(&hdev->power_off);
-
-	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
-		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+	/* When the auto-off is configured it means the transport
+	 * is running, but in that case still indicate that the
+	 * device is actually down.
+	 */
+	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+		flags = hdev->flags & ~BIT(HCI_UP);
+	else
+		flags = hdev->flags;
 
 	strcpy(di.name, hdev->name);
 	di.bdaddr   = hdev->bdaddr;
 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
-	di.flags    = hdev->flags;
+	di.flags    = flags;
 	di.pkt_type = hdev->pkt_type;
 	if (lmp_bredr_capable(hdev)) {
 		di.acl_mtu  = hdev->acl_mtu;
@@ -3062,13 +3121,16 @@
 	return false;
 }
 
-static bool ltk_type_master(u8 type)
+static u8 ltk_role(u8 type)
 {
-	return (type == SMP_LTK);
+	if (type == SMP_LTK)
+		return HCI_ROLE_MASTER;
+
+	return HCI_ROLE_SLAVE;
 }
 
 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
-			     bool master)
+			     u8 role)
 {
 	struct smp_ltk *k;
 
@@ -3076,7 +3138,7 @@
 		if (k->ediv != ediv || k->rand != rand)
 			continue;
 
-		if (ltk_type_master(k->type) != master)
+		if (ltk_role(k->type) != role)
 			continue;
 
 		return k;
@@ -3086,14 +3148,14 @@
 }
 
 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
-				     u8 addr_type, bool master)
+				     u8 addr_type, u8 role)
 {
 	struct smp_ltk *k;
 
 	list_for_each_entry(k, &hdev->long_term_keys, list)
 		if (addr_type == k->bdaddr_type &&
 		    bacmp(bdaddr, &k->bdaddr) == 0 &&
-		    ltk_type_master(k->type) == master)
+		    ltk_role(k->type) == role)
 			return k;
 
 	return NULL;
@@ -3188,9 +3250,9 @@
 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
 {
 	struct smp_ltk *key, *old_key;
-	bool master = ltk_type_master(type);
+	u8 role = ltk_role(type);
 
-	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
+	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
 	if (old_key)
 		key = old_key;
 	else {
@@ -3430,7 +3492,7 @@
 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
 		return -EEXIST;
 
-	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
 
@@ -3835,7 +3897,7 @@
 {
 	struct hci_dev *hdev;
 
-	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
+	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
 	if (!hdev)
 		return NULL;
 
@@ -4390,6 +4452,11 @@
 	return 0;
 }
 
+bool hci_req_pending(struct hci_dev *hdev)
+{
+	return (hdev->req_status == HCI_REQ_PEND);
+}
+
 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
 				       u32 plen, const void *param)
 {
@@ -5398,8 +5465,7 @@
 
 	hci_req_init(&req, hdev);
 
-	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
-	    list_empty(&hdev->pend_le_conns) &&
+	if (list_empty(&hdev->pend_le_conns) &&
 	    list_empty(&hdev->pend_le_reports)) {
 		/* If there is no pending LE connections or devices
 		 * to be scanned for, we should stop the background
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index c8ae9ee..4c41774 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -101,12 +101,8 @@
 	hci_dev_lock(hdev);
 
 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
-	if (conn) {
-		if (rp->role)
-			clear_bit(HCI_CONN_MASTER, &conn->flags);
-		else
-			set_bit(HCI_CONN_MASTER, &conn->flags);
-	}
+	if (conn)
+		conn->role = rp->role;
 
 	hci_dev_unlock(hdev);
 }
@@ -296,7 +292,6 @@
 {
 	__u8 status = *((__u8 *) skb->data);
 	__u8 param;
-	int old_pscan, old_iscan;
 	void *sent;
 
 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -310,32 +305,19 @@
 	hci_dev_lock(hdev);
 
 	if (status) {
-		mgmt_write_scan_failed(hdev, param, status);
 		hdev->discov_timeout = 0;
 		goto done;
 	}
 
-	/* We need to ensure that we set this back on if someone changed
-	 * the scan mode through a raw HCI socket.
-	 */
-	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
-
-	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
-	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
-
-	if (param & SCAN_INQUIRY) {
+	if (param & SCAN_INQUIRY)
 		set_bit(HCI_ISCAN, &hdev->flags);
-		if (!old_iscan)
-			mgmt_discoverable(hdev, 1);
-	} else if (old_iscan)
-		mgmt_discoverable(hdev, 0);
+	else
+		clear_bit(HCI_ISCAN, &hdev->flags);
 
-	if (param & SCAN_PAGE) {
+	if (param & SCAN_PAGE)
 		set_bit(HCI_PSCAN, &hdev->flags);
-		if (!old_pscan)
-			mgmt_connectable(hdev, 1);
-	} else if (old_pscan)
-		mgmt_connectable(hdev, 0);
+	else
+		clear_bit(HCI_PSCAN, &hdev->flags);
 
 done:
 	hci_dev_unlock(hdev);
@@ -1432,11 +1414,9 @@
 		}
 	} else {
 		if (!conn) {
-			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
-			if (conn) {
-				conn->out = true;
-				set_bit(HCI_CONN_MASTER, &conn->flags);
-			} else
+			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
+					    HCI_ROLE_MASTER);
+			if (!conn)
 				BT_ERR("No memory for new connection");
 		}
 	}
@@ -1665,6 +1645,8 @@
 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
 		struct hci_cp_auth_requested auth_cp;
 
+		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
+
 		auth_cp.handle = __cpu_to_le16(conn->handle);
 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
 			     sizeof(auth_cp), &auth_cp);
@@ -2149,18 +2131,17 @@
 		return;
 	}
 
-	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
-		if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
-					   BDADDR_BREDR)) {
-			hci_reject_conn(hdev, &ev->bdaddr);
-			return;
-		}
-	} else {
-		if (!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
-					    BDADDR_BREDR)) {
-			hci_reject_conn(hdev, &ev->bdaddr);
-			return;
-		}
+	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
+				   BDADDR_BREDR)) {
+		hci_reject_conn(hdev, &ev->bdaddr);
+		return;
+	}
+
+	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
+	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
+				    BDADDR_BREDR)) {
+		hci_reject_conn(hdev, &ev->bdaddr);
+		return;
 	}
 
 	/* Connection accepted */
@@ -2174,7 +2155,8 @@
 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
 			&ev->bdaddr);
 	if (!conn) {
-		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
+		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
+				    HCI_ROLE_SLAVE);
 		if (!conn) {
 			BT_ERR("No memory for new connection");
 			hci_dev_unlock(hdev);
@@ -2407,6 +2389,9 @@
 
 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
 		struct hci_cp_auth_requested cp;
+
+		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
+
 		cp.handle = __cpu_to_le16(conn->handle);
 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
 	}
@@ -2938,12 +2923,8 @@
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
 	if (conn) {
-		if (!ev->status) {
-			if (ev->role)
-				clear_bit(HCI_CONN_MASTER, &conn->flags);
-			else
-				set_bit(HCI_CONN_MASTER, &conn->flags);
-		}
+		if (!ev->status)
+			conn->role = ev->role;
 
 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
 
@@ -3137,10 +3118,11 @@
 		hci_conn_drop(conn);
 	}
 
-	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
+	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags) &&
+	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
 			     sizeof(ev->bdaddr), &ev->bdaddr);
-	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
+	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
 		u8 secure;
 
 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
@@ -3666,7 +3648,11 @@
 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
 		goto unlock;
 
+	/* Allow pairing if we're pairable, if we're the initiator of
+	 * the pairing, or if the remote is not requesting bonding.
+	 */
 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
+	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
 		struct hci_cp_io_capability_reply cp;
 
@@ -3678,18 +3664,14 @@
 
 		/* If we are initiators, there is no remote information yet */
 		if (conn->remote_auth == 0xff) {
-			cp.authentication = conn->auth_type;
-
 			/* Request MITM protection if our IO caps allow it
 			 * except for the no-bonding case.
-			 * conn->auth_type is not updated here since
-			 * that might cause the user confirmation to be
-			 * rejected in case the remote doesn't have the
-			 * IO capabilities for MITM.
 			 */
 			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
-			    cp.authentication != HCI_AT_NO_BONDING)
-				cp.authentication |= 0x01;
+			    conn->auth_type != HCI_AT_NO_BONDING)
+				conn->auth_type |= 0x01;
+
+			cp.authentication = conn->auth_type;
 		} else {
 			conn->auth_type = hci_get_auth_req(conn);
 			cp.authentication = conn->auth_type;
@@ -3761,9 +3743,12 @@
 	rem_mitm = (conn->remote_auth & 0x01);
 
 	/* If we require MITM but the remote device can't provide that
-	 * (it has NoInputNoOutput) then reject the confirmation request
+	 * (it has NoInputNoOutput) then reject the confirmation
+	 * request. We check the security level here since it doesn't
+	 * necessarily match conn->auth_type.
 	 */
-	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
+	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
+	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
 		BT_DBG("Rejecting request: remote device can't provide MITM");
 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
 			     sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3777,9 +3762,11 @@
 		/* If we're not the initiators request authorization to
 		 * proceed from user space (mgmt_user_confirm with
 		 * confirm_hint set to 1). The exception is if neither
-		 * side had MITM in which case we do auto-accept.
+		 * side had MITM or if the local IO capability is
+		 * NoInputNoOutput, in which case we do auto-accept.
 		 */
 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
+		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
 		    (loc_mitm || rem_mitm)) {
 			BT_DBG("Confirming auto-accept as acceptor");
 			confirm_hint = 1;
@@ -3893,6 +3880,9 @@
 	if (!conn)
 		goto unlock;
 
+	/* Reset the authentication requirement to unknown */
+	conn->remote_auth = 0xff;
+
 	/* To avoid duplicate auth_failed events to user space we check
 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
 	 * initiated the authentication. A traditional auth_complete
@@ -4123,7 +4113,7 @@
 
 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
 	if (!conn) {
-		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
 		if (!conn) {
 			BT_ERR("No memory for new connection");
 			goto unlock;
@@ -4131,11 +4121,6 @@
 
 		conn->dst_type = ev->bdaddr_type;
 
-		if (ev->role == LE_CONN_ROLE_MASTER) {
-			conn->out = true;
-			set_bit(HCI_CONN_MASTER, &conn->flags);
-		}
-
 		/* If we didn't have a hci_conn object previously
 		 * but we're in master role this must be something
 		 * initiated using a white list. Since white list based
@@ -4202,14 +4187,14 @@
 	else
 		addr_type = BDADDR_LE_RANDOM;
 
-	/* Drop the connection if he device is blocked */
-	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
-		hci_conn_drop(conn);
+	if (ev->status) {
+		hci_le_conn_failed(conn, ev->status);
 		goto unlock;
 	}
 
-	if (ev->status) {
-		hci_le_conn_failed(conn, ev->status);
+	/* Drop the connection if the device is blocked */
+	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
+		hci_conn_drop(conn);
 		goto unlock;
 	}
 
@@ -4275,6 +4260,12 @@
 	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
 		return;
 
+	/* Most controllers will fail if we try to create new connections
+	 * while we have an existing one in slave role.
+	 */
+	if (hdev->conn_hash.le_num_slave > 0)
+		return;
+
 	/* If we're connectable, always connect any ADV_DIRECT_IND event */
 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
 	    adv_type == LE_ADV_DIRECT_IND)
@@ -4287,9 +4278,8 @@
 		return;
 
 connect:
-	/* Request connection in master = true role */
 	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
-			      HCI_LE_AUTOCONN_TIMEOUT, true);
+			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
 	if (!IS_ERR(conn))
 		return;
 
@@ -4329,14 +4319,11 @@
 	 * device found events.
 	 */
 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
-		struct hci_conn_params *param;
-
 		if (type == LE_ADV_DIRECT_IND)
 			return;
 
-		param = hci_pend_le_action_lookup(&hdev->pend_le_reports,
-						  bdaddr, bdaddr_type);
-		if (!param)
+		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
+					       bdaddr, bdaddr_type))
 			return;
 
 		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
@@ -4470,7 +4457,7 @@
 	if (conn == NULL)
 		goto not_found;
 
-	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
+	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role);
 	if (ltk == NULL)
 		goto not_found;
 
@@ -4545,7 +4532,7 @@
 		return send_conn_param_neg_reply(hdev, handle,
 						 HCI_ERROR_INVALID_LL_PARAMS);
 
-	if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
+	if (hcon->role == HCI_ROLE_MASTER) {
 		struct hci_conn_params *params;
 		u8 store_hint;
 
@@ -4638,7 +4625,7 @@
 	/* Received events are (currently) only needed when a request is
 	 * ongoing so avoid unnecessary memory allocation.
 	 */
-	if (hdev->req_status == HCI_REQ_PEND) {
+	if (hci_req_pending(hdev)) {
 		kfree_skb(hdev->recv_evt);
 		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
 	}
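
The hci_event.c hunks above replace the HCI_CONN_MASTER flag bit with an
explicit conn->role field carrying HCI_ROLE_MASTER or HCI_ROLE_SLAVE,
assigned straight from the controller's role byte. The standalone sketch
below contrasts the two idioms; the types and helper names are invented
for illustration and are not the kernel structures:

/* Standalone sketch, not kernel code: the flag-bit bookkeeping that the
 * hunks above remove, next to the direct role assignment that replaces it.
 */
#include <stdio.h>

enum role { ROLE_MASTER = 0x00, ROLE_SLAVE = 0x01 };	/* mirrors the HCI role byte */

struct conn_old { unsigned long flags; };	/* bit 0 stands for "master" */
struct conn_new { enum role role; };		/* role stored directly */

static void set_role_old(struct conn_old *c, unsigned char ev_role)
{
	/* two branches that every caller has to keep in sync */
	if (ev_role)
		c->flags &= ~1UL;
	else
		c->flags |= 1UL;
}

static void set_role_new(struct conn_new *c, unsigned char ev_role)
{
	/* the event's role byte already matches the enum values */
	c->role = (enum role)ev_role;
}

int main(void)
{
	struct conn_old o = { 0 };
	struct conn_new n = { ROLE_MASTER };

	set_role_old(&o, 0x01);		/* controller reports slave role */
	set_role_new(&n, 0x01);
	printf("old master bit: %lu, new role: %d\n", o.flags & 1UL, n.role);
	return 0;
}
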
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index c64728d..115f149 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -35,13 +35,32 @@
 
 /* ----- HCI socket interface ----- */
 
+/* Socket info */
+#define hci_pi(sk) ((struct hci_pinfo *) sk)
+
+struct hci_pinfo {
+	struct bt_sock    bt;
+	struct hci_dev    *hdev;
+	struct hci_filter filter;
+	__u32             cmsg_mask;
+	unsigned short    channel;
+};
+
 static inline int hci_test_bit(int nr, void *addr)
 {
 	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
 }
 
 /* Security filter */
-static struct hci_sec_filter hci_sec_filter = {
+#define HCI_SFLT_MAX_OGF  5
+
+struct hci_sec_filter {
+	__u32 type_mask;
+	__u32 event_mask[2];
+	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
+};
+
+static const struct hci_sec_filter hci_sec_filter = {
 	/* Packet types */
 	0x10,
 	/* Events */
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 8680aae..46547b9 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -46,7 +46,7 @@
 bool disable_ertm;
 
 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
-static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
 
 static LIST_HEAD(chan_list);
 static DEFINE_RWLOCK(chan_list_lock);
@@ -775,7 +775,7 @@
 }
 
 /* Service level security */
-int l2cap_chan_check_security(struct l2cap_chan *chan)
+int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
 {
 	struct l2cap_conn *conn = chan->conn;
 	__u8 auth_type;
@@ -785,7 +785,8 @@
 
 	auth_type = l2cap_get_auth_type(chan);
 
-	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
+	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
+				 initiator);
 }
 
 static u8 l2cap_get_ident(struct l2cap_conn *conn)
@@ -798,14 +799,14 @@
 	 *  200 - 254 are used by utilities like l2ping, etc.
 	 */
 
-	spin_lock(&conn->lock);
+	mutex_lock(&conn->ident_lock);
 
 	if (++conn->tx_ident > 128)
 		conn->tx_ident = 1;
 
 	id = conn->tx_ident;
 
-	spin_unlock(&conn->lock);
+	mutex_unlock(&conn->ident_lock);
 
 	return id;
 }
@@ -1278,7 +1279,7 @@
 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
 			return;
 
-		if (l2cap_chan_check_security(chan) &&
+		if (l2cap_chan_check_security(chan, true) &&
 		    __l2cap_no_conn_pending(chan)) {
 			l2cap_start_connection(chan);
 		}
@@ -1357,7 +1358,7 @@
 		}
 
 		if (chan->state == BT_CONNECT) {
-			if (!l2cap_chan_check_security(chan) ||
+			if (!l2cap_chan_check_security(chan, true) ||
 			    !__l2cap_no_conn_pending(chan)) {
 				l2cap_chan_unlock(chan);
 				continue;
@@ -1379,7 +1380,7 @@
 			rsp.scid = cpu_to_le16(chan->dcid);
 			rsp.dcid = cpu_to_le16(chan->scid);
 
-			if (l2cap_chan_check_security(chan)) {
+			if (l2cap_chan_check_security(chan, false)) {
 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
@@ -1487,7 +1488,7 @@
 	 * been configured for this connection. If not, then trigger
 	 * the connection update procedure.
 	 */
-	if (!test_bit(HCI_CONN_MASTER, &hcon->flags) &&
+	if (hcon->role == HCI_ROLE_SLAVE &&
 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
 		struct l2cap_conn_param_update_req req;
@@ -3849,7 +3850,7 @@
 	chan->ident = cmd->ident;
 
 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
-		if (l2cap_chan_check_security(chan)) {
+		if (l2cap_chan_check_security(chan, false)) {
 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
 				l2cap_state_change(chan, BT_CONNECT2);
 				result = L2CAP_CR_PEND;
@@ -5227,7 +5228,7 @@
 	u16 min, max, latency, to_multiplier;
 	int err;
 
-	if (!test_bit(HCI_CONN_MASTER, &hcon->flags))
+	if (hcon->role != HCI_ROLE_MASTER)
 		return -EINVAL;
 
 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
@@ -6984,7 +6985,7 @@
 	if (!hchan)
 		return NULL;
 
-	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
+	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
 	if (!conn) {
 		hci_chan_del(hchan);
 		return NULL;
@@ -7016,7 +7017,7 @@
 		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
 					    &hcon->hdev->dev_flags);
 
-	spin_lock_init(&conn->lock);
+	mutex_init(&conn->ident_lock);
 	mutex_init(&conn->chan_lock);
 
 	INIT_LIST_HEAD(&conn->chan_l);
@@ -7093,7 +7094,7 @@
 			break;
 		/* fall through */
 	default:
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
 		goto done;
 	}
 
@@ -7128,7 +7129,7 @@
 	chan->dcid = cid;
 
 	if (bdaddr_type_is_le(dst_type)) {
-		bool master;
+		u8 role;
 
 		/* Convert from L2CAP channel address type to HCI address type
 		 */
@@ -7137,10 +7138,13 @@
 		else
 			dst_type = ADDR_LE_DEV_RANDOM;
 
-		master = !test_bit(HCI_ADVERTISING, &hdev->dev_flags);
+		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+			role = HCI_ROLE_SLAVE;
+		else
+			role = HCI_ROLE_MASTER;
 
 		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
-				      HCI_LE_CONN_TIMEOUT, master);
+				      HCI_LE_CONN_TIMEOUT, role);
 	} else {
 		u8 auth_type = l2cap_get_auth_type(chan);
 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
@@ -7188,7 +7192,7 @@
 	if (hcon->state == BT_CONNECTED) {
 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
 			__clear_chan_timer(chan);
-			if (l2cap_chan_check_security(chan))
+			if (l2cap_chan_check_security(chan, true))
 				l2cap_state_change(chan, BT_CONNECTED);
 		} else
 			l2cap_do_start(chan);
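
One detail of the l2cap_core.c changes above is that signalling-identifier
allocation now serializes on a dedicated conn->ident_lock mutex rather than
the connection spinlock. The sketch below models that allocator in
userspace; the struct and pthread locking are stand-ins, not the kernel
implementation:

/* Standalone sketch, not kernel code: wrap-around allocation of L2CAP
 * signalling idents in 1..128 under a mutex, modelling l2cap_get_ident()
 * after the spinlock-to-mutex change above.
 */
#include <pthread.h>
#include <stdio.h>

struct conn {
	pthread_mutex_t ident_lock;
	unsigned char tx_ident;
};

static unsigned char get_ident(struct conn *c)
{
	unsigned char id;

	pthread_mutex_lock(&c->ident_lock);
	if (++c->tx_ident > 128)	/* stay in 1..128; higher idents are left to utilities */
		c->tx_ident = 1;
	id = c->tx_ident;
	pthread_mutex_unlock(&c->ident_lock);

	return id;
}

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, 127 };
	unsigned char a = get_ident(&c);
	unsigned char b = get_ident(&c);

	printf("%u %u\n", a, b);	/* prints "128 1" */
	return 0;
}
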
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 9bb4d1b..1884f72 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -279,7 +279,7 @@
 			break;
 		/* fall through */
 	default:
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
 		goto done;
 	}
 
@@ -797,7 +797,7 @@
 		} else if ((sk->sk_state == BT_CONNECT2 &&
 			    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
 			   sk->sk_state == BT_CONNECTED) {
-			if (!l2cap_chan_check_security(chan))
+			if (!l2cap_chan_check_security(chan, true))
 				set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
 			else
 				sk->sk_state_change(sk);
@@ -1112,7 +1112,8 @@
 		l2cap_chan_close(chan, 0);
 		lock_sock(sk);
 
-		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+		    !(current->flags & PF_EXITING))
 			err = bt_sock_wait_state(sk, BT_CLOSED,
 						 sk->sk_lingertime);
 	}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 91b1f92..1906683 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -906,6 +906,16 @@
 	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
 }
 
+int mgmt_update_adv_data(struct hci_dev *hdev)
+{
+	struct hci_request req;
+
+	hci_req_init(&req, hdev);
+	update_adv_data(&req);
+
+	return hci_req_run(&req, NULL);
+}
+
 static void create_eir(struct hci_dev *hdev, u8 *data)
 {
 	u8 *ptr = data;
@@ -1743,7 +1753,7 @@
 {
 	struct pending_cmd *cmd;
 	struct mgmt_mode *cp;
-	bool changed;
+	bool conn_changed, discov_changed;
 
 	BT_DBG("status 0x%02x", status);
 
@@ -1760,15 +1770,23 @@
 	}
 
 	cp = cmd->param;
-	if (cp->val)
-		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-	else
-		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+	if (cp->val) {
+		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
+						 &hdev->dev_flags);
+		discov_changed = false;
+	} else {
+		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
+						  &hdev->dev_flags);
+		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
+						    &hdev->dev_flags);
+	}
 
 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
 
-	if (changed) {
+	if (conn_changed || discov_changed) {
 		new_settings(hdev, cmd->sk);
+		if (discov_changed)
+			mgmt_update_adv_data(hdev);
 		hci_update_background_scan(hdev);
 	}
 
@@ -1882,8 +1900,8 @@
 	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
 		write_fast_connectable(&req, false);
 
-	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
-	    !test_bit(HCI_LE_ADV, &hdev->dev_flags))
+	/* Update the advertising parameters if necessary */
+	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
 		enable_advertising(&req);
 
 	err = hci_req_run(&req, set_connectable_complete);
@@ -3136,9 +3154,9 @@
 		 */
 		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
 
-		/* Request a connection with master = true role */
 		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
-				      sec_level, HCI_LE_CONN_TIMEOUT, true);
+				      sec_level, HCI_LE_CONN_TIMEOUT,
+				      HCI_ROLE_MASTER);
 	}
 
 	if (IS_ERR(conn)) {
@@ -3184,7 +3202,7 @@
 	cmd->user_data = conn;
 
 	if (conn->state == BT_CONNECTED &&
-	    hci_conn_security(conn, sec_level, auth_type))
+	    hci_conn_security(conn, sec_level, auth_type, true))
 		pairing_complete(cmd, 0);
 
 	err = 0;
@@ -6031,88 +6049,6 @@
 	hci_dev_unlock(hdev);
 }
 
-void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
-{
-	bool changed;
-
-	/* Nothing needed here if there's a pending command since that
-	 * commands request completion callback takes care of everything
-	 * necessary.
-	 */
-	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
-		return;
-
-	/* Powering off may clear the scan mode - don't let that interfere */
-	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
-		return;
-
-	if (discoverable) {
-		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
-	} else {
-		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
-		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
-	}
-
-	if (changed) {
-		struct hci_request req;
-
-		/* In case this change in discoverable was triggered by
-		 * a disabling of connectable there could be a need to
-		 * update the advertising flags.
-		 */
-		hci_req_init(&req, hdev);
-		update_adv_data(&req);
-		hci_req_run(&req, NULL);
-
-		new_settings(hdev, NULL);
-	}
-}
-
-void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
-{
-	bool changed;
-
-	/* Nothing needed here if there's a pending command since that
-	 * commands request completion callback takes care of everything
-	 * necessary.
-	 */
-	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
-		return;
-
-	/* Powering off may clear the scan mode - don't let that interfere */
-	if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
-		return;
-
-	/* If something else than mgmt changed the page scan state we
-	 * can't differentiate this from a change triggered by adding
-	 * the first element to the whitelist. Therefore, avoid
-	 * incorrectly setting HCI_CONNECTABLE.
-	 */
-	if (connectable && !list_empty(&hdev->whitelist))
-		return;
-
-	if (connectable)
-		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-	else
-		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-
-	if (changed)
-		new_settings(hdev, NULL);
-}
-
-void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
-{
-	u8 mgmt_err = mgmt_status(status);
-
-	if (scan & SCAN_PAGE)
-		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
-				     cmd_status_rsp, &mgmt_err);
-
-	if (scan & SCAN_INQUIRY)
-		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
-				     cmd_status_rsp, &mgmt_err);
-}
-
 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
 		       bool persistent)
 {
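
The MGMT_OP_SET_CONNECTABLE completion hunk above couples the two flags:
turning connectable off also clears discoverable, and either change
triggers a settings notification plus an advertising-data refresh. A
simplified standalone model of that flag coupling, with invented types
rather than the kernel dev_flags bitmap, might look like this:

/* Standalone sketch, not kernel code: clearing "connectable" also clears
 * "discoverable", and either change means the caller must notify.
 */
#include <stdbool.h>
#include <stdio.h>

struct dev_flags { bool connectable; bool discoverable; };

static bool set_connectable(struct dev_flags *f, bool val)
{
	bool conn_changed, discov_changed = false;

	if (val) {
		conn_changed = !f->connectable;
		f->connectable = true;
	} else {
		conn_changed = f->connectable;
		f->connectable = false;
		discov_changed = f->discoverable;
		f->discoverable = false;	/* can't be discoverable when not connectable */
	}

	return conn_changed || discov_changed;	/* caller emits the settings event */
}

int main(void)
{
	struct dev_flags f = { true, true };

	printf("changed: %d, discoverable now: %d\n",
	       set_connectable(&f, false), f.discoverable);
	return 0;
}
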
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 754b6fe..a0690a8 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -227,7 +227,8 @@
 		break;
 	}
 
-	return hci_conn_security(conn->hcon, d->sec_level, auth_type);
+	return hci_conn_security(conn->hcon, d->sec_level, auth_type,
+				 d->out);
 }
 
 static void rfcomm_session_timeout(unsigned long arg)
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index c603a5e..8bbbb5e 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -918,7 +918,8 @@
 		sk->sk_shutdown = SHUTDOWN_MASK;
 		__rfcomm_sock_close(sk);
 
-		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+		    !(current->flags & PF_EXITING))
 			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
 	}
 	release_sock(sk);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index c06dbd3..7ee9e4a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -40,13 +40,38 @@
 	.lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock)
 };
 
-static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
-static void sco_chan_del(struct sock *sk, int err);
+/* ---- SCO connections ---- */
+struct sco_conn {
+	struct hci_conn	*hcon;
+
+	spinlock_t	lock;
+	struct sock	*sk;
+
+	unsigned int    mtu;
+};
+
+#define sco_conn_lock(c)	spin_lock(&c->lock);
+#define sco_conn_unlock(c)	spin_unlock(&c->lock);
 
 static void sco_sock_close(struct sock *sk);
 static void sco_sock_kill(struct sock *sk);
 
+/* ----- SCO socket info ----- */
+#define sco_pi(sk) ((struct sco_pinfo *) sk)
+
+struct sco_pinfo {
+	struct bt_sock	bt;
+	bdaddr_t	src;
+	bdaddr_t	dst;
+	__u32		flags;
+	__u16		setting;
+	struct sco_conn	*conn;
+};
+
 /* ---- SCO timers ---- */
+#define SCO_CONN_TIMEOUT	(HZ * 40)
+#define SCO_DISCONN_TIMEOUT	(HZ * 2)
+
 static void sco_sock_timeout(unsigned long arg)
 {
 	struct sock *sk = (struct sock *) arg;
@@ -102,13 +127,31 @@
 	return conn;
 }
 
-static struct sock *sco_chan_get(struct sco_conn *conn)
+/* Delete channel.
+ * Must be called on the locked socket. */
+static void sco_chan_del(struct sock *sk, int err)
 {
-	struct sock *sk = NULL;
-	sco_conn_lock(conn);
-	sk = conn->sk;
-	sco_conn_unlock(conn);
-	return sk;
+	struct sco_conn *conn;
+
+	conn = sco_pi(sk)->conn;
+
+	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
+
+	if (conn) {
+		sco_conn_lock(conn);
+		conn->sk = NULL;
+		sco_pi(sk)->conn = NULL;
+		sco_conn_unlock(conn);
+
+		if (conn->hcon)
+			hci_conn_drop(conn->hcon);
+	}
+
+	sk->sk_state = BT_CLOSED;
+	sk->sk_err   = err;
+	sk->sk_state_change(sk);
+
+	sock_set_flag(sk, SOCK_ZAPPED);
 }
 
 static int sco_conn_del(struct hci_conn *hcon, int err)
@@ -122,7 +165,10 @@
 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
 
 	/* Kill socket */
-	sk = sco_chan_get(conn);
+	sco_conn_lock(conn);
+	sk = conn->sk;
+	sco_conn_unlock(conn);
+
 	if (sk) {
 		bh_lock_sock(sk);
 		sco_sock_clear_timer(sk);
@@ -136,6 +182,17 @@
 	return 0;
 }
 
+static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
+{
+	BT_DBG("conn %p", conn);
+
+	sco_pi(sk)->conn = conn;
+	conn->sk = sk;
+
+	if (parent)
+		bt_accept_enqueue(parent, sk);
+}
+
 static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
 			struct sock *parent)
 {
@@ -240,7 +297,11 @@
 
 static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
 {
-	struct sock *sk = sco_chan_get(conn);
+	struct sock *sk;
+
+	sco_conn_lock(conn);
+	sk = conn->sk;
+	sco_conn_unlock(conn);
 
 	if (!sk)
 		goto drop;
@@ -909,7 +970,8 @@
 		sco_sock_clear_timer(sk);
 		__sco_sock_close(sk);
 
-		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+		    !(current->flags & PF_EXITING))
 			err = bt_sock_wait_state(sk, BT_CLOSED,
 						 sk->sk_lingertime);
 	}
@@ -929,7 +991,8 @@
 
 	sco_sock_close(sk);
 
-	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
+	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+	    !(current->flags & PF_EXITING)) {
 		lock_sock(sk);
 		err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
 		release_sock(sk);
@@ -940,44 +1003,6 @@
 	return err;
 }
 
-static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
-{
-	BT_DBG("conn %p", conn);
-
-	sco_pi(sk)->conn = conn;
-	conn->sk = sk;
-
-	if (parent)
-		bt_accept_enqueue(parent, sk);
-}
-
-/* Delete channel.
- * Must be called on the locked socket. */
-static void sco_chan_del(struct sock *sk, int err)
-{
-	struct sco_conn *conn;
-
-	conn = sco_pi(sk)->conn;
-
-	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
-
-	if (conn) {
-		sco_conn_lock(conn);
-		conn->sk = NULL;
-		sco_pi(sk)->conn = NULL;
-		sco_conn_unlock(conn);
-
-		if (conn->hcon)
-			hci_conn_drop(conn->hcon);
-	}
-
-	sk->sk_state = BT_CLOSED;
-	sk->sk_err   = err;
-	sk->sk_state_change(sk);
-
-	sock_set_flag(sk, SOCK_ZAPPED);
-}
-
 static void sco_conn_ready(struct sco_conn *conn)
 {
 	struct sock *parent;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 55c41de..e49c83d 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -391,10 +391,12 @@
 
 static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
 {
-	/* If either side has unknown io_caps, use JUST WORKS */
+	/* If either side has unknown io_caps, use JUST_CFM (which gets
+	 * converted later to JUST_WORKS if we're initiators).
+	 */
 	if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
 	    remote_io > SMP_IO_KEYBOARD_DISPLAY)
-		return JUST_WORKS;
+		return JUST_CFM;
 
 	return gen_method[remote_io][local_io];
 }
@@ -414,21 +416,25 @@
 
 	BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
 
-	/* If neither side wants MITM, use JUST WORKS */
-	/* Otherwise, look up method from the table */
+	/* If neither side wants MITM, either "just" confirm an incoming
+	 * request or use just-works for outgoing ones. The JUST_CFM
+	 * will be converted to JUST_WORKS if necessary later in this
+	 * function. If either side has MITM look up the method from the
+	 * table.
+	 */
 	if (!(auth & SMP_AUTH_MITM))
-		method = JUST_WORKS;
+		method = JUST_CFM;
 	else
 		method = get_auth_method(smp, local_io, remote_io);
 
-	/* If not bonding, don't ask user to confirm a Zero TK */
-	if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
-		method = JUST_WORKS;
-
 	/* Don't confirm locally initiated pairing attempts */
 	if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
 		method = JUST_WORKS;
 
+	/* Don't bother user space with no IO capabilities */
+	if (method == JUST_CFM && hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
+		method = JUST_WORKS;
+
 	/* If Just Works, Continue with Zero TK */
 	if (method == JUST_WORKS) {
 		set_bit(SMP_FLAG_TK_VALID, &smp->flags);
@@ -443,7 +449,7 @@
 	 * Confirms and the slave Enters the passkey.
 	 */
 	if (method == OVERLAP) {
-		if (test_bit(HCI_CONN_MASTER, &hcon->flags))
+		if (hcon->role == HCI_ROLE_MASTER)
 			method = CFM_PASSKEY;
 		else
 			method = REQ_PASSKEY;
@@ -674,6 +680,7 @@
 static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
 {
 	struct smp_cmd_pairing rsp, *req = (void *) skb->data;
+	struct hci_dev *hdev = conn->hcon->hdev;
 	struct smp_chan *smp;
 	u8 key_size, auth, sec_level;
 	int ret;
@@ -683,7 +690,7 @@
 	if (skb->len < sizeof(*req))
 		return SMP_INVALID_PARAMS;
 
-	if (test_bit(HCI_CONN_MASTER, &conn->hcon->flags))
+	if (conn->hcon->role != HCI_ROLE_SLAVE)
 		return SMP_CMD_NOTSUPP;
 
 	if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
@@ -694,6 +701,10 @@
 	if (!smp)
 		return SMP_UNSPECIFIED;
 
+	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags) &&
+	    (req->auth_req & SMP_AUTH_BONDING))
+		return SMP_PAIRING_NOTSUPP;
+
 	smp->preq[0] = SMP_CMD_PAIRING_REQ;
 	memcpy(&smp->preq[1], req, sizeof(*req));
 	skb_pull(skb, sizeof(*req));
@@ -733,8 +744,6 @@
 	if (ret)
 		return SMP_UNSPECIFIED;
 
-	clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
-
 	return 0;
 }
 
@@ -750,7 +759,7 @@
 	if (skb->len < sizeof(*rsp))
 		return SMP_INVALID_PARAMS;
 
-	if (!test_bit(HCI_CONN_MASTER, &conn->hcon->flags))
+	if (conn->hcon->role != HCI_ROLE_MASTER)
 		return SMP_CMD_NOTSUPP;
 
 	skb_pull(skb, sizeof(*rsp));
@@ -844,7 +853,7 @@
 	struct hci_conn *hcon = conn->hcon;
 
 	key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type,
-				   hcon->out);
+				   hcon->role);
 	if (!key)
 		return false;
 
@@ -871,9 +880,12 @@
 	/* If we're encrypted with an STK always claim insufficient
 	 * security. This way we allow the connection to be re-encrypted
 	 * with an LTK, even if the LTK provides the same level of
-	 * security.
+	 * security. The only exception is if we don't have an LTK
+	 * (e.g. because of key distribution bits).
 	 */
-	if (test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags))
+	if (test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags) &&
+	    hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type,
+				 hcon->role))
 		return false;
 
 	if (hcon->sec_level >= sec_level)
@@ -895,7 +907,7 @@
 	if (skb->len < sizeof(*rp))
 		return SMP_INVALID_PARAMS;
 
-	if (!test_bit(HCI_CONN_MASTER, &conn->hcon->flags))
+	if (hcon->role != HCI_ROLE_MASTER)
 		return SMP_CMD_NOTSUPP;
 
 	sec_level = authreq_to_seclevel(rp->auth_req);
@@ -911,6 +923,10 @@
 	if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
 		return 0;
 
+	if (!test_bit(HCI_PAIRABLE, &hcon->hdev->dev_flags) &&
+	    (rp->auth_req & SMP_AUTH_BONDING))
+		return SMP_PAIRING_NOTSUPP;
+
 	smp = smp_chan_create(conn);
 	if (!smp)
 		return SMP_UNSPECIFIED;
@@ -925,8 +941,6 @@
 
 	smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
 
-	clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
-
 	return 0;
 }
 
@@ -951,7 +965,7 @@
 	if (sec_level > hcon->pending_sec_level)
 		hcon->pending_sec_level = sec_level;
 
-	if (test_bit(HCI_CONN_MASTER, &hcon->flags))
+	if (hcon->role == HCI_ROLE_MASTER)
 		if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
 			return 0;
 
@@ -971,7 +985,7 @@
 	    hcon->pending_sec_level > BT_SECURITY_MEDIUM)
 		authreq |= SMP_AUTH_MITM;
 
-	if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
+	if (hcon->role == HCI_ROLE_MASTER) {
 		struct smp_cmd_pairing cp;
 
 		build_pairing_cmd(conn, &cp, NULL, authreq);
@@ -1175,7 +1189,7 @@
 	}
 
 	if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
 		reason = SMP_PAIRING_NOTSUPP;
 		goto done;
 	}
@@ -1193,7 +1207,7 @@
 	    !conn->smp_chan) {
 		BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
 		kfree_skb(skb);
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 
 	switch (code) {
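
The smp.c hunks change how the pairing method is chosen: JUST_CFM is now
the default whenever neither side requests MITM, and it is only downgraded
to JUST_WORKS for locally initiated attempts or when the local IO
capability is NoInputNoOutput. Reduced to a pure function, purely as an
illustration of that decision order and not the kernel's tk_request(), the
selection reads:

/* Standalone sketch, not kernel code: "table_method" stands for the
 * gen_method[][] lookup the real code performs when MITM is requested.
 */
#include <stdbool.h>
#include <stdio.h>

enum method { JUST_WORKS, JUST_CFM, REQ_PASSKEY, CFM_PASSKEY };

static enum method pick_method(bool mitm, bool initiator, bool no_io,
			       enum method table_method)
{
	/* no MITM: default to "just confirm" instead of "just works" */
	enum method m = mitm ? table_method : JUST_CFM;

	/* downgrade to just-works for locally initiated pairing or when
	 * there is no IO to confirm with
	 */
	if (m == JUST_CFM && (initiator || no_io))
		m = JUST_WORKS;

	return m;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_method(false, true,  false, CFM_PASSKEY),	/* JUST_WORKS */
	       pick_method(false, false, false, CFM_PASSKEY),	/* JUST_CFM */
	       pick_method(true,  false, false, CFM_PASSKEY));	/* CFM_PASSKEY */
	return 0;
}
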
diff --git a/net/compat.c b/net/compat.c
index 9a76eaf..bc8aeef 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -85,7 +85,7 @@
 {
 	int tot_len;
 
-	if (kern_msg->msg_namelen) {
+	if (kern_msg->msg_name && kern_msg->msg_namelen) {
 		if (mode == VERIFY_READ) {
 			int err = move_addr_to_kernel(kern_msg->msg_name,
 						      kern_msg->msg_namelen,
@@ -93,10 +93,11 @@
 			if (err < 0)
 				return err;
 		}
-		if (kern_msg->msg_name)
-			kern_msg->msg_name = kern_address;
-	} else
+		kern_msg->msg_name = kern_address;
+	} else {
 		kern_msg->msg_name = NULL;
+		kern_msg->msg_namelen = 0;
+	}
 
 	tot_len = iov_from_user_compat_to_kern(kern_iov,
 					  (struct compat_iovec __user *)kern_msg->msg_iov,
diff --git a/net/core/dev.c b/net/core/dev.c
index e1b7cfa..b370230 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2326,7 +2326,7 @@
 	 */
 	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
 		if (vlan_depth) {
-			if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN)))
+			if (WARN_ON(vlan_depth < VLAN_HLEN))
 				return 0;
 			vlan_depth -= VLAN_HLEN;
 		} else {
diff --git a/net/core/filter.c b/net/core/filter.c
index b90ae7f..42c1944 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -45,45 +45,6 @@
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
 
-/* Registers */
-#define BPF_R0	regs[BPF_REG_0]
-#define BPF_R1	regs[BPF_REG_1]
-#define BPF_R2	regs[BPF_REG_2]
-#define BPF_R3	regs[BPF_REG_3]
-#define BPF_R4	regs[BPF_REG_4]
-#define BPF_R5	regs[BPF_REG_5]
-#define BPF_R6	regs[BPF_REG_6]
-#define BPF_R7	regs[BPF_REG_7]
-#define BPF_R8	regs[BPF_REG_8]
-#define BPF_R9	regs[BPF_REG_9]
-#define BPF_R10	regs[BPF_REG_10]
-
-/* Named registers */
-#define DST	regs[insn->dst_reg]
-#define SRC	regs[insn->src_reg]
-#define FP	regs[BPF_REG_FP]
-#define ARG1	regs[BPF_REG_ARG1]
-#define CTX	regs[BPF_REG_CTX]
-#define IMM	insn->imm
-
-/* No hurry in this branch
- *
- * Exported for the bpf jit load helper.
- */
-void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
-{
-	u8 *ptr = NULL;
-
-	if (k >= SKF_NET_OFF)
-		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
-	else if (k >= SKF_LL_OFF)
-		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
-	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
-		return ptr;
-
-	return NULL;
-}
-
 /**
  *	sk_filter - run a packet through a socket filter
  *	@sk: sock associated with &sk_buff
@@ -126,451 +87,6 @@
 }
 EXPORT_SYMBOL(sk_filter);
 
-/* Base function for offset calculation. Needs to go into .text section,
- * therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
- */
-noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
-	return 0;
-}
-
-/**
- *	__sk_run_filter - run a filter on a given context
- *	@ctx: buffer to run the filter on
- *	@insn: filter to apply
- *
- * Decode and apply filter instructions to the skb->data. Return length to
- * keep, 0 for none. @ctx is the data we are operating on, @insn is the
- * array of filter instructions.
- */
-static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
-{
-	u64 stack[MAX_BPF_STACK / sizeof(u64)];
-	u64 regs[MAX_BPF_REG], tmp;
-	static const void *jumptable[256] = {
-		[0 ... 255] = &&default_label,
-		/* Now overwrite non-defaults ... */
-		/* 32 bit ALU operations */
-		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
-		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
-		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
-		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
-		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
-		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
-		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
-		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
-		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
-		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
-		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
-		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
-		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
-		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
-		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
-		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
-		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
-		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
-		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
-		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
-		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
-		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
-		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
-		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
-		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
-		/* 64 bit ALU operations */
-		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
-		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
-		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
-		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
-		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
-		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
-		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
-		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
-		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
-		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
-		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
-		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
-		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
-		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
-		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
-		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
-		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
-		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
-		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
-		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
-		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
-		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
-		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
-		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
-		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
-		/* Call instruction */
-		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
-		/* Jumps */
-		[BPF_JMP | BPF_JA] = &&JMP_JA,
-		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
-		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
-		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
-		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
-		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
-		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
-		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
-		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
-		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
-		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
-		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
-		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
-		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
-		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
-		/* Program return */
-		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
-		/* Store instructions */
-		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
-		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
-		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
-		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
-		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
-		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
-		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
-		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
-		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
-		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
-		/* Load instructions */
-		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
-		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
-		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
-		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
-		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
-		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
-		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
-		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
-		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
-		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
-	};
-	void *ptr;
-	int off;
-
-#define CONT	 ({ insn++; goto select_insn; })
-#define CONT_JMP ({ insn++; goto select_insn; })
-
-	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
-	ARG1 = (u64) (unsigned long) ctx;
-
-	/* Registers used in classic BPF programs need to be reset first. */
-	regs[BPF_REG_A] = 0;
-	regs[BPF_REG_X] = 0;
-
-select_insn:
-	goto *jumptable[insn->code];
-
-	/* ALU */
-#define ALU(OPCODE, OP)			\
-	ALU64_##OPCODE##_X:		\
-		DST = DST OP SRC;	\
-		CONT;			\
-	ALU_##OPCODE##_X:		\
-		DST = (u32) DST OP (u32) SRC;	\
-		CONT;			\
-	ALU64_##OPCODE##_K:		\
-		DST = DST OP IMM;		\
-		CONT;			\
-	ALU_##OPCODE##_K:		\
-		DST = (u32) DST OP (u32) IMM;	\
-		CONT;
-
-	ALU(ADD,  +)
-	ALU(SUB,  -)
-	ALU(AND,  &)
-	ALU(OR,   |)
-	ALU(LSH, <<)
-	ALU(RSH, >>)
-	ALU(XOR,  ^)
-	ALU(MUL,  *)
-#undef ALU
-	ALU_NEG:
-		DST = (u32) -DST;
-		CONT;
-	ALU64_NEG:
-		DST = -DST;
-		CONT;
-	ALU_MOV_X:
-		DST = (u32) SRC;
-		CONT;
-	ALU_MOV_K:
-		DST = (u32) IMM;
-		CONT;
-	ALU64_MOV_X:
-		DST = SRC;
-		CONT;
-	ALU64_MOV_K:
-		DST = IMM;
-		CONT;
-	ALU64_ARSH_X:
-		(*(s64 *) &DST) >>= SRC;
-		CONT;
-	ALU64_ARSH_K:
-		(*(s64 *) &DST) >>= IMM;
-		CONT;
-	ALU64_MOD_X:
-		if (unlikely(SRC == 0))
-			return 0;
-		tmp = DST;
-		DST = do_div(tmp, SRC);
-		CONT;
-	ALU_MOD_X:
-		if (unlikely(SRC == 0))
-			return 0;
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) SRC);
-		CONT;
-	ALU64_MOD_K:
-		tmp = DST;
-		DST = do_div(tmp, IMM);
-		CONT;
-	ALU_MOD_K:
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) IMM);
-		CONT;
-	ALU64_DIV_X:
-		if (unlikely(SRC == 0))
-			return 0;
-		do_div(DST, SRC);
-		CONT;
-	ALU_DIV_X:
-		if (unlikely(SRC == 0))
-			return 0;
-		tmp = (u32) DST;
-		do_div(tmp, (u32) SRC);
-		DST = (u32) tmp;
-		CONT;
-	ALU64_DIV_K:
-		do_div(DST, IMM);
-		CONT;
-	ALU_DIV_K:
-		tmp = (u32) DST;
-		do_div(tmp, (u32) IMM);
-		DST = (u32) tmp;
-		CONT;
-	ALU_END_TO_BE:
-		switch (IMM) {
-		case 16:
-			DST = (__force u16) cpu_to_be16(DST);
-			break;
-		case 32:
-			DST = (__force u32) cpu_to_be32(DST);
-			break;
-		case 64:
-			DST = (__force u64) cpu_to_be64(DST);
-			break;
-		}
-		CONT;
-	ALU_END_TO_LE:
-		switch (IMM) {
-		case 16:
-			DST = (__force u16) cpu_to_le16(DST);
-			break;
-		case 32:
-			DST = (__force u32) cpu_to_le32(DST);
-			break;
-		case 64:
-			DST = (__force u64) cpu_to_le64(DST);
-			break;
-		}
-		CONT;
-
-	/* CALL */
-	JMP_CALL:
-		/* Function call scratches BPF_R1-BPF_R5 registers,
-		 * preserves BPF_R6-BPF_R9, and stores return value
-		 * into BPF_R0.
-		 */
-		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
-						       BPF_R4, BPF_R5);
-		CONT;
-
-	/* JMP */
-	JMP_JA:
-		insn += insn->off;
-		CONT;
-	JMP_JEQ_X:
-		if (DST == SRC) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JEQ_K:
-		if (DST == IMM) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JNE_X:
-		if (DST != SRC) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JNE_K:
-		if (DST != IMM) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JGT_X:
-		if (DST > SRC) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JGT_K:
-		if (DST > IMM) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JGE_X:
-		if (DST >= SRC) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JGE_K:
-		if (DST >= IMM) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSGT_X:
-		if (((s64) DST) > ((s64) SRC)) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSGT_K:
-		if (((s64) DST) > ((s64) IMM)) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSGE_X:
-		if (((s64) DST) >= ((s64) SRC)) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSGE_K:
-		if (((s64) DST) >= ((s64) IMM)) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSET_X:
-		if (DST & SRC) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSET_K:
-		if (DST & IMM) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_EXIT:
-		return BPF_R0;
-
-	/* STX and ST and LDX*/
-#define LDST(SIZEOP, SIZE)						\
-	STX_MEM_##SIZEOP:						\
-		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
-		CONT;							\
-	ST_MEM_##SIZEOP:						\
-		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
-		CONT;							\
-	LDX_MEM_##SIZEOP:						\
-		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
-		CONT;
-
-	LDST(B,   u8)
-	LDST(H,  u16)
-	LDST(W,  u32)
-	LDST(DW, u64)
-#undef LDST
-	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
-		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
-			   (DST + insn->off));
-		CONT;
-	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
-		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
-			     (DST + insn->off));
-		CONT;
-	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
-		off = IMM;
-load_word:
-		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
-		 * only appearing in the programs where ctx ==
-		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
-		 * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
-		 * internal BPF verifier will check that BPF_R6 ==
-		 * ctx.
-		 *
-		 * BPF_ABS and BPF_IND are wrappers of function calls,
-		 * so they scratch BPF_R1-BPF_R5 registers, preserve
-		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
-		 *
-		 * Implicit input:
-		 *   ctx == skb == BPF_R6 == CTX
-		 *
-		 * Explicit input:
-		 *   SRC == any register
-		 *   IMM == 32-bit immediate
-		 *
-		 * Output:
-		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
-		 */
-
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
-		if (likely(ptr != NULL)) {
-			BPF_R0 = get_unaligned_be32(ptr);
-			CONT;
-		}
-
-		return 0;
-	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
-		off = IMM;
-load_half:
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
-		if (likely(ptr != NULL)) {
-			BPF_R0 = get_unaligned_be16(ptr);
-			CONT;
-		}
-
-		return 0;
-	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
-		off = IMM;
-load_byte:
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
-		if (likely(ptr != NULL)) {
-			BPF_R0 = *(u8 *)ptr;
-			CONT;
-		}
-
-		return 0;
-	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
-		off = IMM + SRC;
-		goto load_word;
-	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
-		off = IMM + SRC;
-		goto load_half;
-	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
-		off = IMM + SRC;
-		goto load_byte;
-
-	default_label:
-		/* If we ever reach this, we have a bug somewhere. */
-		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
-		return 0;
-}
-
 /* Helper to find the offset of pkt_type in sk_buff structure. We want
  * to make sure its still a 3bit field starting at a byte boundary;
  * taken from arch/x86/net/bpf_jit_comp.c.
@@ -658,9 +174,9 @@
 }
 
 static bool convert_bpf_extensions(struct sock_filter *fp,
-				   struct sock_filter_int **insnp)
+				   struct bpf_insn **insnp)
 {
-	struct sock_filter_int *insn = *insnp;
+	struct bpf_insn *insn = *insnp;
 
 	switch (fp->k) {
 	case SKF_AD_OFF + SKF_AD_PROTOCOL:
@@ -810,7 +326,7 @@
  *
  * 2) 2nd pass to remap in two passes: 1st pass finds new
  *    jump offsets, 2nd pass remapping:
- *   new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
+ *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
  *   sk_convert_filter(old_prog, old_len, new_prog, &new_len);
  *
  * User BPF's register A is mapped to our BPF register 6, user BPF
@@ -820,10 +336,10 @@
  * ctx == 'struct seccomp_data *'.
  */
 int sk_convert_filter(struct sock_filter *prog, int len,
-		      struct sock_filter_int *new_prog, int *new_len)
+		      struct bpf_insn *new_prog, int *new_len)
 {
 	int new_flen = 0, pass = 0, target, i;
-	struct sock_filter_int *new_insn;
+	struct bpf_insn *new_insn;
 	struct sock_filter *fp;
 	int *addrs = NULL;
 	u8 bpf_src;
@@ -849,8 +365,8 @@
 	new_insn++;
 
 	for (i = 0; i < len; fp++, i++) {
-		struct sock_filter_int tmp_insns[6] = { };
-		struct sock_filter_int *insn = tmp_insns;
+		struct bpf_insn tmp_insns[6] = { };
+		struct bpf_insn *insn = tmp_insns;
 
 		if (addrs)
 			addrs[i] = new_insn - new_prog;
@@ -1325,6 +841,12 @@
 	}
 }
 
+static void __sk_filter_release(struct sk_filter *fp)
+{
+	sk_release_orig_filter(fp);
+	sk_filter_free(fp);
+}
+
 /**
  * 	sk_filter_release_rcu - Release a socket filter by rcu_head
  *	@rcu: rcu_head that contains the sk_filter to free
@@ -1333,8 +855,7 @@
 {
 	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
-	sk_release_orig_filter(fp);
-	sk_filter_free(fp);
+	__sk_filter_release(fp);
 }
 
 /**
@@ -1397,7 +918,7 @@
 	 * representation.
 	 */
 	BUILD_BUG_ON(sizeof(struct sock_filter) !=
-		     sizeof(struct sock_filter_int));
+		     sizeof(struct bpf_insn));
 
 	/* Conversion cannot happen on overlapping memory areas,
 	 * so we need to keep the user BPF around until the 2nd
@@ -1429,7 +950,7 @@
 
 	fp->len = new_len;
 
-	/* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
+	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
 	err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
 	if (err)
 		/* 2nd sk_convert_filter() can fail only if it fails
@@ -1455,33 +976,6 @@
 	return ERR_PTR(err);
 }
 
-void __weak bpf_int_jit_compile(struct sk_filter *prog)
-{
-}
-
-/**
- *	sk_filter_select_runtime - select execution runtime for BPF program
- *	@fp: sk_filter populated with internal BPF program
- *
- * try to JIT internal BPF program, if JIT is not available select interpreter
- * BPF program will be executed via SK_RUN_FILTER() macro
- */
-void sk_filter_select_runtime(struct sk_filter *fp)
-{
-	fp->bpf_func = (void *) __sk_run_filter;
-
-	/* Probe if internal BPF can be JITed */
-	bpf_int_jit_compile(fp);
-}
-EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
-
-/* free internal BPF program */
-void sk_filter_free(struct sk_filter *fp)
-{
-	bpf_jit_free(fp);
-}
-EXPORT_SYMBOL_GPL(sk_filter_free);
-
 static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 					     struct sock *sk)
 {
@@ -1561,7 +1055,7 @@
 
 void sk_unattached_filter_destroy(struct sk_filter *fp)
 {
-	sk_filter_release(fp);
+	__sk_filter_release(fp);
 }
 EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
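
The filter.c changes above rename struct sock_filter_int to struct
bpf_insn and drop the interpreter that now lives outside net/core; the
two-pass calling convention of sk_convert_filter() described in its
comment is unchanged. As a rough illustration of that convention only,
with an entirely made-up byte-doubling "conversion", a caller looks like
this:

/* Standalone sketch, not kernel code: first pass computes the converted
 * length, the caller allocates, the second pass fills the buffer.
 */
#include <stdio.h>
#include <stdlib.h>

static int convert(const unsigned char *old_prog, int old_len,
		   unsigned char *new_prog, int *new_len)
{
	int i, n = 0;

	for (i = 0; i < old_len; i++) {
		if (new_prog) {
			new_prog[n] = old_prog[i];
			new_prog[n + 1] = old_prog[i] ^ 0xff;
		}
		n += 2;
	}
	*new_len = n;
	return 0;
}

int main(void)
{
	unsigned char prog[] = { 1, 2, 3 };
	unsigned char *out;
	int out_len;

	convert(prog, sizeof(prog), NULL, &out_len);	/* 1st pass: size only */
	out = malloc(out_len);
	if (!out)
		return 1;
	convert(prog, sizeof(prog), out, &out_len);	/* 2nd pass: emit */
	printf("converted %zu -> %d bytes\n", sizeof(prog), out_len);
	free(out);
	return 0;
}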
 
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 827dd6b..e1ec45a 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -39,7 +39,7 @@
 {
 	int size, ct, err;
 
-	if (m->msg_namelen) {
+	if (m->msg_name && m->msg_namelen) {
 		if (mode == VERIFY_READ) {
 			void __user *namep;
 			namep = (void __user __force *) m->msg_name;
@@ -48,10 +48,10 @@
 			if (err < 0)
 				return err;
 		}
-		if (m->msg_name)
-			m->msg_name = address;
+		m->msg_name = address;
 	} else {
 		m->msg_name = NULL;
+		m->msg_namelen = 0;
 	}
 
 	size = m->msg_iovlen * sizeof(struct iovec);
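
Both the compat.c and iovec.c hunks apply the same fix: the address is
copied only when msg_name and msg_namelen are both set, and msg_namelen is
zeroed whenever msg_name ends up NULL. A small standalone sketch of the
resulting invariant, with a simplified structure rather than the kernel
msghdr:

/* Standalone sketch, not kernel code: a NULL name always comes with a
 * zero length, so later code never acts on a stale length without a
 * buffer behind it.
 */
#include <stddef.h>
#include <stdio.h>

struct msg {
	void  *msg_name;
	size_t msg_namelen;
};

static void normalize_name(struct msg *m, void *kernel_copy)
{
	if (m->msg_name && m->msg_namelen) {
		m->msg_name = kernel_copy;	/* point at the kernel-side copy */
	} else {
		m->msg_name = NULL;
		m->msg_namelen = 0;		/* never a length without a buffer */
	}
}

int main(void)
{
	char kbuf[128];
	struct msg m = { NULL, 16 };	/* inconsistent: length but no name */

	normalize_name(&m, kbuf);
	printf("name=%p len=%zu\n", m.msg_name, m.msg_namelen);
	return 0;
}
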
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 559890b..ef31fef 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2249,7 +2249,7 @@
 	ndm->ndm_pad1    = 0;
 	ndm->ndm_pad2    = 0;
 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
-	ndm->ndm_type	 = NDA_DST;
+	ndm->ndm_type	 = RTN_UNICAST;
 	ndm->ndm_ifindex = pn->dev->ifindex;
 	ndm->ndm_state	 = NUD_NONE;
 
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7752f2a..9dd0669 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -43,12 +43,12 @@
 			   struct device_attribute *attr, char *buf,
 			   ssize_t (*format)(const struct net_device *, char *))
 {
-	struct net_device *net = to_net_dev(dev);
+	struct net_device *ndev = to_net_dev(dev);
 	ssize_t ret = -EINVAL;
 
 	read_lock(&dev_base_lock);
-	if (dev_isalive(net))
-		ret = (*format)(net, buf);
+	if (dev_isalive(ndev))
+		ret = (*format)(ndev, buf);
 	read_unlock(&dev_base_lock);
 
 	return ret;
@@ -56,9 +56,9 @@
 
 /* generate a show function for simple field */
 #define NETDEVICE_SHOW(field, format_string)				\
-static ssize_t format_##field(const struct net_device *net, char *buf)	\
+static ssize_t format_##field(const struct net_device *dev, char *buf)	\
 {									\
-	return sprintf(buf, format_string, net->field);			\
+	return sprintf(buf, format_string, dev->field);			\
 }									\
 static ssize_t field##_show(struct device *dev,				\
 			    struct device_attribute *attr, char *buf)	\
@@ -112,19 +112,19 @@
 NETDEVICE_SHOW_RO(type, fmt_dec);
 NETDEVICE_SHOW_RO(link_mode, fmt_dec);
 
-static ssize_t format_name_assign_type(const struct net_device *net, char *buf)
+static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
 {
-	return sprintf(buf, fmt_dec, net->name_assign_type);
+	return sprintf(buf, fmt_dec, dev->name_assign_type);
 }
 
 static ssize_t name_assign_type_show(struct device *dev,
 				     struct device_attribute *attr,
 				     char *buf)
 {
-	struct net_device *net = to_net_dev(dev);
+	struct net_device *ndev = to_net_dev(dev);
 	ssize_t ret = -EINVAL;
 
-	if (net->name_assign_type != NET_NAME_UNKNOWN)
+	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
 		ret = netdev_show(dev, attr, buf, format_name_assign_type);
 
 	return ret;
@@ -135,12 +135,12 @@
 static ssize_t address_show(struct device *dev, struct device_attribute *attr,
 			    char *buf)
 {
-	struct net_device *net = to_net_dev(dev);
+	struct net_device *ndev = to_net_dev(dev);
 	ssize_t ret = -EINVAL;
 
 	read_lock(&dev_base_lock);
-	if (dev_isalive(net))
-		ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
+	if (dev_isalive(ndev))
+		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
 	read_unlock(&dev_base_lock);
 	return ret;
 }
@@ -149,18 +149,18 @@
 static ssize_t broadcast_show(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
-	struct net_device *net = to_net_dev(dev);
-	if (dev_isalive(net))
-		return sysfs_format_mac(buf, net->broadcast, net->addr_len);
+	struct net_device *ndev = to_net_dev(dev);
+	if (dev_isalive(ndev))
+		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
 	return -EINVAL;
 }
 static DEVICE_ATTR_RO(broadcast);
 
-static int change_carrier(struct net_device *net, unsigned long new_carrier)
+static int change_carrier(struct net_device *dev, unsigned long new_carrier)
 {
-	if (!netif_running(net))
+	if (!netif_running(dev))
 		return -EINVAL;
-	return dev_change_carrier(net, (bool) new_carrier);
+	return dev_change_carrier(dev, (bool) new_carrier);
 }
 
 static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
@@ -284,9 +284,9 @@
 
 /* read-write attributes */
 
-static int change_mtu(struct net_device *net, unsigned long new_mtu)
+static int change_mtu(struct net_device *dev, unsigned long new_mtu)
 {
-	return dev_set_mtu(net, (int) new_mtu);
+	return dev_set_mtu(dev, (int) new_mtu);
 }
 
 static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
@@ -296,9 +296,9 @@
 }
 NETDEVICE_SHOW_RW(mtu, fmt_dec);
 
-static int change_flags(struct net_device *net, unsigned long new_flags)
+static int change_flags(struct net_device *dev, unsigned long new_flags)
 {
-	return dev_change_flags(net, (unsigned int) new_flags);
+	return dev_change_flags(dev, (unsigned int) new_flags);
 }
 
 static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
@@ -308,9 +308,9 @@
 }
 NETDEVICE_SHOW_RW(flags, fmt_hex);
 
-static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
+static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
 {
-	net->tx_queue_len = new_len;
+	dev->tx_queue_len = new_len;
 	return 0;
 }
 
@@ -363,9 +363,9 @@
 }
 static DEVICE_ATTR_RW(ifalias);
 
-static int change_group(struct net_device *net, unsigned long new_group)
+static int change_group(struct net_device *dev, unsigned long new_group)
 {
-	dev_set_group(net, (int) new_group);
+	dev_set_group(dev, (int) new_group);
 	return 0;
 }
 
@@ -796,20 +796,20 @@
 	.namespace = rx_queue_namespace
 };
 
-static int rx_queue_add_kobject(struct net_device *net, int index)
+static int rx_queue_add_kobject(struct net_device *dev, int index)
 {
-	struct netdev_rx_queue *queue = net->_rx + index;
+	struct netdev_rx_queue *queue = dev->_rx + index;
 	struct kobject *kobj = &queue->kobj;
 	int error = 0;
 
-	kobj->kset = net->queues_kset;
+	kobj->kset = dev->queues_kset;
 	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
 	    "rx-%u", index);
 	if (error)
 		goto exit;
 
-	if (net->sysfs_rx_queue_group) {
-		error = sysfs_create_group(kobj, net->sysfs_rx_queue_group);
+	if (dev->sysfs_rx_queue_group) {
+		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
 		if (error)
 			goto exit;
 	}
@@ -825,18 +825,18 @@
 #endif /* CONFIG_SYSFS */
 
 int
-net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
+net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
 {
 #ifdef CONFIG_SYSFS
 	int i;
 	int error = 0;
 
 #ifndef CONFIG_RPS
-	if (!net->sysfs_rx_queue_group)
+	if (!dev->sysfs_rx_queue_group)
 		return 0;
 #endif
 	for (i = old_num; i < new_num; i++) {
-		error = rx_queue_add_kobject(net, i);
+		error = rx_queue_add_kobject(dev, i);
 		if (error) {
 			new_num = old_num;
 			break;
@@ -844,10 +844,10 @@
 	}
 
 	while (--i >= new_num) {
-		if (net->sysfs_rx_queue_group)
-			sysfs_remove_group(&net->_rx[i].kobj,
-					   net->sysfs_rx_queue_group);
-		kobject_put(&net->_rx[i].kobj);
+		if (dev->sysfs_rx_queue_group)
+			sysfs_remove_group(&dev->_rx[i].kobj,
+					   dev->sysfs_rx_queue_group);
+		kobject_put(&dev->_rx[i].kobj);
 	}
 
 	return error;
@@ -1155,13 +1155,13 @@
 	.namespace = netdev_queue_namespace,
 };
 
-static int netdev_queue_add_kobject(struct net_device *net, int index)
+static int netdev_queue_add_kobject(struct net_device *dev, int index)
 {
-	struct netdev_queue *queue = net->_tx + index;
+	struct netdev_queue *queue = dev->_tx + index;
 	struct kobject *kobj = &queue->kobj;
 	int error = 0;
 
-	kobj->kset = net->queues_kset;
+	kobj->kset = dev->queues_kset;
 	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
 	    "tx-%u", index);
 	if (error)
@@ -1184,14 +1184,14 @@
 #endif /* CONFIG_SYSFS */
 
 int
-netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
+netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
 {
 #ifdef CONFIG_SYSFS
 	int i;
 	int error = 0;
 
 	for (i = old_num; i < new_num; i++) {
-		error = netdev_queue_add_kobject(net, i);
+		error = netdev_queue_add_kobject(dev, i);
 		if (error) {
 			new_num = old_num;
 			break;
@@ -1199,7 +1199,7 @@
 	}
 
 	while (--i >= new_num) {
-		struct netdev_queue *queue = net->_tx + i;
+		struct netdev_queue *queue = dev->_tx + i;
 
 #ifdef CONFIG_BQL
 		sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1213,25 +1213,25 @@
 #endif /* CONFIG_SYSFS */
 }
 
-static int register_queue_kobjects(struct net_device *net)
+static int register_queue_kobjects(struct net_device *dev)
 {
 	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
 
 #ifdef CONFIG_SYSFS
-	net->queues_kset = kset_create_and_add("queues",
-	    NULL, &net->dev.kobj);
-	if (!net->queues_kset)
+	dev->queues_kset = kset_create_and_add("queues",
+	    NULL, &dev->dev.kobj);
+	if (!dev->queues_kset)
 		return -ENOMEM;
-	real_rx = net->real_num_rx_queues;
+	real_rx = dev->real_num_rx_queues;
 #endif
-	real_tx = net->real_num_tx_queues;
+	real_tx = dev->real_num_tx_queues;
 
-	error = net_rx_queue_update_kobjects(net, 0, real_rx);
+	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
 	if (error)
 		goto error;
 	rxq = real_rx;
 
-	error = netdev_queue_update_kobjects(net, 0, real_tx);
+	error = netdev_queue_update_kobjects(dev, 0, real_tx);
 	if (error)
 		goto error;
 	txq = real_tx;
@@ -1239,24 +1239,24 @@
 	return 0;
 
 error:
-	netdev_queue_update_kobjects(net, txq, 0);
-	net_rx_queue_update_kobjects(net, rxq, 0);
+	netdev_queue_update_kobjects(dev, txq, 0);
+	net_rx_queue_update_kobjects(dev, rxq, 0);
 	return error;
 }
 
-static void remove_queue_kobjects(struct net_device *net)
+static void remove_queue_kobjects(struct net_device *dev)
 {
 	int real_rx = 0, real_tx = 0;
 
 #ifdef CONFIG_SYSFS
-	real_rx = net->real_num_rx_queues;
+	real_rx = dev->real_num_rx_queues;
 #endif
-	real_tx = net->real_num_tx_queues;
+	real_tx = dev->real_num_tx_queues;
 
-	net_rx_queue_update_kobjects(net, real_rx, 0);
-	netdev_queue_update_kobjects(net, real_tx, 0);
+	net_rx_queue_update_kobjects(dev, real_rx, 0);
+	netdev_queue_update_kobjects(dev, real_tx, 0);
 #ifdef CONFIG_SYSFS
-	kset_unregister(net->queues_kset);
+	kset_unregister(dev->queues_kset);
 #endif
 }
 
@@ -1349,13 +1349,13 @@
 /* Delete sysfs entries but hold kobject reference until after all
  * netdev references are gone.
  */
-void netdev_unregister_kobject(struct net_device * net)
+void netdev_unregister_kobject(struct net_device *ndev)
 {
-	struct device *dev = &(net->dev);
+	struct device *dev = &(ndev->dev);
 
 	kobject_get(&dev->kobj);
 
-	remove_queue_kobjects(net);
+	remove_queue_kobjects(ndev);
 
 	pm_runtime_set_memalloc_noio(dev, false);
 
@@ -1363,18 +1363,18 @@
 }
 
 /* Create sysfs entries for network device. */
-int netdev_register_kobject(struct net_device *net)
+int netdev_register_kobject(struct net_device *ndev)
 {
-	struct device *dev = &(net->dev);
-	const struct attribute_group **groups = net->sysfs_groups;
+	struct device *dev = &(ndev->dev);
+	const struct attribute_group **groups = ndev->sysfs_groups;
 	int error = 0;
 
 	device_initialize(dev);
 	dev->class = &net_class;
-	dev->platform_data = net;
+	dev->platform_data = ndev;
 	dev->groups = groups;
 
-	dev_set_name(dev, "%s", net->name);
+	dev_set_name(dev, "%s", ndev->name);
 
 #ifdef CONFIG_SYSFS
 	/* Allow for a device specific group */
@@ -1384,10 +1384,10 @@
 	*groups++ = &netstat_group;
 
 #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
-	if (net->ieee80211_ptr)
+	if (ndev->ieee80211_ptr)
 		*groups++ = &wireless_group;
 #if IS_ENABLED(CONFIG_WIRELESS_EXT)
-	else if (net->wireless_handlers)
+	else if (ndev->wireless_handlers)
 		*groups++ = &wireless_group;
 #endif
 #endif
@@ -1397,7 +1397,7 @@
 	if (error)
 		return error;
 
-	error = register_queue_kobjects(net);
+	error = register_queue_kobjects(ndev);
 	if (error) {
 		device_del(dev);
 		return error;
diff --git a/net/core/sock.c b/net/core/sock.c
index 026e01f..134291d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -491,7 +491,7 @@
 
 	skb->dev = NULL;
 
-	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
 	}
@@ -862,8 +862,6 @@
 					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
 				  val & SOF_TIMESTAMPING_SOFTWARE);
-		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
-				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
 				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
 		break;
@@ -1102,8 +1100,6 @@
 			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
 		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
 			v.val |= SOF_TIMESTAMPING_SOFTWARE;
-		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
-			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
 		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
 			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
 		break;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index c34af7a..ca11d28 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1776,7 +1776,7 @@
  *
  * Priority 0 is an invalid priority in CEE spec. This routine
  * removes applications from the app list if the priority is
- * set to zero.
+ * set to zero. Priority is expected to be an 8-bit 802.1p user priority bitmap.
  */
 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
@@ -1837,7 +1837,8 @@
  *
  * This adds Application data to the list. Multiple application
 * entries may exist for the same selector and protocol as long
- * as the priorities are different.
+ * as the priorities are different. Priority is expected to be a
+ * 3-bit unsigned integer.
  */
 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
 {
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index 8af1330..c0d4154 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -12,13 +12,6 @@
 
 config IEEE802154_6LOWPAN
 	tristate "6lowpan support over IEEE 802.15.4"
-	depends on IEEE802154 && IPV6
-	select 6LOWPAN_IPHC
+	depends on IEEE802154 && 6LOWPAN
 	---help---
 	  IPv6 compression over IEEE 802.15.4.
-
-config 6LOWPAN_IPHC
-	tristate
-	---help---
-	  6lowpan compression code which is shared between IEEE 802.15.4 and Bluetooth
-	  stacks.
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index bf1b514..3914b1e 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,8 +1,7 @@
 obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
-obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o
-obj-$(CONFIG_6LOWPAN_IPHC) += 6lowpan_iphc.o
+obj-$(CONFIG_IEEE802154_6LOWPAN) += ieee802154_6lowpan.o
 
-6lowpan-y := 6lowpan_rtnl.o reassembly.o
+ieee802154_6lowpan-y := 6lowpan_rtnl.o reassembly.o
 ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o \
                 header_ops.o
 af_802154-y := af_ieee802154.o raw.o dgram.o
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
index b85bd3f..f13d4f3 100644
--- a/net/ieee802154/reassembly.c
+++ b/net/ieee802154/reassembly.c
@@ -50,29 +50,25 @@
 				     const struct ieee802154_addr *saddr,
 				     const struct ieee802154_addr *daddr)
 {
-	u32 c;
-
 	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
-	c = jhash_3words(ieee802154_addr_hash(saddr),
-			 ieee802154_addr_hash(daddr),
-			 (__force u32)(tag + (d_size << 16)),
-			 lowpan_frags.rnd);
-
-	return c & (INETFRAGS_HASHSZ - 1);
+	return jhash_3words(ieee802154_addr_hash(saddr),
+			    ieee802154_addr_hash(daddr),
+			    (__force u32)(tag + (d_size << 16)),
+			    lowpan_frags.rnd);
 }
 
-static unsigned int lowpan_hashfn(struct inet_frag_queue *q)
+static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
 {
-	struct lowpan_frag_queue *fq;
+	const struct lowpan_frag_queue *fq;
 
 	fq = container_of(q, struct lowpan_frag_queue, q);
 	return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
 }
 
-static bool lowpan_frag_match(struct inet_frag_queue *q, void *a)
+static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
 {
-	struct lowpan_frag_queue *fq;
-	struct lowpan_create_arg *arg = a;
+	const struct lowpan_frag_queue *fq;
+	const struct lowpan_create_arg *arg = a;
 
 	fq = container_of(q, struct lowpan_frag_queue, q);
 	return	fq->tag == arg->tag && fq->d_size == arg->d_size &&
@@ -80,10 +76,10 @@
 		ieee802154_addr_equal(&fq->daddr, arg->dst);
 }
 
-static void lowpan_frag_init(struct inet_frag_queue *q, void *a)
+static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
 {
+	const struct lowpan_create_arg *arg = a;
 	struct lowpan_frag_queue *fq;
-	struct lowpan_create_arg *arg = a;
 
 	fq = container_of(q, struct lowpan_frag_queue, q);
 
@@ -128,7 +124,6 @@
 	arg.src = src;
 	arg.dst = dst;
 
-	read_lock(&lowpan_frags.lock);
 	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
 
 	q = inet_frag_find(&ieee802154_lowpan->frags,
@@ -223,7 +218,6 @@
 		return res;
 	}
 
-	inet_frag_lru_move(&fq->q);
 	return -1;
 err:
 	kfree_skb(skb);
@@ -373,8 +367,6 @@
 	if (frag_info->d_size > ieee802154_lowpan->max_dsize)
 		goto err;
 
-	inet_frag_evictor(&ieee802154_lowpan->frags, &lowpan_frags, false);
-
 	fq = fq_find(net, frag_info, &source, &dest);
 	if (fq != NULL) {
 		int ret;
@@ -394,20 +386,25 @@
 EXPORT_SYMBOL(lowpan_frag_rcv);
 
 #ifdef CONFIG_SYSCTL
+static int zero;
+
 static struct ctl_table lowpan_frags_ns_ctl_table[] = {
 	{
 		.procname	= "6lowpanfrag_high_thresh",
 		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &init_net.ieee802154_lowpan.frags.low_thresh
 	},
 	{
 		.procname	= "6lowpanfrag_low_thresh",
 		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &init_net.ieee802154_lowpan.frags.high_thresh
 	},
 	{
 		.procname	= "6lowpanfrag_time",
@@ -426,10 +423,12 @@
 	{ }
 };
 
+/* secret interval has been deprecated */
+static int lowpan_frags_secret_interval_unused;
 static struct ctl_table lowpan_frags_ctl_table[] = {
 	{
 		.procname	= "6lowpanfrag_secret_interval",
-		.data		= &lowpan_frags.secret_interval,
+		.data		= &lowpan_frags_secret_interval_unused,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
@@ -452,7 +451,10 @@
 			goto err_alloc;
 
 		table[0].data = &ieee802154_lowpan->frags.high_thresh;
+		table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
+		table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
 		table[1].data = &ieee802154_lowpan->frags.low_thresh;
+		table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
 		table[2].data = &ieee802154_lowpan->frags.timeout;
 		table[3].data = &ieee802154_lowpan->max_dsize;
 
@@ -569,7 +571,6 @@
 	lowpan_frags.qsize = sizeof(struct frag_queue);
 	lowpan_frags.match = lowpan_frag_match;
 	lowpan_frags.frag_expire = lowpan_frag_expire;
-	lowpan_frags.secret_interval = 10 * 60 * HZ;
 	inet_frags_init(&lowpan_frags);
 
 	return ret;
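
The 6lowpanfrag threshold handlers above (and the ipfrag/ip6frag/nf_conntrack_frag6 ones later in this patch) switch to proc_dointvec_minmax so that the low threshold can never be raised above the high one, and vice versa. A minimal sketch of the pattern with made-up names; extra1/extra2 are the inclusive bounds the handler enforces:

	static int zero;
	static int demo_low_thresh  = 3 * 1024 * 1024;
	static int demo_high_thresh = 4 * 1024 * 1024;

	static struct ctl_table demo_table[] = {
		{
			.procname	= "demo_high_thresh",
			.data		= &demo_high_thresh,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_minmax,
			.extra1		= &demo_low_thresh,	/* high >= low */
		},
		{
			.procname	= "demo_low_thresh",
			.data		= &demo_low_thresh,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_minmax,
			.extra1		= &zero,		/* low >= 0 */
			.extra2		= &demo_high_thresh,	/* low <= high */
		},
		{ }
	};
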
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e944937..214882e 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -180,11 +180,12 @@
 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 			 int destroy);
 #ifdef CONFIG_SYSCTL
-static void devinet_sysctl_register(struct in_device *idev);
+static int devinet_sysctl_register(struct in_device *idev);
 static void devinet_sysctl_unregister(struct in_device *idev);
 #else
-static void devinet_sysctl_register(struct in_device *idev)
+static int devinet_sysctl_register(struct in_device *idev)
 {
+	return 0;
 }
 static void devinet_sysctl_unregister(struct in_device *idev)
 {
@@ -232,6 +233,7 @@
 static struct in_device *inetdev_init(struct net_device *dev)
 {
 	struct in_device *in_dev;
+	int err = -ENOMEM;
 
 	ASSERT_RTNL();
 
@@ -252,7 +254,13 @@
 	/* Account for reference dev->ip_ptr (below) */
 	in_dev_hold(in_dev);
 
-	devinet_sysctl_register(in_dev);
+	err = devinet_sysctl_register(in_dev);
+	if (err) {
+		in_dev->dead = 1;
+		in_dev_put(in_dev);
+		in_dev = NULL;
+		goto out;
+	}
 	ip_mc_init_dev(in_dev);
 	if (dev->flags & IFF_UP)
 		ip_mc_up(in_dev);
@@ -260,7 +268,7 @@
 	/* we can receive as soon as ip_ptr is set -- do this last */
 	rcu_assign_pointer(dev->ip_ptr, in_dev);
 out:
-	return in_dev;
+	return in_dev ?: ERR_PTR(err);
 out_kfree:
 	kfree(in_dev);
 	in_dev = NULL;
@@ -1347,8 +1355,8 @@
 	if (!in_dev) {
 		if (event == NETDEV_REGISTER) {
 			in_dev = inetdev_init(dev);
-			if (!in_dev)
-				return notifier_from_errno(-ENOMEM);
+			if (IS_ERR(in_dev))
+				return notifier_from_errno(PTR_ERR(in_dev));
 			if (dev->flags & IFF_LOOPBACK) {
 				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
 				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
@@ -2182,11 +2190,21 @@
 	kfree(t);
 }
 
-static void devinet_sysctl_register(struct in_device *idev)
+static int devinet_sysctl_register(struct in_device *idev)
 {
-	neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
-	__devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
+	int err;
+
+	if (!sysctl_dev_name_is_allowed(idev->dev->name))
+		return -EINVAL;
+
+	err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
+	if (err)
+		return err;
+	err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
 					&idev->cnf);
+	if (err)
+		neigh_sysctl_unregister(idev->arp_parms);
+	return err;
 }
 
 static void devinet_sysctl_unregister(struct in_device *idev)
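
inetdev_init() above (and ipv6_add_dev() later in this patch) now reports why it failed using the ERR_PTR convention instead of returning NULL. A minimal sketch of that convention for reference; struct thing and setup_thing() are placeholders, not real kernel APIs:

	static struct thing *thing_create(void)
	{
		struct thing *t;
		int err;

		t = kzalloc(sizeof(*t), GFP_KERNEL);
		if (!t)
			return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */

		err = setup_thing(t);			/* placeholder */
		if (err) {
			kfree(t);
			return ERR_PTR(err);
		}
		return t;
	}

	/* caller side: check and decode */
	t = thing_create();
	if (IS_ERR(t))
		return notifier_from_errno(PTR_ERR(t));
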
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index f0bdd47..6556263 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -74,7 +74,7 @@
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
 	segs = skb_mac_gso_segment(skb, enc_features);
-	if (!segs || IS_ERR(segs)) {
+	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
 		goto out;
 	}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index db710b0..f10eab4 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1321,7 +1321,7 @@
 	atomic_set(&im->refcnt, 1);
 	spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
-	setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
+	setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);
 	im->unsolicit_count = IGMP_Unsolicited_Report_Count;
 #endif
 
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 3b01959..62b1f73 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -25,6 +25,12 @@
 #include <net/inet_frag.h>
 #include <net/inet_ecn.h>
 
+#define INETFRAGS_EVICT_BUCKETS   128
+#define INETFRAGS_EVICT_MAX	  512
+
+/* don't rebuild inetfrag table with new secret more often than this */
+#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
+
 /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
  * Value : 0xff if frame should be dropped.
  *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
@@ -46,24 +52,39 @@
 };
 EXPORT_SYMBOL(ip_frag_ecn_table);
 
-static void inet_frag_secret_rebuild(unsigned long dummy)
+static unsigned int
+inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
 {
-	struct inet_frags *f = (struct inet_frags *)dummy;
-	unsigned long now = jiffies;
+	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
+}
+
+static bool inet_frag_may_rebuild(struct inet_frags *f)
+{
+	return time_after(jiffies,
+	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
+}
+
+static void inet_frag_secret_rebuild(struct inet_frags *f)
+{
 	int i;
 
-	/* Per bucket lock NOT needed here, due to write lock protection */
-	write_lock(&f->lock);
+	write_seqlock_bh(&f->rnd_seqlock);
+
+	if (!inet_frag_may_rebuild(f))
+		goto out;
 
 	get_random_bytes(&f->rnd, sizeof(u32));
+
 	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
 		struct inet_frag_bucket *hb;
 		struct inet_frag_queue *q;
 		struct hlist_node *n;
 
 		hb = &f->hash[i];
+		spin_lock(&hb->chain_lock);
+
 		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
-			unsigned int hval = f->hashfn(q);
+			unsigned int hval = inet_frag_hashfn(f, q);
 
 			if (hval != i) {
 				struct inet_frag_bucket *hb_dest;
@@ -72,76 +93,195 @@
 
 				/* Relink to new hash chain. */
 				hb_dest = &f->hash[hval];
+
+				/* This is the only place where we take
+				 * another chain_lock while already holding
+				 * one.  As this will not run concurrently,
+				 * we cannot deadlock on the hb_dest lock
+				 * below: if it is already locked, it will be
+				 * released soon, since the other caller cannot
+				 * be waiting for the hb lock that we've taken
+				 * above.
+				 */
+				spin_lock_nested(&hb_dest->chain_lock,
+						 SINGLE_DEPTH_NESTING);
 				hlist_add_head(&q->list, &hb_dest->chain);
+				spin_unlock(&hb_dest->chain_lock);
 			}
 		}
+		spin_unlock(&hb->chain_lock);
 	}
-	write_unlock(&f->lock);
 
-	mod_timer(&f->secret_timer, now + f->secret_interval);
+	f->rebuild = false;
+	f->last_rebuild_jiffies = jiffies;
+out:
+	write_sequnlock_bh(&f->rnd_seqlock);
+}
+
+static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
+{
+	return q->net->low_thresh == 0 ||
+	       frag_mem_limit(q->net) >= q->net->low_thresh;
+}
+
+static unsigned int
+inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
+{
+	struct inet_frag_queue *fq;
+	struct hlist_node *n;
+	unsigned int evicted = 0;
+	HLIST_HEAD(expired);
+
+evict_again:
+	spin_lock(&hb->chain_lock);
+
+	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
+		if (!inet_fragq_should_evict(fq))
+			continue;
+
+		if (!del_timer(&fq->timer)) {
+			/* q is expiring right now, so take a reference to
+			 * keep it from being freed under us, wait until the
+			 * timer has finished executing, then destroy it
+			 */
+			atomic_inc(&fq->refcnt);
+			spin_unlock(&hb->chain_lock);
+			del_timer_sync(&fq->timer);
+			WARN_ON(atomic_read(&fq->refcnt) != 1);
+			inet_frag_put(fq, f);
+			goto evict_again;
+		}
+
+		/* suppress xmit of (icmp) error packet */
+		fq->last_in &= ~INET_FRAG_FIRST_IN;
+		fq->last_in |= INET_FRAG_EVICTED;
+		hlist_del(&fq->list);
+		hlist_add_head(&fq->list, &expired);
+		++evicted;
+	}
+
+	spin_unlock(&hb->chain_lock);
+
+	hlist_for_each_entry_safe(fq, n, &expired, list)
+		f->frag_expire((unsigned long) fq);
+
+	return evicted;
+}
+
+static void inet_frag_worker(struct work_struct *work)
+{
+	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
+	unsigned int i, evicted = 0;
+	struct inet_frags *f;
+
+	f = container_of(work, struct inet_frags, frags_work);
+
+	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
+
+	local_bh_disable();
+
+	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
+		evicted += inet_evict_bucket(f, &f->hash[i]);
+		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
+		if (evicted > INETFRAGS_EVICT_MAX)
+			break;
+	}
+
+	f->next_bucket = i;
+
+	local_bh_enable();
+
+	if (f->rebuild && inet_frag_may_rebuild(f))
+		inet_frag_secret_rebuild(f);
+}
+
+static void inet_frag_schedule_worker(struct inet_frags *f)
+{
+	if (unlikely(!work_pending(&f->frags_work)))
+		schedule_work(&f->frags_work);
 }
 
 void inet_frags_init(struct inet_frags *f)
 {
 	int i;
 
+	INIT_WORK(&f->frags_work, inet_frag_worker);
+
 	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
 		struct inet_frag_bucket *hb = &f->hash[i];
 
 		spin_lock_init(&hb->chain_lock);
 		INIT_HLIST_HEAD(&hb->chain);
 	}
-	rwlock_init(&f->lock);
 
-	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
-			(unsigned long)f);
-	f->secret_timer.expires = jiffies + f->secret_interval;
-	add_timer(&f->secret_timer);
+	seqlock_init(&f->rnd_seqlock);
+	f->last_rebuild_jiffies = 0;
 }
 EXPORT_SYMBOL(inet_frags_init);
 
 void inet_frags_init_net(struct netns_frags *nf)
 {
-	nf->nqueues = 0;
 	init_frag_mem_limit(nf);
-	INIT_LIST_HEAD(&nf->lru_list);
-	spin_lock_init(&nf->lru_lock);
 }
 EXPORT_SYMBOL(inet_frags_init_net);
 
 void inet_frags_fini(struct inet_frags *f)
 {
-	del_timer(&f->secret_timer);
+	cancel_work_sync(&f->frags_work);
 }
 EXPORT_SYMBOL(inet_frags_fini);
 
 void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
 {
-	nf->low_thresh = 0;
+	unsigned int seq;
+	int i;
 
+	nf->low_thresh = 0;
 	local_bh_disable();
-	inet_frag_evictor(nf, f, true);
+
+evict_again:
+	seq = read_seqbegin(&f->rnd_seqlock);
+
+	for (i = 0; i < INETFRAGS_HASHSZ; i++)
+		inet_evict_bucket(f, &f->hash[i]);
+
+	if (read_seqretry(&f->rnd_seqlock, seq))
+		goto evict_again;
+
 	local_bh_enable();
 
 	percpu_counter_destroy(&nf->mem);
 }
 EXPORT_SYMBOL(inet_frags_exit_net);
 
-static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+static struct inet_frag_bucket *
+get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
+__acquires(hb->chain_lock)
 {
 	struct inet_frag_bucket *hb;
-	unsigned int hash;
+	unsigned int seq, hash;
 
-	read_lock(&f->lock);
-	hash = f->hashfn(fq);
+ restart:
+	seq = read_seqbegin(&f->rnd_seqlock);
+
+	hash = inet_frag_hashfn(f, fq);
 	hb = &f->hash[hash];
 
 	spin_lock(&hb->chain_lock);
+	if (read_seqretry(&f->rnd_seqlock, seq)) {
+		spin_unlock(&hb->chain_lock);
+		goto restart;
+	}
+
+	return hb;
+}
+
+static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+{
+	struct inet_frag_bucket *hb;
+
+	hb = get_frag_bucket_locked(fq, f);
 	hlist_del(&fq->list);
 	spin_unlock(&hb->chain_lock);
-
-	read_unlock(&f->lock);
-	inet_frag_lru_del(fq);
 }
 
 void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
@@ -165,8 +305,7 @@
 	kfree_skb(skb);
 }
 
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
-					int *work)
+void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
 {
 	struct sk_buff *fp;
 	struct netns_frags *nf;
@@ -186,86 +325,30 @@
 		fp = xp;
 	}
 	sum = sum_truesize + f->qsize;
-	if (work)
-		*work -= sum;
 	sub_frag_mem_limit(q, sum);
 
 	if (f->destructor)
 		f->destructor(q);
 	kfree(q);
-
 }
 EXPORT_SYMBOL(inet_frag_destroy);
 
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
-{
-	struct inet_frag_queue *q;
-	int work, evicted = 0;
-
-	if (!force) {
-		if (frag_mem_limit(nf) <= nf->high_thresh)
-			return 0;
-	}
-
-	work = frag_mem_limit(nf) - nf->low_thresh;
-	while (work > 0 || force) {
-		spin_lock(&nf->lru_lock);
-
-		if (list_empty(&nf->lru_list)) {
-			spin_unlock(&nf->lru_lock);
-			break;
-		}
-
-		q = list_first_entry(&nf->lru_list,
-				struct inet_frag_queue, lru_list);
-		atomic_inc(&q->refcnt);
-		/* Remove q from list to avoid several CPUs grabbing it */
-		list_del_init(&q->lru_list);
-
-		spin_unlock(&nf->lru_lock);
-
-		spin_lock(&q->lock);
-		if (!(q->last_in & INET_FRAG_COMPLETE))
-			inet_frag_kill(q, f);
-		spin_unlock(&q->lock);
-
-		if (atomic_dec_and_test(&q->refcnt))
-			inet_frag_destroy(q, f, &work);
-		evicted++;
-	}
-
-	return evicted;
-}
-EXPORT_SYMBOL(inet_frag_evictor);
-
 static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 		struct inet_frag_queue *qp_in, struct inet_frags *f,
 		void *arg)
 {
-	struct inet_frag_bucket *hb;
+	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
 	struct inet_frag_queue *qp;
-	unsigned int hash;
-
-	read_lock(&f->lock); /* Protects against hash rebuild */
-	/*
-	 * While we stayed w/o the lock other CPU could update
-	 * the rnd seed, so we need to re-calculate the hash
-	 * chain. Fortunatelly the qp_in can be used to get one.
-	 */
-	hash = f->hashfn(qp_in);
-	hb = &f->hash[hash];
-	spin_lock(&hb->chain_lock);
 
 #ifdef CONFIG_SMP
 	/* With SMP race we have to recheck hash table, because
-	 * such entry could be created on other cpu, while we
-	 * released the hash bucket lock.
+	 * such an entry could have been created on another cpu
+	 * before we acquired the hash bucket lock.
 	 */
 	hlist_for_each_entry(qp, &hb->chain, list) {
 		if (qp->net == nf && f->match(qp, arg)) {
 			atomic_inc(&qp->refcnt);
 			spin_unlock(&hb->chain_lock);
-			read_unlock(&f->lock);
 			qp_in->last_in |= INET_FRAG_COMPLETE;
 			inet_frag_put(qp_in, f);
 			return qp;
@@ -278,9 +361,8 @@
 
 	atomic_inc(&qp->refcnt);
 	hlist_add_head(&qp->list, &hb->chain);
-	inet_frag_lru_add(nf, qp);
+
 	spin_unlock(&hb->chain_lock);
-	read_unlock(&f->lock);
 
 	return qp;
 }
@@ -290,6 +372,11 @@
 {
 	struct inet_frag_queue *q;
 
+	if (frag_mem_limit(nf) > nf->high_thresh) {
+		inet_frag_schedule_worker(f);
+		return NULL;
+	}
+
 	q = kzalloc(f->qsize, GFP_ATOMIC);
 	if (q == NULL)
 		return NULL;
@@ -301,7 +388,6 @@
 	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
 	spin_lock_init(&q->lock);
 	atomic_set(&q->refcnt, 1);
-	INIT_LIST_HEAD(&q->lru_list);
 
 	return q;
 }
@@ -320,12 +406,15 @@
 
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 		struct inet_frags *f, void *key, unsigned int hash)
-	__releases(&f->lock)
 {
 	struct inet_frag_bucket *hb;
 	struct inet_frag_queue *q;
 	int depth = 0;
 
+	if (frag_mem_limit(nf) > nf->low_thresh)
+		inet_frag_schedule_worker(f);
+
+	hash &= (INETFRAGS_HASHSZ - 1);
 	hb = &f->hash[hash];
 
 	spin_lock(&hb->chain_lock);
@@ -333,18 +422,22 @@
 		if (q->net == nf && f->match(q, key)) {
 			atomic_inc(&q->refcnt);
 			spin_unlock(&hb->chain_lock);
-			read_unlock(&f->lock);
 			return q;
 		}
 		depth++;
 	}
 	spin_unlock(&hb->chain_lock);
-	read_unlock(&f->lock);
 
 	if (depth <= INETFRAGS_MAXDEPTH)
 		return inet_frag_create(nf, f, key);
-	else
-		return ERR_PTR(-ENOBUFS);
+
+	if (inet_frag_may_rebuild(f)) {
+		if (!f->rebuild)
+			f->rebuild = true;
+		inet_frag_schedule_worker(f);
+	}
+
+	return ERR_PTR(-ENOBUFS);
 }
 EXPORT_SYMBOL(inet_frag_find);
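
The hash rework above replaces the old global rwlock with a seqlock around the hash secret: lookups hash first, take the bucket lock, and retry if a secret rebuild raced in between. A minimal sketch of that lock-then-recheck loop, mirroring get_frag_bucket_locked() with illustrative names:

	static struct inet_frag_bucket *
	bucket_lock(struct inet_frags *f, struct inet_frag_queue *q)
	{
		struct inet_frag_bucket *hb;
		unsigned int seq;

		for (;;) {
			/* snapshot the secret generation */
			seq = read_seqbegin(&f->rnd_seqlock);
			/* hash with the current secret */
			hb = &f->hash[f->hashfn(q) & (INETFRAGS_HASHSZ - 1)];
			spin_lock(&hb->chain_lock);
			/* secret unchanged: this is still the right bucket */
			if (!read_seqretry(&f->rnd_seqlock, seq))
				return hb;
			/* a rebuild raced with us: drop the lock and retry */
			spin_unlock(&hb->chain_lock);
		}
	}
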
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index ed32313..634fc31 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -86,11 +86,6 @@
 
 static struct inet_frags ip4_frags;
 
-int ip_frag_nqueues(struct net *net)
-{
-	return net->ipv4.frags.nqueues;
-}
-
 int ip_frag_mem(struct net *net)
 {
 	return sum_frag_mem_limit(&net->ipv4.frags);
@@ -109,21 +104,21 @@
 	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
 	return jhash_3words((__force u32)id << 16 | prot,
 			    (__force u32)saddr, (__force u32)daddr,
-			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
+			    ip4_frags.rnd);
 }
 
-static unsigned int ip4_hashfn(struct inet_frag_queue *q)
+static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
 {
-	struct ipq *ipq;
+	const struct ipq *ipq;
 
 	ipq = container_of(q, struct ipq, q);
 	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
 }
 
-static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
+static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
 {
-	struct ipq *qp;
-	struct ip4_create_arg *arg = a;
+	const struct ipq *qp;
+	const struct ip4_create_arg *arg = a;
 
 	qp = container_of(q, struct ipq, q);
 	return	qp->id == arg->iph->id &&
@@ -133,14 +128,14 @@
 		qp->user == arg->user;
 }
 
-static void ip4_frag_init(struct inet_frag_queue *q, void *a)
+static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
 {
 	struct ipq *qp = container_of(q, struct ipq, q);
 	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
 					       frags);
 	struct net *net = container_of(ipv4, struct net, ipv4);
 
-	struct ip4_create_arg *arg = a;
+	const struct ip4_create_arg *arg = a;
 
 	qp->protocol = arg->iph->protocol;
 	qp->id = arg->iph->id;
@@ -177,18 +172,6 @@
 	inet_frag_kill(&ipq->q, &ip4_frags);
 }
 
-/* Memory limiting on fragments.  Evictor trashes the oldest
- * fragment queue until we are back under the threshold.
- */
-static void ip_evictor(struct net *net)
-{
-	int evicted;
-
-	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags, false);
-	if (evicted)
-		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
-}
-
 /*
  * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
  */
@@ -207,7 +190,8 @@
 
 	ipq_kill(qp);
 
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
+	if (!(qp->q.last_in & INET_FRAG_EVICTED))
+		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
 
 	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
@@ -260,7 +244,6 @@
 	arg.iph = iph;
 	arg.user = user;
 
-	read_lock(&ip4_frags.lock);
 	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
 	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
@@ -505,7 +488,6 @@
 	}
 
 	skb_dst_drop(skb);
-	inet_frag_lru_move(&qp->q);
 	return -EINPROGRESS;
 
 err:
@@ -655,9 +637,6 @@
 	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
-	/* Start by cleaning up the memory. */
-	ip_evictor(net);
-
 	/* Lookup (or create) queue header */
 	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
 		int ret;
@@ -721,14 +700,17 @@
 		.data		= &init_net.ipv4.frags.high_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &init_net.ipv4.frags.low_thresh
 	},
 	{
 		.procname	= "ipfrag_low_thresh",
 		.data		= &init_net.ipv4.frags.low_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &init_net.ipv4.frags.high_thresh
 	},
 	{
 		.procname	= "ipfrag_time",
@@ -740,10 +722,12 @@
 	{ }
 };
 
+/* secret interval has been deprecated */
+static int ip4_frags_secret_interval_unused;
 static struct ctl_table ip4_frags_ctl_table[] = {
 	{
 		.procname	= "ipfrag_secret_interval",
-		.data		= &ip4_frags.secret_interval,
+		.data		= &ip4_frags_secret_interval_unused,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
@@ -771,7 +755,10 @@
 			goto err_alloc;
 
 		table[0].data = &net->ipv4.frags.high_thresh;
+		table[0].extra1 = &net->ipv4.frags.low_thresh;
+		table[0].extra2 = &init_net.ipv4.frags.high_thresh;
 		table[1].data = &net->ipv4.frags.low_thresh;
+		table[1].extra2 = &net->ipv4.frags.high_thresh;
 		table[2].data = &net->ipv4.frags.timeout;
 
 		/* Don't export sysctls to unprivileged users */
@@ -873,6 +860,5 @@
 	ip4_frags.qsize = sizeof(struct ipq);
 	ip4_frags.match = ip4_frag_match;
 	ip4_frags.frag_expire = ip_expire;
-	ip4_frags.secret_interval = 10 * 60 * HZ;
 	inet_frags_init(&ip4_frags);
 }
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 64741b9..5cb830c 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1319,7 +1319,7 @@
 		if (sk->sk_type != SOCK_STREAM)
 			return -ENOPROTOOPT;
 
-		msg.msg_control = optval;
+		msg.msg_control = (__force void *) optval;
 		msg.msg_controllen = len;
 		msg.msg_flags = flags;
 
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index b8960f3..e453cb7 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -534,40 +534,28 @@
 
 static int __init vti_init(void)
 {
+	const char *msg;
 	int err;
 
-	pr_info("IPv4 over IPSec tunneling driver\n");
+	pr_info("IPv4 over IPsec tunneling driver\n");
 
+	msg = "tunnel device";
 	err = register_pernet_device(&vti_net_ops);
 	if (err < 0)
-		return err;
+		goto pernet_dev_failed;
+
+	msg = "tunnel protocols";
 	err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
-	if (err < 0) {
-		unregister_pernet_device(&vti_net_ops);
-		pr_info("vti init: can't register tunnel\n");
-
-		return err;
-	}
-
+	if (err < 0)
+		goto xfrm_proto_esp_failed;
 	err = xfrm4_protocol_register(&vti_ah4_protocol, IPPROTO_AH);
-	if (err < 0) {
-		xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
-		unregister_pernet_device(&vti_net_ops);
-		pr_info("vti init: can't register tunnel\n");
-
-		return err;
-	}
-
+	if (err < 0)
+		goto xfrm_proto_ah_failed;
 	err = xfrm4_protocol_register(&vti_ipcomp4_protocol, IPPROTO_COMP);
-	if (err < 0) {
-		xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
-		xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
-		unregister_pernet_device(&vti_net_ops);
-		pr_info("vti init: can't register tunnel\n");
+	if (err < 0)
+		goto xfrm_proto_comp_failed;
 
-		return err;
-	}
-
+	msg = "netlink interface";
 	err = rtnl_link_register(&vti_link_ops);
 	if (err < 0)
 		goto rtnl_link_failed;
@@ -576,23 +564,23 @@
 
 rtnl_link_failed:
 	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+xfrm_proto_comp_failed:
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+xfrm_proto_ah_failed:
 	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
+xfrm_proto_esp_failed:
 	unregister_pernet_device(&vti_net_ops);
+pernet_dev_failed:
+	pr_err("vti init: failed to register %s\n", msg);
 	return err;
 }
 
 static void __exit vti_fini(void)
 {
 	rtnl_link_unregister(&vti_link_ops);
-	if (xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP))
-		pr_info("vti close: can't deregister tunnel\n");
-	if (xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH))
-		pr_info("vti close: can't deregister tunnel\n");
-	if (xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP))
-		pr_info("vti close: can't deregister tunnel\n");
-
-
+	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
 	unregister_pernet_device(&vti_net_ops);
 }
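
vti_init() above (and vti6_tunnel_init() further down in this patch) is reshaped into the usual layered-goto unwind, with a single msg string naming the registration step that failed. A generic sketch of the shape; the register_*/unregister_* calls are placeholders, not real APIs:

	static int __init demo_init(void)
	{
		const char *msg;
		int err;

		msg = "step A";
		err = register_step_a();	/* placeholder */
		if (err < 0)
			goto step_a_failed;

		msg = "step B";
		err = register_step_b();	/* placeholder */
		if (err < 0)
			goto step_b_failed;

		return 0;

	step_b_failed:
		unregister_step_a();		/* unwind in reverse order */
	step_a_failed:
		pr_err("demo init: failed to register %s\n", msg);
		return err;
	}
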
 
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index ae0af93..8e3eb39 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -52,6 +52,7 @@
 static int sockstat_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
+	unsigned int frag_mem;
 	int orphans, sockets;
 
 	local_bh_disable();
@@ -71,8 +72,8 @@
 		   sock_prot_inuse_get(net, &udplite_prot));
 	seq_printf(seq, "RAW: inuse %d\n",
 		   sock_prot_inuse_get(net, &raw_prot));
-	seq_printf(seq,  "FRAG: inuse %d memory %d\n",
-			ip_frag_nqueues(net), ip_frag_mem(net));
+	frag_mem = ip_frag_mem(net);
+	seq_printf(seq,  "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
 	return 0;
 }
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 2054d71..739db31 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -58,6 +58,7 @@
 #include <linux/in_route.h>
 #include <linux/route.h>
 #include <linux/skbuff.h>
+#include <linux/igmp.h>
 #include <net/net_namespace.h>
 #include <net/dst.h>
 #include <net/sock.h>
@@ -174,7 +175,9 @@
 
 	while (sk) {
 		delivered = 1;
-		if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
+		if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
+		    ip_mc_sf_allow(sk, iph->daddr, iph->saddr,
+				   skb->dev->ifindex)) {
 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 
 			/* Not releasing hash table! */
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3162ea9..1901998 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -457,8 +457,31 @@
 	return neigh_create(&arp_tbl, pkey, dev);
 }
 
-atomic_t *ip_idents __read_mostly;
-EXPORT_SYMBOL(ip_idents);
+#define IP_IDENTS_SZ 2048u
+struct ip_ident_bucket {
+	atomic_t	id;
+	u32		stamp32;
+};
+
+static struct ip_ident_bucket *ip_idents __read_mostly;
+
+/* In order to protect privacy, we add a perturbation to identifiers
+ * if one generator is seldom used. This makes it hard for an attacker
+ * to infer how many packets were sent between two points in time.
+ */
+u32 ip_idents_reserve(u32 hash, int segs)
+{
+	struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
+	u32 old = ACCESS_ONCE(bucket->stamp32);
+	u32 now = (u32)jiffies;
+	u32 delta = 0;
+
+	if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+		delta = prandom_u32_max(now - old);
+
+	return atomic_add_return(segs + delta, &bucket->id) - segs;
+}
+EXPORT_SYMBOL(ip_idents_reserve);
 
 void __ip_select_ident(struct iphdr *iph, int segs)
 {
@@ -467,7 +490,10 @@
 
 	net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
 
-	hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
+	hash = jhash_3words((__force u32)iph->daddr,
+			    (__force u32)iph->saddr,
+			    iph->protocol,
+			    ip_idents_hashrnd);
 	id = ip_idents_reserve(hash, segs);
 	iph->id = htons(id);
 }
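
The new ip_idents_reserve() only perturbs an identifier bucket when it has been idle: the random jump is bounded by the idle time, so a busy flow keeps a roughly sequential ID stream while an observer of a quiet one can no longer count the packets sent in between from the ID gap. A userspace sketch of the same arithmetic; it is single-threaded and purely illustrative (the kernel version uses cmpxchg() on the timestamp and the atomic_t counter shown above):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdlib.h>

	struct ident_bucket {
		atomic_uint id;		/* running identifier counter */
		uint32_t    stamp;	/* last use, in coarse ticks */
	};

	/* Reserve 'segs' identifiers; returns the first one. */
	static uint32_t reserve_ids(struct ident_bucket *b, uint32_t now, int segs)
	{
		uint32_t delta = 0;

		if (b->stamp != now) {
			/* bucket was idle: random jump bounded by idle time */
			delta = (uint32_t)rand() % (now - b->stamp);
			b->stamp = now;
		}
		return atomic_fetch_add(&b->id, segs + delta) + delta;
	}
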
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 4fe0418..0d54e59 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -1093,7 +1093,6 @@
 		.doit = tcp_metrics_nl_cmd_get,
 		.dumpit = tcp_metrics_nl_dump,
 		.policy = tcp_metrics_nl_policy,
-		.flags = GENL_ADMIN_PERM,
 	},
 	{
 		.cmd = TCP_METRICS_CMD_DEL,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f31053b..f57c0e4 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1567,7 +1567,7 @@
 		goto csum_error;
 
 
-	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
 		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 				 is_udplite);
 		goto drop;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 4807544..59035bc 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -79,7 +79,7 @@
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
 	segs = skb_mac_gso_segment(skb, enc_features);
-	if (!segs || IS_ERR(segs)) {
+	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
 				     mac_len);
 		goto out;
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
index a2ce010..dccefa9 100644
--- a/net/ipv4/xfrm4_protocol.c
+++ b/net/ipv4/xfrm4_protocol.c
@@ -124,7 +124,7 @@
 
 	for_each_protocol_rcu(ah4_handlers, handler)
 		if ((ret = handler->handler(skb)) != -EINVAL)
-			return ret;;
+			return ret;
 
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4c03c28..0b239fc 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -108,11 +108,12 @@
 }
 
 #ifdef CONFIG_SYSCTL
-static void addrconf_sysctl_register(struct inet6_dev *idev);
+static int addrconf_sysctl_register(struct inet6_dev *idev);
 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
 #else
-static inline void addrconf_sysctl_register(struct inet6_dev *idev)
+static inline int addrconf_sysctl_register(struct inet6_dev *idev)
 {
+	return 0;
 }
 
 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
@@ -310,16 +311,16 @@
 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 {
 	struct inet6_dev *ndev;
+	int err = -ENOMEM;
 
 	ASSERT_RTNL();
 
 	if (dev->mtu < IPV6_MIN_MTU)
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
-
 	if (ndev == NULL)
-		return NULL;
+		return ERR_PTR(err);
 
 	rwlock_init(&ndev->lock);
 	ndev->dev = dev;
@@ -332,7 +333,7 @@
 	ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
 	if (ndev->nd_parms == NULL) {
 		kfree(ndev);
-		return NULL;
+		return ERR_PTR(err);
 	}
 	if (ndev->cnf.forwarding)
 		dev_disable_lro(dev);
@@ -346,17 +347,14 @@
 		neigh_parms_release(&nd_tbl, ndev->nd_parms);
 		dev_put(dev);
 		kfree(ndev);
-		return NULL;
+		return ERR_PTR(err);
 	}
 
 	if (snmp6_register_dev(ndev) < 0) {
 		ADBG(KERN_WARNING
 			"%s: cannot create /proc/net/dev_snmp6/%s\n",
 			__func__, dev->name);
-		neigh_parms_release(&nd_tbl, ndev->nd_parms);
-		ndev->dead = 1;
-		in6_dev_finish_destroy(ndev);
-		return NULL;
+		goto err_release;
 	}
 
 	/* One reference from device.  We must do this before
@@ -394,7 +392,12 @@
 
 	ipv6_mc_init_dev(ndev);
 	ndev->tstamp = jiffies;
-	addrconf_sysctl_register(ndev);
+	err = addrconf_sysctl_register(ndev);
+	if (err) {
+		ipv6_mc_destroy_dev(ndev);
+		del_timer(&ndev->regen_timer);
+		goto err_release;
+	}
 	/* protected by rtnl_lock */
 	rcu_assign_pointer(dev->ip6_ptr, ndev);
 
@@ -409,6 +412,12 @@
 		ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
 
 	return ndev;
+
+err_release:
+	neigh_parms_release(&nd_tbl, ndev->nd_parms);
+	ndev->dead = 1;
+	in6_dev_finish_destroy(ndev);
+	return ERR_PTR(err);
 }
 
 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
@@ -420,7 +429,7 @@
 	idev = __in6_dev_get(dev);
 	if (!idev) {
 		idev = ipv6_add_dev(dev);
-		if (!idev)
+		if (IS_ERR(idev))
 			return NULL;
 	}
 
@@ -2830,8 +2839,8 @@
 	case NETDEV_REGISTER:
 		if (!idev && dev->mtu >= IPV6_MIN_MTU) {
 			idev = ipv6_add_dev(dev);
-			if (!idev)
-				return notifier_from_errno(-ENOMEM);
+			if (IS_ERR(idev))
+				return notifier_from_errno(PTR_ERR(idev));
 		}
 		break;
 
@@ -2851,7 +2860,7 @@
 			if (!idev && dev->mtu >= IPV6_MIN_MTU)
 				idev = ipv6_add_dev(dev);
 
-			if (idev) {
+			if (!IS_ERR_OR_NULL(idev)) {
 				idev->if_flags |= IF_READY;
 				run_pending = 1;
 			}
@@ -2894,7 +2903,7 @@
 			break;
 		}
 
-		if (idev) {
+		if (!IS_ERR_OR_NULL(idev)) {
 			if (run_pending)
 				addrconf_dad_run(idev);
 
@@ -2929,7 +2938,7 @@
 
 		if (!idev && dev->mtu >= IPV6_MIN_MTU) {
 			idev = ipv6_add_dev(dev);
-			if (idev)
+			if (!IS_ERR(idev))
 				break;
 		}
 
@@ -2950,10 +2959,14 @@
 		if (idev) {
 			snmp6_unregister_dev(idev);
 			addrconf_sysctl_unregister(idev);
-			addrconf_sysctl_register(idev);
-			err = snmp6_register_dev(idev);
+			err = addrconf_sysctl_register(idev);
 			if (err)
 				return notifier_from_errno(err);
+			err = snmp6_register_dev(idev);
+			if (err) {
+				addrconf_sysctl_unregister(idev);
+				return notifier_from_errno(err);
+			}
 		}
 		break;
 
@@ -5248,12 +5261,23 @@
 	kfree(t);
 }
 
-static void addrconf_sysctl_register(struct inet6_dev *idev)
+static int addrconf_sysctl_register(struct inet6_dev *idev)
 {
-	neigh_sysctl_register(idev->dev, idev->nd_parms,
-			      &ndisc_ifinfo_sysctl_change);
-	__addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
-					idev, &idev->cnf);
+	int err;
+
+	if (!sysctl_dev_name_is_allowed(idev->dev->name))
+		return -EINVAL;
+
+	err = neigh_sysctl_register(idev->dev, idev->nd_parms,
+				    &ndisc_ifinfo_sysctl_change);
+	if (err)
+		return err;
+	err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
+					 idev, &idev->cnf);
+	if (err)
+		neigh_sysctl_unregister(idev->nd_parms);
+
+	return err;
 }
 
 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
@@ -5338,6 +5362,7 @@
 
 int __init addrconf_init(void)
 {
+	struct inet6_dev *idev;
 	int i, err;
 
 	err = ipv6_addr_label_init();
@@ -5376,11 +5401,12 @@
 	 * device and it being up should be removed.
 	 */
 	rtnl_lock();
-	if (!ipv6_add_dev(init_net.loopback_dev))
-		err = -ENOMEM;
+	idev = ipv6_add_dev(init_net.loopback_dev);
 	rtnl_unlock();
-	if (err)
+	if (IS_ERR(idev)) {
+		err = PTR_ERR(idev);
 		goto errlo;
+	}
 
 	for (i = 0; i < IN6_ADDR_HSIZE; i++)
 		INIT_HLIST_HEAD(&inet6_addr_lst[i]);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 759456f..f5dafe6 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -546,6 +546,8 @@
 	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
 
 	hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
+	hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
+
 	id = ip_idents_reserve(hash, 1);
 	fhdr->identification = htonl(id);
 }
@@ -1270,7 +1272,6 @@
 		}
 	}
 
-	/* For UDP, check if TX timestamp is enabled */
 	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW)
 		sock_tx_timestamp(sk, &tx_flags);
 
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 17ee4fc..7f52fd9 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -1089,36 +1089,26 @@
  **/
 static int __init vti6_tunnel_init(void)
 {
-	int  err;
+	const char *msg;
+	int err;
 
+	msg = "tunnel device";
 	err = register_pernet_device(&vti6_net_ops);
 	if (err < 0)
-		goto out_pernet;
+		goto pernet_dev_failed;
 
+	msg = "tunnel protocols";
 	err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
-	if (err < 0) {
-		pr_err("%s: can't register vti6 protocol\n", __func__);
-
-		goto out;
-	}
-
+	if (err < 0)
+		goto xfrm_proto_esp_failed;
 	err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
-	if (err < 0) {
-		xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-		pr_err("%s: can't register vti6 protocol\n", __func__);
-
-		goto out;
-	}
-
+	if (err < 0)
+		goto xfrm_proto_ah_failed;
 	err = xfrm6_protocol_register(&vti_ipcomp6_protocol, IPPROTO_COMP);
-	if (err < 0) {
-		xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
-		xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-		pr_err("%s: can't register vti6 protocol\n", __func__);
+	if (err < 0)
+		goto xfrm_proto_comp_failed;
 
-		goto out;
-	}
-
+	msg = "netlink interface";
 	err = rtnl_link_register(&vti6_link_ops);
 	if (err < 0)
 		goto rtnl_link_failed;
@@ -1127,11 +1117,14 @@
 
 rtnl_link_failed:
 	xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP);
+xfrm_proto_comp_failed:
 	xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
+xfrm_proto_ah_failed:
 	xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-out:
+xfrm_proto_esp_failed:
 	unregister_pernet_device(&vti6_net_ops);
-out_pernet:
+pernet_dev_failed:
+	pr_err("vti6 init: failed to register %s\n", msg);
 	return err;
 }
 
@@ -1141,13 +1134,9 @@
 static void __exit vti6_tunnel_cleanup(void)
 {
 	rtnl_link_unregister(&vti6_link_ops);
-	if (xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP))
-		pr_info("%s: can't deregister protocol\n", __func__);
-	if (xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH))
-		pr_info("%s: can't deregister protocol\n", __func__);
-	if (xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP))
-		pr_info("%s: can't deregister protocol\n", __func__);
-
+	xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP);
+	xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
+	xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
 	unregister_pernet_device(&vti6_net_ops);
 }
 
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index b7ece27..339078f 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1768,7 +1768,7 @@
 
 #ifdef CONFIG_SYSCTL
 	err = neigh_sysctl_register(NULL, &nd_tbl.parms,
-				    &ndisc_ifinfo_sysctl_change);
+				    ndisc_ifinfo_sysctl_change);
 	if (err)
 		goto out_unregister_pernet;
 out:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 0d5279f..3d4bccf 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -63,6 +63,8 @@
 static struct inet_frags nf_frags;
 
 #ifdef CONFIG_SYSCTL
+static int zero;
+
 static struct ctl_table nf_ct_frag6_sysctl_table[] = {
 	{
 		.procname	= "nf_conntrack_frag6_timeout",
@@ -76,14 +78,17 @@
 		.data		= &init_net.nf_frag.frags.low_thresh,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &init_net.nf_frag.frags.high_thresh
 	},
 	{
 		.procname	= "nf_conntrack_frag6_high_thresh",
 		.data		= &init_net.nf_frag.frags.high_thresh,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &init_net.nf_frag.frags.low_thresh
 	},
 	{ }
 };
@@ -102,7 +107,10 @@
 
 		table[0].data = &net->nf_frag.frags.timeout;
 		table[1].data = &net->nf_frag.frags.low_thresh;
+		table[1].extra2 = &net->nf_frag.frags.high_thresh;
 		table[2].data = &net->nf_frag.frags.high_thresh;
+		table[2].extra1 = &net->nf_frag.frags.low_thresh;
+		table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
 	}
 
 	hdr = register_net_sysctl(net, "net/netfilter", table);
@@ -147,16 +155,13 @@
 static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
 				 const struct in6_addr *daddr)
 {
-	u32 c;
-
 	net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
-	c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-			 (__force u32)id, nf_frags.rnd);
-	return c & (INETFRAGS_HASHSZ - 1);
+	return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+			    (__force u32)id, nf_frags.rnd);
 }
 
 
-static unsigned int nf_hashfn(struct inet_frag_queue *q)
+static unsigned int nf_hashfn(const struct inet_frag_queue *q)
 {
 	const struct frag_queue *nq;
 
@@ -196,7 +201,7 @@
 	arg.dst = dst;
 	arg.ecn = ecn;
 
-	read_lock_bh(&nf_frags.lock);
+	local_bh_disable();
 	hash = nf_hash_frag(id, src, dst);
 
 	q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
@@ -352,7 +357,6 @@
 		fq->q.last_in |= INET_FRAG_FIRST_IN;
 	}
 
-	inet_frag_lru_move(&fq->q);
 	return 0;
 
 discard_fq:
@@ -597,10 +601,6 @@
 	hdr = ipv6_hdr(clone);
 	fhdr = (struct frag_hdr *)skb_transport_header(clone);
 
-	local_bh_disable();
-	inet_frag_evictor(&net->nf_frag.frags, &nf_frags, false);
-	local_bh_enable();
-
 	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
 		     ip6_frag_ecn(hdr));
 	if (fq == NULL) {
@@ -677,7 +677,6 @@
 	nf_frags.qsize = sizeof(struct frag_queue);
 	nf_frags.match = ip6_frag_match;
 	nf_frags.frag_expire = nf_ct_frag6_expire;
-	nf_frags.secret_interval = 10 * 60 * HZ;
 	inet_frags_init(&nf_frags);
 
 	ret = register_pernet_subsys(&nf_ct_net_ops);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 3317440..2d6f860 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -33,6 +33,7 @@
 static int sockstat6_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
+	unsigned int frag_mem = ip6_frag_mem(net);
 
 	seq_printf(seq, "TCP6: inuse %d\n",
 		       sock_prot_inuse_get(net, &tcpv6_prot));
@@ -42,8 +43,7 @@
 			sock_prot_inuse_get(net, &udplitev6_prot));
 	seq_printf(seq, "RAW6: inuse %d\n",
 		       sock_prot_inuse_get(net, &rawv6_prot));
-	seq_printf(seq, "FRAG6: inuse %d memory %d\n",
-		       ip6_frag_nqueues(net), ip6_frag_mem(net));
+	seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem);
 	return 0;
 }
 
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index cc85a9b..f1709c4 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -85,27 +85,23 @@
 static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
 				    const struct in6_addr *daddr)
 {
-	u32 c;
-
 	net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
-	c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-			 (__force u32)id, ip6_frags.rnd);
-
-	return c & (INETFRAGS_HASHSZ - 1);
+	return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+			    (__force u32)id, ip6_frags.rnd);
 }
 
-static unsigned int ip6_hashfn(struct inet_frag_queue *q)
+static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
 {
-	struct frag_queue *fq;
+	const struct frag_queue *fq;
 
 	fq = container_of(q, struct frag_queue, q);
 	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
 }
 
-bool ip6_frag_match(struct inet_frag_queue *q, void *a)
+bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
 {
-	struct frag_queue *fq;
-	struct ip6_create_arg *arg = a;
+	const struct frag_queue *fq;
+	const struct ip6_create_arg *arg = a;
 
 	fq = container_of(q, struct frag_queue, q);
 	return	fq->id == arg->id &&
@@ -115,10 +111,10 @@
 }
 EXPORT_SYMBOL(ip6_frag_match);
 
-void ip6_frag_init(struct inet_frag_queue *q, void *a)
+void ip6_frag_init(struct inet_frag_queue *q, const void *a)
 {
 	struct frag_queue *fq = container_of(q, struct frag_queue, q);
-	struct ip6_create_arg *arg = a;
+	const struct ip6_create_arg *arg = a;
 
 	fq->id = arg->id;
 	fq->user = arg->user;
@@ -145,7 +141,9 @@
 	if (!dev)
 		goto out_rcu_unlock;
 
-	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+	if (!(fq->q.last_in & INET_FRAG_EVICTED))
+		IP6_INC_STATS_BH(net, __in6_dev_get(dev),
+				 IPSTATS_MIB_REASMTIMEOUT);
 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 
 	/* Don't send error if the first segment did not arrive. */
@@ -192,7 +190,6 @@
 	arg.dst = dst;
 	arg.ecn = ecn;
 
-	read_lock(&ip6_frags.lock);
 	hash = inet6_hash_frag(id, src, dst);
 
 	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
@@ -353,7 +350,6 @@
 	}
 
 	skb_dst_drop(skb);
-	inet_frag_lru_move(&fq->q);
 	return -1;
 
 discard_fq:
@@ -523,7 +519,6 @@
 	struct frag_queue *fq;
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct net *net = dev_net(skb_dst(skb)->dev);
-	int evicted;
 
 	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
 		goto fail_hdr;
@@ -552,11 +547,6 @@
 		return 1;
 	}
 
-	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
-	if (evicted)
-		IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-				 IPSTATS_MIB_REASMFAILS, evicted);
-
 	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
 		     ip6_frag_ecn(hdr));
 	if (fq != NULL) {
@@ -588,20 +578,25 @@
 };
 
 #ifdef CONFIG_SYSCTL
+static int zero;
+
 static struct ctl_table ip6_frags_ns_ctl_table[] = {
 	{
 		.procname	= "ip6frag_high_thresh",
 		.data		= &init_net.ipv6.frags.high_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &init_net.ipv6.frags.low_thresh
 	},
 	{
 		.procname	= "ip6frag_low_thresh",
 		.data		= &init_net.ipv6.frags.low_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &init_net.ipv6.frags.high_thresh
 	},
 	{
 		.procname	= "ip6frag_time",
@@ -613,10 +608,12 @@
 	{ }
 };
 
+/* secret interval has been deprecated */
+static int ip6_frags_secret_interval_unused;
 static struct ctl_table ip6_frags_ctl_table[] = {
 	{
 		.procname	= "ip6frag_secret_interval",
-		.data		= &ip6_frags.secret_interval,
+		.data		= &ip6_frags_secret_interval_unused,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
@@ -636,7 +633,10 @@
 			goto err_alloc;
 
 		table[0].data = &net->ipv6.frags.high_thresh;
+		table[0].extra1 = &net->ipv6.frags.low_thresh;
+		table[0].extra2 = &init_net.ipv6.frags.high_thresh;
 		table[1].data = &net->ipv6.frags.low_thresh;
+		table[1].extra2 = &net->ipv6.frags.high_thresh;
 		table[2].data = &net->ipv6.frags.timeout;
 
 		/* Don't export sysctls to unprivileged users */
@@ -746,7 +746,6 @@
 	ip6_frags.qsize = sizeof(struct frag_queue);
 	ip6_frags.match = ip6_frag_match;
 	ip6_frags.frag_expire = ip6_frag_expire;
-	ip6_frags.secret_interval = 10 * 60 * HZ;
 	inet_frags_init(&ip6_frags);
 out:
 	return ret;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f9d8800..5b6091d 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -673,7 +673,7 @@
 			goto csum_error;
 	}
 
-	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
 		UDP6_INC_STATS_BH(sock_net(sk),
 				  UDP_MIB_RCVBUFERRORS, is_udplite);
 		goto drop;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 31bf258..f0e84bc 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -52,7 +52,7 @@
 	del_timer_sync(&tid_rx->reorder_timer);
 
 	for (i = 0; i < tid_rx->buf_size; i++)
-		dev_kfree_skb(tid_rx->reorder_buf[i]);
+		__skb_queue_purge(&tid_rx->reorder_buf[i]);
 	kfree(tid_rx->reorder_buf);
 	kfree(tid_rx->reorder_time);
 	kfree(tid_rx);
@@ -224,28 +224,15 @@
 	ieee80211_tx_skb(sdata, skb);
 }
 
-void ieee80211_process_addba_request(struct ieee80211_local *local,
-				     struct sta_info *sta,
-				     struct ieee80211_mgmt *mgmt,
-				     size_t len)
+void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+				     u8 dialog_token, u16 timeout,
+				     u16 start_seq_num, u16 ba_policy, u16 tid,
+				     u16 buf_size, bool tx)
 {
+	struct ieee80211_local *local = sta->sdata->local;
 	struct tid_ampdu_rx *tid_agg_rx;
-	u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
-	u8 dialog_token;
-	int ret = -EOPNOTSUPP;
-
-	/* extract session parameters from addba request frame */
-	dialog_token = mgmt->u.action.u.addba_req.dialog_token;
-	timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
-	start_seq_num =
-		le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
-
-	capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
-	ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
-	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
-	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
-
-	status = WLAN_STATUS_REQUEST_DECLINED;
+	int i, ret = -EOPNOTSUPP;
+	u16 status = WLAN_STATUS_REQUEST_DECLINED;
 
 	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
 		ht_dbg(sta->sdata,
@@ -264,7 +251,7 @@
 		status = WLAN_STATUS_INVALID_QOS_PARAM;
 		ht_dbg_ratelimited(sta->sdata,
 				   "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
-				   mgmt->sa, tid, ba_policy, buf_size);
+				   sta->sta.addr, tid, ba_policy, buf_size);
 		goto end_no_lock;
 	}
 	/* determine default buffer size */
@@ -281,7 +268,7 @@
 	if (sta->ampdu_mlme.tid_rx[tid]) {
 		ht_dbg_ratelimited(sta->sdata,
 				   "unexpected AddBA Req from %pM on tid %u\n",
-				   mgmt->sa, tid);
+				   sta->sta.addr, tid);
 
 		/* delete existing Rx BA session on the same tid */
 		___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
@@ -308,7 +295,7 @@
 
 	/* prepare reordering buffer */
 	tid_agg_rx->reorder_buf =
-		kcalloc(buf_size, sizeof(struct sk_buff *), GFP_KERNEL);
+		kcalloc(buf_size, sizeof(struct sk_buff_head), GFP_KERNEL);
 	tid_agg_rx->reorder_time =
 		kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL);
 	if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
@@ -318,6 +305,9 @@
 		goto end;
 	}
 
+	for (i = 0; i < buf_size; i++)
+		__skb_queue_head_init(&tid_agg_rx->reorder_buf[i]);
+
 	ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
 			       &sta->sta, tid, &start_seq_num, 0);
 	ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
@@ -350,6 +340,74 @@
 	mutex_unlock(&sta->ampdu_mlme.mtx);
 
 end_no_lock:
-	ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
-				  dialog_token, status, 1, buf_size, timeout);
+	if (tx)
+		ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
+					  dialog_token, status, 1, buf_size,
+					  timeout);
 }
+
+void ieee80211_process_addba_request(struct ieee80211_local *local,
+				     struct sta_info *sta,
+				     struct ieee80211_mgmt *mgmt,
+				     size_t len)
+{
+	u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num;
+	u8 dialog_token;
+
+	/* extract session parameters from addba request frame */
+	dialog_token = mgmt->u.action.u.addba_req.dialog_token;
+	timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
+	start_seq_num =
+		le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
+
+	capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+	ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
+	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
+
+	__ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
+					start_seq_num, ba_policy, tid,
+					buf_size, true);
+}
+
+void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
+					const u8 *addr, u16 tid)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_rx_agg *rx_agg;
+	struct sk_buff *skb = dev_alloc_skb(0);
+
+	if (unlikely(!skb))
+		return;
+
+	rx_agg = (struct ieee80211_rx_agg *) &skb->cb;
+	memcpy(&rx_agg->addr, addr, ETH_ALEN);
+	rx_agg->tid = tid;
+
+	skb->pkt_type = IEEE80211_SDATA_QUEUE_RX_AGG_START;
+	skb_queue_tail(&sdata->skb_queue, skb);
+	ieee80211_queue_work(&local->hw, &sdata->work);
+}
+EXPORT_SYMBOL(ieee80211_start_rx_ba_session_offl);
+
+void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
+				       const u8 *addr, u16 tid)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_rx_agg *rx_agg;
+	struct sk_buff *skb = dev_alloc_skb(0);
+
+	if (unlikely(!skb))
+		return;
+
+	rx_agg = (struct ieee80211_rx_agg *) &skb->cb;
+	memcpy(&rx_agg->addr, addr, ETH_ALEN);
+	rx_agg->tid = tid;
+
+	skb->pkt_type = IEEE80211_SDATA_QUEUE_RX_AGG_STOP;
+	skb_queue_tail(&sdata->skb_queue, skb);
+	ieee80211_queue_work(&local->hw, &sdata->work);
+}
+EXPORT_SYMBOL(ieee80211_stop_rx_ba_session_offl);
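
The two *_offl() helpers added above give drivers whose firmware negotiates block-ack sessions a way to create or tear down mac80211's Rx reorder state. They only stash the peer address and TID in the cb of a zero-length skb and kick the sdata work, so the real setup runs later in process context; that should make them callable from a driver's atomic event path. A sketch of a hypothetical driver event handler using them:

	/* Hypothetical driver callback, sketch only. */
	static void mydrv_fw_ba_event(struct ieee80211_vif *vif,
				      const u8 *peer_addr, u16 tid, bool start)
	{
		if (start)
			ieee80211_start_rx_ba_session_offl(vif, peer_addr, tid);
		else
			ieee80211_stop_rx_ba_session_offl(vif, peer_addr, tid);
	}
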
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index c3fd4d2..6d537f0 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -66,7 +66,7 @@
 static struct ieee80211_chanctx *
 ieee80211_vif_get_chanctx(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_local *local __maybe_unused = sdata->local;
 	struct ieee80211_chanctx_conf *conf;
 
 	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 15702ff6..ff630be 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -150,13 +150,12 @@
 
 	/*
 	 * If user has specified capability over-rides, take care
-	 * of that if the station we're setting up is the AP that
+	 * of that if the station we're setting up is the AP or TDLS peer that
 	 * we advertised a restricted capability set to. Override
 	 * our own capabilities and then use those below.
 	 */
-	if ((sdata->vif.type == NL80211_IFTYPE_STATION ||
-	     sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
-	    !test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+	if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+	    sdata->vif.type == NL80211_IFTYPE_ADHOC)
 		ieee80211_apply_htcap_overrides(sdata, &own_cap);
 
 	/*
@@ -228,6 +227,9 @@
 	if (own_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
 		ht_cap.mcs.rx_mask[32/8] |= 1;
 
+	/* set Rx highest rate */
+	ht_cap.mcs.rx_highest = ht_cap_ie->mcs.rx_highest;
+
  apply:
 	changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
 
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 713485f..9713dc5 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -189,17 +189,8 @@
 						 chandef, 0);
 	}
 
-	if (local->hw.queues >= IEEE80211_NUM_ACS) {
-		*pos++ = WLAN_EID_VENDOR_SPECIFIC;
-		*pos++ = 7; /* len */
-		*pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
-		*pos++ = 0x50;
-		*pos++ = 0xf2;
-		*pos++ = 2; /* WME */
-		*pos++ = 0; /* WME info */
-		*pos++ = 1; /* WME ver */
-		*pos++ = 0; /* U-APSD no in use */
-	}
+	if (local->hw.queues >= IEEE80211_NUM_ACS)
+		pos = ieee80211_add_wmm_info_ie(pos, 0); /* U-APSD not in use */
 
 	presp->head_len = pos - presp->head;
 	if (WARN_ON(presp->head_len > frame_len))
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 9e025e1..ef7a089 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -345,7 +345,6 @@
 	IEEE80211_STA_CONNECTION_POLL	= BIT(1),
 	IEEE80211_STA_CONTROL_PORT	= BIT(2),
 	IEEE80211_STA_DISABLE_HT	= BIT(4),
-	IEEE80211_STA_CSA_RECEIVED	= BIT(5),
 	IEEE80211_STA_MFP_ENABLED	= BIT(6),
 	IEEE80211_STA_UAPSD_ENABLED	= BIT(7),
 	IEEE80211_STA_NULLFUNC_ACKED	= BIT(8),
@@ -503,6 +502,9 @@
 	struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
 	struct ieee80211_vht_cap vht_capa; /* configured VHT overrides */
 	struct ieee80211_vht_cap vht_capa_mask; /* Valid parts of vht_capa */
+
+	u8 tdls_peer[ETH_ALEN] __aligned(2);
+	struct delayed_work tdls_peer_del_work;
 };
 
 struct ieee80211_if_ibss {
@@ -815,9 +817,6 @@
 	bool radar_required;
 	struct delayed_work dfs_cac_timer_work;
 
-	u8 tdls_peer[ETH_ALEN] __aligned(2);
-	struct delayed_work tdls_peer_del_work;
-
 	/*
 	 * AP this belongs to: self in AP mode and
 	 * corresponding AP in VLAN mode, NULL for
@@ -926,10 +925,17 @@
 	return shift;
 }
 
+struct ieee80211_rx_agg {
+	u8 addr[ETH_ALEN];
+	u16 tid;
+};
+
 enum sdata_queue_type {
 	IEEE80211_SDATA_QUEUE_TYPE_FRAME	= 0,
 	IEEE80211_SDATA_QUEUE_AGG_START		= 1,
 	IEEE80211_SDATA_QUEUE_AGG_STOP		= 2,
+	IEEE80211_SDATA_QUEUE_RX_AGG_START	= 3,
+	IEEE80211_SDATA_QUEUE_RX_AGG_STOP	= 4,
 };
 
 enum {
@@ -1578,6 +1584,10 @@
 				     u16 initiator, u16 reason, bool stop);
 void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				    u16 initiator, u16 reason, bool stop);
+void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+				     u8 dialog_token, u16 timeout,
+				     u16 start_seq_num, u16 ba_policy, u16 tid,
+				     u16 buf_size, bool tx);
 void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 					 enum ieee80211_agg_stop_reason reason);
 void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
@@ -1730,6 +1740,21 @@
 	ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
 }
 
+static inline bool ieee80211_rx_reorder_ready(struct sk_buff_head *frames)
+{
+	struct sk_buff *tail = skb_peek_tail(frames);
+	struct ieee80211_rx_status *status;
+
+	if (!tail)
+		return false;
+
+	status = IEEE80211_SKB_RXCB(tail);
+	if (status->flag & RX_FLAG_AMSDU_MORE)
+		return false;
+
+	return true;
+}
+
 void ieee80211_dynamic_ps_enable_work(struct work_struct *work);
 void ieee80211_dynamic_ps_disable_work(struct work_struct *work);
 void ieee80211_dynamic_ps_timer(unsigned long data);
@@ -1824,6 +1849,7 @@
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 				struct sk_buff *skb, bool need_basic,
 				enum ieee80211_band band);
+u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
 
 /* channel management */
 void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 4edfc7c..01eede7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1140,6 +1140,7 @@
 	struct sk_buff *skb;
 	struct sta_info *sta;
 	struct ieee80211_ra_tid *ra_tid;
+	struct ieee80211_rx_agg *rx_agg;
 
 	if (!ieee80211_sdata_running(sdata))
 		return;
@@ -1167,6 +1168,34 @@
 			ra_tid = (void *)&skb->cb;
 			ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
 						ra_tid->tid);
+		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
+			rx_agg = (void *)&skb->cb;
+			mutex_lock(&local->sta_mtx);
+			sta = sta_info_get_bss(sdata, rx_agg->addr);
+			if (sta) {
+				u16 last_seq;
+
+				last_seq = le16_to_cpu(
+					sta->last_seq_ctrl[rx_agg->tid]);
+
+				__ieee80211_start_rx_ba_session(sta,
+						0, 0,
+						ieee80211_sn_inc(last_seq),
+						1, rx_agg->tid,
+						IEEE80211_MAX_AMPDU_BUF,
+						false);
+			}
+			mutex_unlock(&local->sta_mtx);
+		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_STOP) {
+			rx_agg = (void *)&skb->cb;
+			mutex_lock(&local->sta_mtx);
+			sta = sta_info_get_bss(sdata, rx_agg->addr);
+			if (sta)
+				__ieee80211_stop_rx_ba_session(sta,
+							rx_agg->tid,
+							WLAN_BACK_RECIPIENT, 0,
+							false);
+			mutex_unlock(&local->sta_mtx);
 		} else if (ieee80211_is_action(mgmt->frame_control) &&
 			   mgmt->u.action.category == WLAN_CATEGORY_BACK) {
 			int len = skb->len;
@@ -1672,8 +1701,6 @@
 			  ieee80211_dfs_cac_timer_work);
 	INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk,
 			  ieee80211_delayed_tailroom_dec);
-	INIT_DELAYED_WORK(&sdata->tdls_peer_del_work,
-			  ieee80211_tdls_peer_del_work);
 
 	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
 		struct ieee80211_supported_band *sband;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 16d97f0..d808cff 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -482,9 +482,6 @@
 	int idx, ret;
 	bool pairwise;
 
-	if (WARN_ON(!sdata || !key))
-		return -EINVAL;
-
 	pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
 	idx = key->conf.keyidx;
 	key->local = sdata->local;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 931330b..31a8afa 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -830,16 +830,7 @@
 			qos_info = 0;
 		}
 
-		pos = skb_put(skb, 9);
-		*pos++ = WLAN_EID_VENDOR_SPECIFIC;
-		*pos++ = 7; /* len */
-		*pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
-		*pos++ = 0x50;
-		*pos++ = 0xf2;
-		*pos++ = 2; /* WME */
-		*pos++ = 0; /* WME info */
-		*pos++ = 1; /* WME ver */
-		*pos++ = qos_info;
+		pos = ieee80211_add_wmm_info_ie(skb_put(skb, 9), qos_info);
 	}
 
 	/* add any remaining custom (i.e. vendor specific here) IEs */
@@ -1005,8 +996,6 @@
 		sdata->csa_block_tx = false;
 	}
 
-	ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
-
 	ieee80211_sta_reset_beacon_monitor(sdata);
 	ieee80211_sta_reset_conn_monitor(sdata);
 
@@ -1064,7 +1053,7 @@
 		return;
 
 	/* disregard subsequent announcements if we are already processing */
-	if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED)
+	if (sdata->vif.csa_active)
 		return;
 
 	current_band = cbss->channel->band;
@@ -1091,8 +1080,6 @@
 		return;
 	}
 
-	ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
-
 	mutex_lock(&local->mtx);
 	mutex_lock(&local->chanctx_mtx);
 	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
@@ -2108,8 +2095,6 @@
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
 			       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
 			       true, frame_buf);
-	ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
-
 	mutex_lock(&local->mtx);
 	sdata->vif.csa_active = false;
 	if (sdata->csa_block_tx) {
@@ -3722,6 +3707,8 @@
 	INIT_WORK(&ifmgd->csa_connection_drop_work,
 		  ieee80211_csa_connection_drop_work);
 	INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_mgd_work);
+	INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work,
+			  ieee80211_tdls_peer_del_work);
 	setup_timer(&ifmgd->timer, ieee80211_sta_timer,
 		    (unsigned long) sdata);
 	setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
@@ -4585,6 +4572,7 @@
 	cancel_work_sync(&ifmgd->request_smps_work);
 	cancel_work_sync(&ifmgd->csa_connection_drop_work);
 	cancel_work_sync(&ifmgd->chswitch_work);
+	cancel_delayed_work_sync(&ifmgd->tdls_peer_del_work);
 
 	sdata_lock(sdata);
 	if (ifmgd->assoc_data) {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5f572be..bd2c9b2 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -688,20 +688,27 @@
 					    int index,
 					    struct sk_buff_head *frames)
 {
-	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
+	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
+	struct sk_buff *skb;
 	struct ieee80211_rx_status *status;
 
 	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 
-	if (!skb)
+	if (skb_queue_empty(skb_list))
 		goto no_frame;
 
-	/* release the frame from the reorder ring buffer */
+	if (!ieee80211_rx_reorder_ready(skb_list)) {
+		__skb_queue_purge(skb_list);
+		goto no_frame;
+	}
+
+	/* release frames from the reorder ring buffer */
 	tid_agg_rx->stored_mpdu_num--;
-	tid_agg_rx->reorder_buf[index] = NULL;
-	status = IEEE80211_SKB_RXCB(skb);
-	status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
-	__skb_queue_tail(frames, skb);
+	while ((skb = __skb_dequeue(skb_list))) {
+		status = IEEE80211_SKB_RXCB(skb);
+		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
+		__skb_queue_tail(frames, skb);
+	}
 
 no_frame:
 	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
@@ -738,13 +745,13 @@
 					  struct tid_ampdu_rx *tid_agg_rx,
 					  struct sk_buff_head *frames)
 {
-	int index, j;
+	int index, i, j;
 
 	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 
 	/* release the buffer until next missing frame */
 	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
-	if (!tid_agg_rx->reorder_buf[index] &&
+	if (!ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index]) &&
 	    tid_agg_rx->stored_mpdu_num) {
 		/*
 		 * No buffers ready to be released, but check whether any
@@ -753,7 +760,8 @@
 		int skipped = 1;
 		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
 		     j = (j + 1) % tid_agg_rx->buf_size) {
-			if (!tid_agg_rx->reorder_buf[j]) {
+			if (!ieee80211_rx_reorder_ready(
+					&tid_agg_rx->reorder_buf[j])) {
 				skipped++;
 				continue;
 			}
@@ -762,6 +770,11 @@
 					HT_RX_REORDER_BUF_TIMEOUT))
 				goto set_release_timer;
 
+			/* don't leave incomplete A-MSDUs around */
+			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
+			     i = (i + 1) % tid_agg_rx->buf_size)
+				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
+
 			ht_dbg_ratelimited(sdata,
 					   "release an RX reorder frame due to timeout on earlier frames\n");
 			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
@@ -775,7 +788,8 @@
 				 skipped) & IEEE80211_SN_MASK;
 			skipped = 0;
 		}
-	} else while (tid_agg_rx->reorder_buf[index]) {
+	} else while (ieee80211_rx_reorder_ready(
+				&tid_agg_rx->reorder_buf[index])) {
 		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
 						frames);
 		index =	tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
@@ -786,7 +800,8 @@
 
 		for (; j != (index - 1) % tid_agg_rx->buf_size;
 		     j = (j + 1) % tid_agg_rx->buf_size) {
-			if (tid_agg_rx->reorder_buf[j])
+			if (ieee80211_rx_reorder_ready(
+					&tid_agg_rx->reorder_buf[j]))
 				break;
 		}
 
@@ -811,6 +826,7 @@
 					     struct sk_buff_head *frames)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	u16 sc = le16_to_cpu(hdr->seq_ctrl);
 	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
 	u16 head_seq_num, buf_size;
@@ -845,7 +861,7 @@
 	index = mpdu_seq_num % tid_agg_rx->buf_size;
 
 	/* check if we already stored this frame */
-	if (tid_agg_rx->reorder_buf[index]) {
+	if (ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index])) {
 		dev_kfree_skb(skb);
 		goto out;
 	}
@@ -858,17 +874,20 @@
 	 */
 	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
 	    tid_agg_rx->stored_mpdu_num == 0) {
-		tid_agg_rx->head_seq_num =
-			ieee80211_sn_inc(tid_agg_rx->head_seq_num);
+		if (!(status->flag & RX_FLAG_AMSDU_MORE))
+			tid_agg_rx->head_seq_num =
+				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
 		ret = false;
 		goto out;
 	}
 
 	/* put the frame in the reordering buffer */
-	tid_agg_rx->reorder_buf[index] = skb;
-	tid_agg_rx->reorder_time[index] = jiffies;
-	tid_agg_rx->stored_mpdu_num++;
-	ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
+	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
+	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
+		tid_agg_rx->reorder_time[index] = jiffies;
+		tid_agg_rx->stored_mpdu_num++;
+		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
+	}
 
  out:
 	spin_unlock(&tid_agg_rx->reorder_lock);
@@ -3129,6 +3148,14 @@
 			if (!ieee80211_is_beacon(hdr->frame_control))
 				return false;
 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+		} else if (!ieee80211_has_tods(hdr->frame_control)) {
+			/* ignore data frames to TDLS-peers */
+			if (ieee80211_is_data(hdr->frame_control))
+				return false;
+			/* ignore action frames to TDLS-peers */
+			if (ieee80211_is_action(hdr->frame_control) &&
+			    !ether_addr_equal(bssid, hdr->addr1))
+				return false;
 		}
 		break;
 	case NL80211_IFTYPE_WDS:
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f41177f..c6ee213 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1724,12 +1724,15 @@
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
-	struct rate_control_ref *ref = local->rate_ctrl;
+	struct rate_control_ref *ref = NULL;
 	struct timespec uptime;
 	u64 packets = 0;
 	u32 thr = 0;
 	int i, ac;
 
+	if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
+		ref = local->rate_ctrl;
+
 	sinfo->generation = sdata->local->sta_generation;
 
 	sinfo->filled = STATION_INFO_INACTIVE_TIME |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 2a04361..d411bcc 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -47,6 +47,8 @@
  * @WLAN_STA_TDLS_PEER: Station is a TDLS peer.
  * @WLAN_STA_TDLS_PEER_AUTH: This TDLS peer is authorized to send direct
  *	packets. This means the link is enabled.
+ * @WLAN_STA_TDLS_INITIATOR: We are the initiator of the TDLS link with this
+ *	station.
  * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
  *	keeping station in power-save mode, reply when the driver
  *	unblocks the station.
@@ -76,6 +78,7 @@
 	WLAN_STA_PSPOLL,
 	WLAN_STA_TDLS_PEER,
 	WLAN_STA_TDLS_PEER_AUTH,
+	WLAN_STA_TDLS_INITIATOR,
 	WLAN_STA_UAPSD,
 	WLAN_STA_SP,
 	WLAN_STA_4ADDR_EVENT,
@@ -152,7 +155,8 @@
 /**
  * struct tid_ampdu_rx - TID aggregation information (Rx).
  *
- * @reorder_buf: buffer to reorder incoming aggregated MPDUs
+ * @reorder_buf: buffer to reorder incoming aggregated MPDUs. An MPDU may be an
+ *	A-MSDU with individually reported subframes.
  * @reorder_time: jiffies when skb was added
  * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
  * @reorder_timer: releases expired frames from the reorder buffer.
@@ -177,7 +181,7 @@
 struct tid_ampdu_rx {
 	struct rcu_head rcu_head;
 	spinlock_t reorder_lock;
-	struct sk_buff **reorder_buf;
+	struct sk_buff_head *reorder_buf;
 	unsigned long *reorder_time;
 	struct timer_list session_timer;
 	struct timer_list reorder_timer;
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index f718533..1b21050 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/ieee80211.h>
+#include <linux/log2.h>
 #include <net/cfg80211.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -21,14 +22,14 @@
 	struct ieee80211_local *local;
 
 	sdata = container_of(wk, struct ieee80211_sub_if_data,
-			     tdls_peer_del_work.work);
+			     u.mgd.tdls_peer_del_work.work);
 	local = sdata->local;
 
 	mutex_lock(&local->mtx);
-	if (!is_zero_ether_addr(sdata->tdls_peer)) {
-		tdls_dbg(sdata, "TDLS del peer %pM\n", sdata->tdls_peer);
-		sta_info_destroy_addr(sdata, sdata->tdls_peer);
-		eth_zero_addr(sdata->tdls_peer);
+	if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer)) {
+		tdls_dbg(sdata, "TDLS del peer %pM\n", sdata->u.mgd.tdls_peer);
+		sta_info_destroy_addr(sdata, sdata->u.mgd.tdls_peer);
+		eth_zero_addr(sdata->u.mgd.tdls_peer);
 	}
 	mutex_unlock(&local->mtx);
 }
@@ -46,11 +47,16 @@
 	*pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
 }
 
-static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
+static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata,
+					u16 status_code)
 {
 	struct ieee80211_local *local = sdata->local;
 	u16 capab;
 
+	/* The capability will be 0 when sending a failure code */
+	if (status_code != 0)
+		return 0;
+
 	capab = 0;
 	if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
 		return capab;
@@ -63,19 +69,332 @@
 	return capab;
 }
 
-static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
-				       const u8 *peer, const u8 *bssid)
+static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata,
+				       struct sk_buff *skb, const u8 *peer,
+				       bool initiator)
 {
 	struct ieee80211_tdls_lnkie *lnkid;
+	const u8 *init_addr, *rsp_addr;
+
+	if (initiator) {
+		init_addr = sdata->vif.addr;
+		rsp_addr = peer;
+	} else {
+		init_addr = peer;
+		rsp_addr = sdata->vif.addr;
+	}
 
 	lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
 
 	lnkid->ie_type = WLAN_EID_LINK_ID;
 	lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
 
-	memcpy(lnkid->bssid, bssid, ETH_ALEN);
-	memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
-	memcpy(lnkid->resp_sta, peer, ETH_ALEN);
+	memcpy(lnkid->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+	memcpy(lnkid->init_sta, init_addr, ETH_ALEN);
+	memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN);
+}
+
+/* translate numbering in the WMM parameter IE to the mac80211 notation */
+static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac)
+{
+	switch (ac) {
+	default:
+		WARN_ON_ONCE(1);
+	case 0:
+		return IEEE80211_AC_BE;
+	case 1:
+		return IEEE80211_AC_BK;
+	case 2:
+		return IEEE80211_AC_VI;
+	case 3:
+		return IEEE80211_AC_VO;
+	}
+}
+
+static u8 ieee80211_wmm_aci_aifsn(int aifsn, bool acm, int aci)
+{
+	u8 ret;
+
+	ret = aifsn & 0x0f;
+	if (acm)
+		ret |= 0x10;
+	ret |= (aci << 5) & 0x60;
+	return ret;
+}
+
+static u8 ieee80211_wmm_ecw(u16 cw_min, u16 cw_max)
+{
+	return ((ilog2(cw_min + 1) << 0x0) & 0x0f) |
+	       ((ilog2(cw_max + 1) << 0x4) & 0xf0);
+}
+
+static void ieee80211_tdls_add_wmm_param_ie(struct ieee80211_sub_if_data *sdata,
+					    struct sk_buff *skb)
+{
+	struct ieee80211_wmm_param_ie *wmm;
+	struct ieee80211_tx_queue_params *txq;
+	int i;
+
+	wmm = (void *)skb_put(skb, sizeof(*wmm));
+	memset(wmm, 0, sizeof(*wmm));
+
+	wmm->element_id = WLAN_EID_VENDOR_SPECIFIC;
+	wmm->len = sizeof(*wmm) - 2;
+
+	wmm->oui[0] = 0x00; /* Microsoft OUI 00:50:F2 */
+	wmm->oui[1] = 0x50;
+	wmm->oui[2] = 0xf2;
+	wmm->oui_type = 2; /* WME */
+	wmm->oui_subtype = 1; /* WME param */
+	wmm->version = 1; /* WME ver */
+	wmm->qos_info = 0; /* U-APSD not in use */
+
+	/*
+	 * Use the EDCA parameters defined for the BSS, or default if the AP
+	 * doesn't support it, as mandated by 802.11-2012 section 10.22.4
+	 */
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		txq = &sdata->tx_conf[ieee80211_ac_from_wmm(i)];
+		wmm->ac[i].aci_aifsn = ieee80211_wmm_aci_aifsn(txq->aifs,
+							       txq->acm, i);
+		wmm->ac[i].cw = ieee80211_wmm_ecw(txq->cw_min, txq->cw_max);
+		wmm->ac[i].txop_limit = cpu_to_le16(txq->txop);
+	}
+}
+
+static void
+ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
+				   struct sk_buff *skb, const u8 *peer,
+				   u8 action_code, bool initiator,
+				   const u8 *extra_ies, size_t extra_ies_len)
+{
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_sta_ht_cap ht_cap;
+	struct sta_info *sta = NULL;
+	size_t offset = 0, noffset;
+	u8 *pos;
+
+	rcu_read_lock();
+
+	/* we should have the peer STA if we're already responding */
+	if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
+		sta = sta_info_get(sdata, peer);
+		if (WARN_ON_ONCE(!sta)) {
+			rcu_read_unlock();
+			return;
+		}
+	}
+
+	ieee80211_add_srates_ie(sdata, skb, false, band);
+	ieee80211_add_ext_srates_ie(sdata, skb, false, band);
+
+	/* add any custom IEs that go before Extended Capabilities */
+	if (extra_ies_len) {
+		static const u8 before_ext_cap[] = {
+			WLAN_EID_SUPP_RATES,
+			WLAN_EID_COUNTRY,
+			WLAN_EID_EXT_SUPP_RATES,
+			WLAN_EID_SUPPORTED_CHANNELS,
+			WLAN_EID_RSN,
+		};
+		noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
+					     before_ext_cap,
+					     ARRAY_SIZE(before_ext_cap),
+					     offset);
+		pos = skb_put(skb, noffset - offset);
+		memcpy(pos, extra_ies + offset, noffset - offset);
+		offset = noffset;
+	}
+
+	ieee80211_tdls_add_ext_capab(skb);
+
+	/* add the QoS element if we support it */
+	if (local->hw.queues >= IEEE80211_NUM_ACS &&
+	    action_code != WLAN_PUB_ACTION_TDLS_DISCOVER_RES)
+		ieee80211_add_wmm_info_ie(skb_put(skb, 9), 0); /* no U-APSD */
+
+	/* add any custom IEs that go before HT capabilities */
+	if (extra_ies_len) {
+		static const u8 before_ht_cap[] = {
+			WLAN_EID_SUPP_RATES,
+			WLAN_EID_COUNTRY,
+			WLAN_EID_EXT_SUPP_RATES,
+			WLAN_EID_SUPPORTED_CHANNELS,
+			WLAN_EID_RSN,
+			WLAN_EID_EXT_CAPABILITY,
+			WLAN_EID_QOS_CAPA,
+			WLAN_EID_FAST_BSS_TRANSITION,
+			WLAN_EID_TIMEOUT_INTERVAL,
+			WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+		};
+		noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
+					     before_ht_cap,
+					     ARRAY_SIZE(before_ht_cap),
+					     offset);
+		pos = skb_put(skb, noffset - offset);
+		memcpy(pos, extra_ies + offset, noffset - offset);
+		offset = noffset;
+	}
+
+	/*
+	 * with TDLS we can switch channels, and HT-caps are not necessarily
+	 * the same on all bands. The specification limits the setup to a
+	 * single HT-cap, so use the current band for now.
+	 */
+	sband = local->hw.wiphy->bands[band];
+	memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
+	if ((action_code == WLAN_TDLS_SETUP_REQUEST ||
+	     action_code == WLAN_TDLS_SETUP_RESPONSE) &&
+	    ht_cap.ht_supported && (!sta || sta->sta.ht_cap.ht_supported)) {
+		if (action_code == WLAN_TDLS_SETUP_REQUEST) {
+			ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
+			/* disable SMPS in TDLS initiator */
+			ht_cap.cap |= (WLAN_HT_CAP_SM_PS_DISABLED
+				       << IEEE80211_HT_CAP_SM_PS_SHIFT);
+		} else {
+			/* disable SMPS in TDLS responder */
+			sta->sta.ht_cap.cap |=
+				(WLAN_HT_CAP_SM_PS_DISABLED
+				 << IEEE80211_HT_CAP_SM_PS_SHIFT);
+
+			/* the peer caps are already intersected with our own */
+			memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
+		}
+
+		pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+		ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
+	}
+
+	rcu_read_unlock();
+
+	/* add any remaining IEs */
+	if (extra_ies_len) {
+		noffset = extra_ies_len;
+		pos = skb_put(skb, noffset - offset);
+		memcpy(pos, extra_ies + offset, noffset - offset);
+	}
+
+	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+}
+
+static void
+ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
+				 struct sk_buff *skb, const u8 *peer,
+				 bool initiator, const u8 *extra_ies,
+				 size_t extra_ies_len)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	size_t offset = 0, noffset;
+	struct sta_info *sta, *ap_sta;
+	u8 *pos;
+
+	rcu_read_lock();
+
+	sta = sta_info_get(sdata, peer);
+	ap_sta = sta_info_get(sdata, ifmgd->bssid);
+	if (WARN_ON_ONCE(!sta || !ap_sta)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	/* add any custom IEs that go before the QoS IE */
+	if (extra_ies_len) {
+		static const u8 before_qos[] = {
+			WLAN_EID_RSN,
+		};
+		noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
+					     before_qos,
+					     ARRAY_SIZE(before_qos),
+					     offset);
+		pos = skb_put(skb, noffset - offset);
+		memcpy(pos, extra_ies + offset, noffset - offset);
+		offset = noffset;
+	}
+
+	/* add the QoS param IE if both the peer and we support it */
+	if (local->hw.queues >= IEEE80211_NUM_ACS &&
+	    test_sta_flag(sta, WLAN_STA_WME))
+		ieee80211_tdls_add_wmm_param_ie(sdata, skb);
+
+	/* add any custom IEs that go before HT operation */
+	if (extra_ies_len) {
+		static const u8 before_ht_op[] = {
+			WLAN_EID_RSN,
+			WLAN_EID_QOS_CAPA,
+			WLAN_EID_FAST_BSS_TRANSITION,
+			WLAN_EID_TIMEOUT_INTERVAL,
+		};
+		noffset = ieee80211_ie_split(extra_ies, extra_ies_len,
+					     before_ht_op,
+					     ARRAY_SIZE(before_ht_op),
+					     offset);
+		pos = skb_put(skb, noffset - offset);
+		memcpy(pos, extra_ies + offset, noffset - offset);
+		offset = noffset;
+	}
+
+	/* if HT support is only added in TDLS, we need an HT-operation IE */
+	if (!ap_sta->sta.ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
+		struct ieee80211_chanctx_conf *chanctx_conf =
+				rcu_dereference(sdata->vif.chanctx_conf);
+		if (!WARN_ON(!chanctx_conf)) {
+			pos = skb_put(skb, 2 +
+				      sizeof(struct ieee80211_ht_operation));
+			/* send an empty HT operation IE */
+			ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap,
+						   &chanctx_conf->def, 0);
+		}
+	}
+
+	rcu_read_unlock();
+
+	/* add any remaining IEs */
+	if (extra_ies_len) {
+		noffset = extra_ies_len;
+		pos = skb_put(skb, noffset - offset);
+		memcpy(pos, extra_ies + offset, noffset - offset);
+	}
+
+	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+}
+
+static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata,
+				   struct sk_buff *skb, const u8 *peer,
+				   u8 action_code, u16 status_code,
+				   bool initiator, const u8 *extra_ies,
+				   size_t extra_ies_len)
+{
+	switch (action_code) {
+	case WLAN_TDLS_SETUP_REQUEST:
+	case WLAN_TDLS_SETUP_RESPONSE:
+	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+		if (status_code == 0)
+			ieee80211_tdls_add_setup_start_ies(sdata, skb, peer,
+							   action_code,
+							   initiator,
+							   extra_ies,
+							   extra_ies_len);
+		break;
+	case WLAN_TDLS_SETUP_CONFIRM:
+		if (status_code == 0)
+			ieee80211_tdls_add_setup_cfm_ies(sdata, skb, peer,
+							 initiator, extra_ies,
+							 extra_ies_len);
+		break;
+	case WLAN_TDLS_TEARDOWN:
+	case WLAN_TDLS_DISCOVERY_REQUEST:
+		if (extra_ies_len)
+			memcpy(skb_put(skb, extra_ies_len), extra_ies,
+			       extra_ies_len);
+		if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN)
+			ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
+		break;
+	}
+
 }
 
 static int
@@ -84,7 +403,6 @@
 			       u16 status_code, struct sk_buff *skb)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_tdls_data *tf;
 
 	tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
@@ -102,11 +420,8 @@
 		skb_put(skb, sizeof(tf->u.setup_req));
 		tf->u.setup_req.dialog_token = dialog_token;
 		tf->u.setup_req.capability =
-			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
-
-		ieee80211_add_srates_ie(sdata, skb, false, band);
-		ieee80211_add_ext_srates_ie(sdata, skb, false, band);
-		ieee80211_tdls_add_ext_capab(skb);
+			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata,
+								 status_code));
 		break;
 	case WLAN_TDLS_SETUP_RESPONSE:
 		tf->category = WLAN_CATEGORY_TDLS;
@@ -116,11 +431,8 @@
 		tf->u.setup_resp.status_code = cpu_to_le16(status_code);
 		tf->u.setup_resp.dialog_token = dialog_token;
 		tf->u.setup_resp.capability =
-			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
-
-		ieee80211_add_srates_ie(sdata, skb, false, band);
-		ieee80211_add_ext_srates_ie(sdata, skb, false, band);
-		ieee80211_tdls_add_ext_capab(skb);
+			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata,
+								 status_code));
 		break;
 	case WLAN_TDLS_SETUP_CONFIRM:
 		tf->category = WLAN_CATEGORY_TDLS;
@@ -157,7 +469,6 @@
 			   u16 status_code, struct sk_buff *skb)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_mgmt *mgmt;
 
 	mgmt = (void *)skb_put(skb, 24);
@@ -178,11 +489,8 @@
 		mgmt->u.action.u.tdls_discover_resp.dialog_token =
 			dialog_token;
 		mgmt->u.action.u.tdls_discover_resp.capability =
-			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
-
-		ieee80211_add_srates_ie(sdata, skb, false, band);
-		ieee80211_add_ext_srates_ie(sdata, skb, false, band);
-		ieee80211_tdls_add_ext_capab(skb);
+			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata,
+								 status_code));
 		break;
 	default:
 		return -EINVAL;
@@ -202,7 +510,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb = NULL;
 	bool send_direct;
-	const u8 *init_addr, *rsp_addr;
+	struct sta_info *sta;
 	int ret;
 
 	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
@@ -210,6 +518,9 @@
 				sizeof(struct ieee80211_tdls_data)) +
 			    50 + /* supported rates */
 			    7 + /* ext capab */
+			    26 + /* max(WMM-info, WMM-param) */
+			    2 + max(sizeof(struct ieee80211_ht_cap),
+				    sizeof(struct ieee80211_ht_operation)) +
 			    extra_ies_len +
 			    sizeof(struct ieee80211_tdls_lnkie));
 	if (!skb)
@@ -242,45 +553,48 @@
 	if (ret < 0)
 		goto fail;
 
-	if (extra_ies_len)
-		memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+	rcu_read_lock();
+	sta = sta_info_get(sdata, peer);
 
-	/* sanity check for initiator */
+	/* infer the initiator if we can, to support old userspace */
 	switch (action_code) {
 	case WLAN_TDLS_SETUP_REQUEST:
+		if (sta)
+			set_sta_flag(sta, WLAN_STA_TDLS_INITIATOR);
+		/* fall-through */
 	case WLAN_TDLS_SETUP_CONFIRM:
 	case WLAN_TDLS_DISCOVERY_REQUEST:
-		if (!initiator) {
-			ret = -EINVAL;
-			goto fail;
-		}
+		initiator = true;
 		break;
 	case WLAN_TDLS_SETUP_RESPONSE:
+		/*
+		 * In some testing scenarios, we send a request and response.
+		 * Make the last packet sent take effect for the initiator
+		 * value.
+		 */
+		if (sta)
+			clear_sta_flag(sta, WLAN_STA_TDLS_INITIATOR);
+		/* fall-through */
 	case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
-		if (initiator) {
-			ret = -EINVAL;
-			goto fail;
-		}
+		initiator = false;
 		break;
 	case WLAN_TDLS_TEARDOWN:
 		/* any value is ok */
 		break;
 	default:
 		ret = -ENOTSUPP;
+		break;
+	}
+
+	if (sta && test_sta_flag(sta, WLAN_STA_TDLS_INITIATOR))
+		initiator = true;
+
+	rcu_read_unlock();
+	if (ret < 0)
 		goto fail;
-	}
 
-	if (initiator) {
-		init_addr = sdata->vif.addr;
-		rsp_addr = peer;
-	} else {
-		init_addr = peer;
-		rsp_addr = sdata->vif.addr;
-	}
-
-	ieee80211_tdls_add_link_ie(skb, init_addr, rsp_addr,
-				   sdata->u.mgd.bssid);
-
+	ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code,
+			       initiator, extra_ies, extra_ies_len);
 	if (send_direct) {
 		ieee80211_tx_skb(sdata, skb);
 		return 0;
@@ -327,8 +641,8 @@
 	mutex_lock(&local->mtx);
 
 	/* we don't support concurrent TDLS peer setups */
-	if (!is_zero_ether_addr(sdata->tdls_peer) &&
-	    !ether_addr_equal(sdata->tdls_peer, peer)) {
+	if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer) &&
+	    !ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
 		ret = -EBUSY;
 		goto exit;
 	}
@@ -336,15 +650,19 @@
 	/*
 	 * make sure we have a STA representing the peer so we drop or buffer
 	 * non-TDLS-setup frames to the peer. We can't send other packets
-	 * during setup through the AP path
+	 * during setup through the AP path.
+	 * Allow error packets to be sent - sometimes we don't even add a STA
+	 * before failing the setup.
 	 */
-	rcu_read_lock();
-	if (!sta_info_get(sdata, peer)) {
+	if (status_code == 0) {
+		rcu_read_lock();
+		if (!sta_info_get(sdata, peer)) {
+			rcu_read_unlock();
+			ret = -ENOLINK;
+			goto exit;
+		}
 		rcu_read_unlock();
-		ret = -ENOLINK;
-		goto exit;
 	}
-	rcu_read_unlock();
 
 	ieee80211_flush_queues(local, sdata);
 
@@ -355,9 +673,9 @@
 	if (ret < 0)
 		goto exit;
 
-	memcpy(sdata->tdls_peer, peer, ETH_ALEN);
+	memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN);
 	ieee80211_queue_delayed_work(&sdata->local->hw,
-				     &sdata->tdls_peer_del_work,
+				     &sdata->u.mgd.tdls_peer_del_work,
 				     TDLS_PEER_SETUP_TIMEOUT);
 
 exit:
@@ -513,11 +831,22 @@
 		set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
 		rcu_read_unlock();
 
-		WARN_ON_ONCE(is_zero_ether_addr(sdata->tdls_peer) ||
-			     !ether_addr_equal(sdata->tdls_peer, peer));
+		WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) ||
+			     !ether_addr_equal(sdata->u.mgd.tdls_peer, peer));
 		ret = 0;
 		break;
 	case NL80211_TDLS_DISABLE_LINK:
+		/*
+		 * The teardown message in ieee80211_tdls_mgmt_teardown() was
+		 * created while the queues were stopped, so it might still be
+		 * pending. Before flushing the queues we need to be sure the
+		 * message is handled by the tasklet handling pending messages,
+		 * otherwise we might start destroying the station before
+		 * sending the teardown packet.
+		 * Note that this only forces the tasklet to flush pending
+		 * frames - it does not stop the tasklet from rescheduling itself.
+		 */
+		tasklet_kill(&local->tx_pending_tasklet);
 		/* flush a potentially queued teardown packet */
 		ieee80211_flush_queues(local, sdata);
 
@@ -528,9 +857,9 @@
 		break;
 	}
 
-	if (ret == 0 && ether_addr_equal(sdata->tdls_peer, peer)) {
-		cancel_delayed_work(&sdata->tdls_peer_del_work);
-		eth_zero_addr(sdata->tdls_peer);
+	if (ret == 0 && ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
+		cancel_delayed_work(&sdata->u.mgd.tdls_peer_del_work);
+		eth_zero_addr(sdata->u.mgd.tdls_peer);
 	}
 
 	mutex_unlock(&local->mtx);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 865bdaf..464106c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -415,6 +415,9 @@
 	if (ieee80211_has_order(hdr->frame_control))
 		return TX_CONTINUE;
 
+	if (ieee80211_is_probe_req(hdr->frame_control))
+		return TX_CONTINUE;
+
 	if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
 		info->hw_queue = tx->sdata->vif.cab_queue;
 
@@ -464,6 +467,7 @@
 {
 	struct sta_info *sta = tx->sta;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 	struct ieee80211_local *local = tx->local;
 
 	if (unlikely(!sta))
@@ -475,6 +479,12 @@
 		     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
 		int ac = skb_get_queue_mapping(tx->skb);
 
+		if (ieee80211_is_mgmt(hdr->frame_control) &&
+		    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
+			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+			return TX_CONTINUE;
+		}
+
 		ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
 		       sta->sta.addr, sta->sta.aid, ac);
 		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -534,19 +544,9 @@
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-
 	if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
 		return TX_CONTINUE;
 
-	if (ieee80211_is_mgmt(hdr->frame_control) &&
-	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
-		if (tx->flags & IEEE80211_TX_UNICAST)
-			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-		return TX_CONTINUE;
-	}
-
 	if (tx->flags & IEEE80211_TX_UNICAST)
 		return ieee80211_tx_h_unicast_ps_buf(tx);
 	else
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index df1bb7e..725af7a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3083,3 +3083,18 @@
 
 	return max_num_different_channels;
 }
+
+u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo)
+{
+	*buf++ = WLAN_EID_VENDOR_SPECIFIC;
+	*buf++ = 7; /* len */
+	*buf++ = 0x00; /* Microsoft OUI 00:50:F2 */
+	*buf++ = 0x50;
+	*buf++ = 0xf2;
+	*buf++ = 2; /* WME */
+	*buf++ = 0; /* WME info */
+	*buf++ = 1; /* WME ver */
+	*buf++ = qosinfo; /* QoS info (U-APSD) */
+
+	return buf;
+}
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 9265adfd..671ce0d 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -129,6 +129,10 @@
 	if (!vht_cap_ie || !sband->vht_cap.vht_supported)
 		return;
 
+	/* don't support VHT for TDLS peers for now */
+	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+		return;
+
 	/*
 	 * A VHT STA must support 40 MHz, but if we verify that here
 	 * then we break a few things - some APs (e.g. Netgear R6300v2
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 9b3dcc2..f7d4ca4 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -811,7 +811,7 @@
 ieee80211_rx_result
 ieee80211_crypto_hw_decrypt(struct ieee80211_rx_data *rx)
 {
-	if (rx->sta->cipher_scheme)
+	if (rx->sta && rx->sta->cipher_scheme)
 		return ieee80211_crypto_cs_decrypt(rx);
 
 	return RX_DROP_UNUSABLE;
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a8eb0a8..610e19c 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -797,7 +797,6 @@
 			ip_vs_control_del(cp);
 
 		if (cp->flags & IP_VS_CONN_F_NFCT) {
-			ip_vs_conn_drop_conntrack(cp);
 			/* Do not access conntracks during subsys cleanup
 			 * because nf_conntrack_find_get can not be used after
 			 * conntrack cleanup for the net.
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index e70d8b1..fe5cda0 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -38,7 +38,7 @@
 #include "vport.h"
 
 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
-			const struct nlattr *attr, int len, bool keep_skb);
+			      const struct nlattr *attr, int len);
 
 static int make_writable(struct sk_buff *skb, int write_len)
 {
@@ -434,11 +434,17 @@
 	return ovs_dp_upcall(dp, skb, &upcall);
 }
 
+static bool last_action(const struct nlattr *a, int rem)
+{
+	return a->nla_len == rem;
+}
+
 static int sample(struct datapath *dp, struct sk_buff *skb,
 		  const struct nlattr *attr)
 {
 	const struct nlattr *acts_list = NULL;
 	const struct nlattr *a;
+	struct sk_buff *sample_skb;
 	int rem;
 
 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
@@ -455,8 +461,34 @@
 		}
 	}
 
-	return do_execute_actions(dp, skb, nla_data(acts_list),
-						 nla_len(acts_list), true);
+	rem = nla_len(acts_list);
+	a = nla_data(acts_list);
+
+	/* Actions list is either empty or only contains a single user-space
+	 * action, the latter being a special case as it is the only known
+	 * usage of the sample action.
+	 * In these special cases don't clone the skb as there are no
+	 * side-effects in the nested actions.
+	 * Otherwise, clone in case the nested actions have side effects.
+	 */
+	if (likely(rem == 0 || (nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
+				last_action(a, rem)))) {
+		sample_skb = skb;
+		skb_get(skb);
+	} else {
+		sample_skb = skb_clone(skb, GFP_ATOMIC);
+		if (!sample_skb) /* Skip sample action when out of memory. */
+			return 0;
+	}
+
+	/* Note that do_execute_actions() never consumes skb.
+	 * In the case where skb has been cloned above it is the clone that
+	 * is consumed.  Otherwise the skb_get(skb) call prevents
+	 * consumption by do_execute_actions(). Thus, it is safe to simply
+	 * return the error code and let the caller (also
+	 * do_execute_actions()) free skb on error.
+	 */
+	return do_execute_actions(dp, sample_skb, a, rem);
 }
 
 static int execute_set_action(struct sk_buff *skb,
@@ -507,7 +539,7 @@
 
 /* Execute a list of actions against 'skb'. */
 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
-			const struct nlattr *attr, int len, bool keep_skb)
+			      const struct nlattr *attr, int len)
 {
 	/* Every output action needs a separate clone of 'skb', but the common
 	 * case is just a single output action, so that doing a clone and
@@ -562,12 +594,9 @@
 		}
 	}
 
-	if (prev_port != -1) {
-		if (keep_skb)
-			skb = skb_clone(skb, GFP_ATOMIC);
-
+	if (prev_port != -1)
 		do_output(dp, skb, prev_port);
-	} else if (!keep_skb)
+	else
 		consume_skb(skb);
 
 	return 0;
@@ -579,6 +608,5 @@
 	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
 
 	OVS_CB(skb)->tun_key = NULL;
-	return do_execute_actions(dp, skb, acts->actions,
-					 acts->actions_len, false);
+	return do_execute_actions(dp, skb, acts->actions, acts->actions_len);
 }
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 20f59b6..7ad3f02 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -266,7 +266,7 @@
 		upcall.cmd = OVS_PACKET_CMD_MISS;
 		upcall.key = &key;
 		upcall.userdata = NULL;
-		upcall.portid = p->upcall_portid;
+		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
 		ovs_dp_upcall(dp, skb, &upcall);
 		consume_skb(skb);
 		stats_counter = &stats->n_missed;
@@ -464,7 +464,8 @@
 	upcall->dp_ifindex = dp_ifindex;
 
 	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
-	ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
+	err = ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
+	BUG_ON(err);
 	nla_nest_end(user_skb, nla);
 
 	if (upcall_info->userdata)
@@ -780,7 +781,7 @@
 
 	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
 				      always);
-	if (!skb || IS_ERR(skb))
+	if (IS_ERR_OR_NULL(skb))
 		return skb;
 
 	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
@@ -1373,7 +1374,7 @@
 	parms.options = NULL;
 	parms.dp = dp;
 	parms.port_no = OVSP_LOCAL;
-	parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+	parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
 
 	ovs_dp_change(dp, a);
 
@@ -1632,8 +1633,8 @@
 
 	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
 	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
-	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
-	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
+	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
+			   vport->ops->get_name(vport)))
 		goto nla_put_failure;
 
 	ovs_vport_get_stats(vport, &vport_stats);
@@ -1641,6 +1642,9 @@
 		    &vport_stats))
 		goto nla_put_failure;
 
+	if (ovs_vport_get_upcall_portids(vport, skb))
+		goto nla_put_failure;
+
 	err = ovs_vport_get_options(vport, skb);
 	if (err == -EMSGSIZE)
 		goto error;
@@ -1762,7 +1766,7 @@
 	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
 	parms.dp = dp;
 	parms.port_no = port_no;
-	parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+	parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
 
 	vport = new_vport(&parms);
 	err = PTR_ERR(vport);
@@ -1812,8 +1816,14 @@
 			goto exit_unlock_free;
 	}
 
-	if (a[OVS_VPORT_ATTR_UPCALL_PID])
-		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+
+	if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
+		struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
+
+		err = ovs_vport_set_upcall_portids(vport, ids);
+		if (err)
+			goto exit_unlock_free;
+	}
 
 	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
 				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 7ede507..701b573 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -144,7 +144,7 @@
 #define lockdep_ovsl_is_held()	1
 #endif
 
-#define ASSERT_OVSL()		WARN_ON(unlikely(!lockdep_ovsl_is_held()))
+#define ASSERT_OVSL()		WARN_ON(!lockdep_ovsl_is_held())
 #define ovsl_dereference(p)					\
 	rcu_dereference_protected(p, lockdep_ovsl_is_held())
 #define rcu_dereference_ovsl(p)					\
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index bd65855..8451612 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -140,11 +140,14 @@
 	netdev->tx_queue_len = 0;
 
 	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
-			   NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
+			   NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+			   NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL;
 
 	netdev->vlan_features = netdev->features;
+	netdev->hw_enc_features = netdev->features;
 	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
 	netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
+
 	eth_hw_addr_random(netdev);
 }
 
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 42c0f4a..702fb21 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -134,10 +134,12 @@
 
 	vport->dp = parms->dp;
 	vport->port_no = parms->port_no;
-	vport->upcall_portid = parms->upcall_portid;
 	vport->ops = ops;
 	INIT_HLIST_NODE(&vport->dp_hash_node);
 
+	if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids))
+		return ERR_PTR(-EINVAL);
+
 	vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 	if (!vport->percpu_stats) {
 		kfree(vport);
@@ -161,6 +163,10 @@
  */
 void ovs_vport_free(struct vport *vport)
 {
+	/* vport is freed from RCU callback or error path. Therefore
+	 * it is safe to use raw dereference.
+	 */
+	kfree(rcu_dereference_raw(vport->upcall_portids));
 	free_percpu(vport->percpu_stats);
 	kfree(vport);
 }
@@ -327,6 +333,99 @@
 }
 
 /**
+ *	ovs_vport_set_upcall_portids - set upcall portids of @vport.
+ *
+ * @vport: vport to modify.
+ * @ids: new configuration, an array of port ids.
+ *
+ * Sets the vport's upcall_portids to @ids.
+ *
+ * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
+ * as an array of U32.
+ *
+ * Must be called with ovs_mutex.
+ */
+int ovs_vport_set_upcall_portids(struct vport *vport, struct nlattr *ids)
+{
+	struct vport_portids *old, *vport_portids;
+
+	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
+		return -EINVAL;
+
+	old = ovsl_dereference(vport->upcall_portids);
+
+	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
+				GFP_KERNEL);
+	if (!vport_portids)
+		return -ENOMEM;
+
+	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
+	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
+	nla_memcpy(vport_portids->ids, ids, nla_len(ids));
+
+	rcu_assign_pointer(vport->upcall_portids, vport_portids);
+
+	if (old)
+		kfree_rcu(old, rcu);
+	return 0;
+}
+
+/**
+ *	ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
+ *
+ * @vport: vport from which to retrieve the portids.
+ * @skb: sk_buff where portids should be appended.
+ *
+ * Retrieves the configuration of the given vport, appending the
+ * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
+ * portids to @skb.
+ *
+ * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
+ * If an error occurs, @skb is left unmodified.  Must be called with
+ * ovs_mutex or rcu_read_lock.
+ */
+int ovs_vport_get_upcall_portids(const struct vport *vport,
+				 struct sk_buff *skb)
+{
+	struct vport_portids *ids;
+
+	ids = rcu_dereference_ovsl(vport->upcall_portids);
+
+	if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
+		return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
+			       ids->n_ids * sizeof(u32), (void *)ids->ids);
+	else
+		return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
+}
+
+/**
+ *	ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
+ *
+ * @vport: vport from which the missed packet is received.
+ * @skb: skb in which the missed packet was received.
+ *
+ * Uses skb_get_hash() to select the upcall portid to which the
+ * upcall is sent.
+ *
+ * Returns the portid of the target socket.  Must be called with rcu_read_lock.
+ */
+u32 ovs_vport_find_upcall_portid(const struct vport *p, struct sk_buff *skb)
+{
+	struct vport_portids *ids;
+	u32 ids_index;
+	u32 hash;
+
+	ids = rcu_dereference(p->upcall_portids);
+
+	if (ids->n_ids == 1 && ids->ids[0] == 0)
+		return 0;
+
+	hash = skb_get_hash(skb);
+	ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
+	return ids->ids[ids_index];
+}
+
+/**
  *	ovs_vport_receive - pass up received packet to the datapath for processing
  *
  * @vport: vport that received the packet
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 8d721e6..35f89d8 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/netlink.h>
 #include <linux/openvswitch.h>
+#include <linux/reciprocal_div.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/u64_stats_sync.h>
@@ -52,6 +53,10 @@
 int ovs_vport_set_options(struct vport *, struct nlattr *options);
 int ovs_vport_get_options(const struct vport *, struct sk_buff *);
 
+int ovs_vport_set_upcall_portids(struct vport *, struct nlattr *pids);
+int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
+u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
+
 int ovs_vport_send(struct vport *, struct sk_buff *);
 
 /* The following definitions are for implementers of vport devices: */
@@ -62,13 +67,27 @@
 	u64 tx_dropped;
 	u64 tx_errors;
 };
+/**
+ * struct vport_portids - array of netlink portids of a vport.
+ *                        must be protected by rcu.
+ * @rn_ids: The reciprocal value of @n_ids.
+ * @rcu: RCU callback head for deferred destruction.
+ * @n_ids: Size of @ids array.
+ * @ids: Array storing the Netlink socket pids to be used for packets received
+ * on this port that miss the flow table.
+ */
+struct vport_portids {
+	struct reciprocal_value rn_ids;
+	struct rcu_head rcu;
+	u32 n_ids;
+	u32 ids[];
+};
 
 /**
  * struct vport - one port within a datapath
  * @rcu: RCU callback head for deferred destruction.
  * @dp: Datapath to which this port belongs.
- * @upcall_portid: The Netlink port to use for packets received on this port that
- * miss the flow table.
+ * @upcall_portids: RCU protected 'struct vport_portids'.
  * @port_no: Index into @dp's @ports array.
  * @hash_node: Element in @dev_table hash table in vport.c.
  * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
@@ -80,7 +99,7 @@
 struct vport {
 	struct rcu_head rcu;
 	struct datapath	*dp;
-	u32 upcall_portid;
+	struct vport_portids __rcu *upcall_portids;
 	u16 port_no;
 
 	struct hlist_node hash_node;
@@ -111,7 +130,7 @@
 	/* For ovs_vport_alloc(). */
 	struct datapath *dp;
 	u16 port_no;
-	u32 upcall_portid;
+	struct nlattr *upcall_portids;
 };
 
 /**
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 614ca91..8d9f804 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -441,14 +441,10 @@
 {
 	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 
-	if (shhwtstamps) {
-		if ((flags & SOF_TIMESTAMPING_SYS_HARDWARE) &&
-		    ktime_to_timespec_cond(shhwtstamps->syststamp, ts))
-			return TP_STATUS_TS_SYS_HARDWARE;
-		if ((flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
-		    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
-			return TP_STATUS_TS_RAW_HARDWARE;
-	}
+	if (shhwtstamps &&
+	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
+	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
+		return TP_STATUS_TS_RAW_HARDWARE;
 
 	if (ktime_to_timespec_cond(skb->tstamp, ts))
 		return TP_STATUS_TS_SOFTWARE;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index c721cd4..3e9f764 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -420,7 +420,7 @@
 	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
 	walker.count = 0;
 	walker.skip = 0;
-	walker.fn = &tcindex_destroy_element;
+	walker.fn = tcindex_destroy_element;
 	tcindex_walk(tp, &walker);
 	kfree(p->perfect);
 	kfree(p->h);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9de23a2..06a9ee6 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1097,6 +1097,7 @@
 	asoc->c = new->c;
 	asoc->peer.rwnd = new->peer.rwnd;
 	asoc->peer.sack_needed = new->peer.sack_needed;
+	asoc->peer.auth_capable = new->peer.auth_capable;
 	asoc->peer.i = new->peer.i;
 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
 			 asoc->peer.i.initial_tsn, GFP_ATOMIC);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 01ab8e0..1eedba5 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -178,7 +178,7 @@
 
 	case SCTP_XMIT_RWND_FULL:
 	case SCTP_XMIT_OK:
-	case SCTP_XMIT_NAGLE_DELAY:
+	case SCTP_XMIT_DELAY:
 		break;
 	}
 
@@ -633,7 +633,6 @@
 static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
 					   struct sctp_chunk *chunk)
 {
-	sctp_xmit_t retval = SCTP_XMIT_OK;
 	size_t datasize, rwnd, inflight, flight_size;
 	struct sctp_transport *transport = packet->transport;
 	struct sctp_association *asoc = transport->asoc;
@@ -658,15 +657,11 @@
 
 	datasize = sctp_data_size(chunk);
 
-	if (datasize > rwnd) {
-		if (inflight > 0) {
-			/* We have (at least) one data chunk in flight,
-			 * so we can't fall back to rule 6.1 B).
-			 */
-			retval = SCTP_XMIT_RWND_FULL;
-			goto finish;
-		}
-	}
+	if (datasize > rwnd && inflight > 0)
+		/* We have (at least) one data chunk in flight,
+		 * so we can't fall back to rule 6.1 B).
+		 */
+		return SCTP_XMIT_RWND_FULL;
 
 	/* RFC 2960 6.1  Transmission of DATA Chunks
 	 *
@@ -680,36 +675,44 @@
 	 *    When a Fast Retransmit is being performed the sender SHOULD
 	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
 	 */
-	if (chunk->fast_retransmit != SCTP_NEED_FRTX)
-		if (flight_size >= transport->cwnd) {
-			retval = SCTP_XMIT_RWND_FULL;
-			goto finish;
-		}
+	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
+	    flight_size >= transport->cwnd)
+		return SCTP_XMIT_RWND_FULL;
 
 	/* Nagle's algorithm to solve small-packet problem:
 	 * Inhibit the sending of new chunks when new outgoing data arrives
 	 * if any previously transmitted data on the connection remains
 	 * unacknowledged.
 	 */
-	if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
-	    inflight && sctp_state(asoc, ESTABLISHED)) {
-		unsigned int max = transport->pathmtu - packet->overhead;
-		unsigned int len = chunk->skb->len + q->out_qlen;
 
-		/* Check whether this chunk and all the rest of pending
-		 * data will fit or delay in hopes of bundling a full
-		 * sized packet.
-		 * Don't delay large message writes that may have been
-		 * fragmeneted into small peices.
-		 */
-		if ((len < max) && chunk->msg->can_delay) {
-			retval = SCTP_XMIT_NAGLE_DELAY;
-			goto finish;
-		}
-	}
+	if (sctp_sk(asoc->base.sk)->nodelay)
+		/* Nagle disabled */
+		return SCTP_XMIT_OK;
 
-finish:
-	return retval;
+	if (!sctp_packet_empty(packet))
+		/* Append to packet */
+		return SCTP_XMIT_OK;
+
+	if (inflight == 0)
+		/* Nothing unacked */
+		return SCTP_XMIT_OK;
+
+	if (!sctp_state(asoc, ESTABLISHED))
+		return SCTP_XMIT_OK;
+
+	/* Check whether this chunk and all the rest of pending data will fit
+	 * or delay in hopes of bundling a full sized packet.
+	 */
+	if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead)
+		/* Enough data queued to fill a packet */
+		return SCTP_XMIT_OK;
+
+	/* Don't delay large message writes that may have been fragmented */
+	if (!chunk->msg->can_delay)
+		return SCTP_XMIT_OK;
+
+	/* Defer until all data acked or packet full */
+	return SCTP_XMIT_DELAY;
 }
 
 /* This private function does management things when adding DATA chunk */
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index d31435e..7e8f0a1 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -629,7 +629,7 @@
 			done = 1;
 			break;
 
-		case SCTP_XMIT_NAGLE_DELAY:
+		case SCTP_XMIT_DELAY:
 			/* Send this packet. */
 			error = sctp_packet_transmit(pkt);
 
@@ -1015,7 +1015,7 @@
 			switch (status) {
 			case SCTP_XMIT_PMTU_FULL:
 			case SCTP_XMIT_RWND_FULL:
-			case SCTP_XMIT_NAGLE_DELAY:
+			case SCTP_XMIT_DELAY:
 				/* We could not append this chunk, so put
 				 * the chunk back on the output queue.
 				 */
diff --git a/net/socket.c b/net/socket.c
index abf56b2..d8222c0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -725,14 +725,10 @@
 	if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE) &&
 	    ktime_to_timespec_cond(skb->tstamp, ts + 0))
 		empty = 0;
-	if (shhwtstamps) {
-		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) &&
-		    ktime_to_timespec_cond(shhwtstamps->syststamp, ts + 1))
-			empty = 0;
-		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) &&
-		    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts + 2))
-			empty = 0;
-	}
+	if (shhwtstamps &&
+	    sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) &&
+	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts + 2))
+		empty = 0;
 	if (!empty)
 		put_cmsg(msg, SOL_SOCKET,
 			 SCM_TIMESTAMPING, sizeof(ts), &ts);
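
The hunk above stops filling the middle slot of the SCM_TIMESTAMPING payload (the hardware stamp converted to system time); the software stamp in ts[0] and the raw hardware stamp in ts[2] are unaffected. A hedged userspace sketch of a receiver that relies only on those two slots, assuming a libc that exposes SCM_TIMESTAMPING (older ones may need <asm/socket.h>):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/errqueue.h>	/* struct scm_timestamping */

/* Walk the control messages of a received datagram and print the
 * software (ts[0]) and raw hardware (ts[2]) timestamps.  ts[1] is not
 * filled in on this path any more, so it is ignored here.
 */
static void print_timestamps(struct msghdr *msg)
{
	struct cmsghdr *cm;

	for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm)) {
		struct scm_timestamping *tss;

		if (cm->cmsg_level != SOL_SOCKET ||
		    cm->cmsg_type != SCM_TIMESTAMPING)
			continue;

		tss = (struct scm_timestamping *)CMSG_DATA(cm);
		printf("sw %ld.%09ld raw hw %ld.%09ld\n",
		       (long)tss->ts[0].tv_sec, tss->ts[0].tv_nsec,
		       (long)tss->ts[2].tv_sec, tss->ts[2].tv_nsec);
	}
}
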
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b6f45d0..9680be6 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -72,27 +72,38 @@
 	struct sk_buff *head = *headbuf;
 	struct sk_buff *frag = *buf;
 	struct sk_buff *tail;
-	struct tipc_msg *msg = buf_msg(frag);
-	u32 fragid = msg_type(msg);
-	bool headstolen;
+	struct tipc_msg *msg;
+	u32 fragid;
 	int delta;
+	bool headstolen;
 
+	if (!frag)
+		goto err;
+
+	msg = buf_msg(frag);
+	fragid = msg_type(msg);
+	frag->next = NULL;
 	skb_pull(frag, msg_hdr_sz(msg));
 
 	if (fragid == FIRST_FRAGMENT) {
-		if (head || skb_unclone(frag, GFP_ATOMIC))
-			goto out_free;
+		if (unlikely(head))
+			goto err;
+		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
+			goto err;
 		head = *headbuf = frag;
 		skb_frag_list_init(head);
+		TIPC_SKB_CB(head)->tail = NULL;
 		*buf = NULL;
 		return 0;
 	}
+
 	if (!head)
-		goto out_free;
-	tail = TIPC_SKB_CB(head)->tail;
+		goto err;
+
 	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
 		kfree_skb_partial(frag, headstolen);
 	} else {
+		tail = TIPC_SKB_CB(head)->tail;
 		if (!skb_has_frag_list(head))
 			skb_shinfo(head)->frag_list = frag;
 		else
@@ -102,6 +113,7 @@
 		head->len += frag->len;
 		TIPC_SKB_CB(head)->tail = frag;
 	}
+
 	if (fragid == LAST_FRAGMENT) {
 		*buf = head;
 		TIPC_SKB_CB(head)->tail = NULL;
@@ -110,7 +122,8 @@
 	}
 	*buf = NULL;
 	return 0;
-out_free:
+
+err:
 	pr_warn_ratelimited("Unable to build fragment list\n");
 	kfree_skb(*buf);
 	kfree_skb(*headbuf);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 8477d08..7d423ee 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -40,7 +40,6 @@
 #include "node.h"
 #include "link.h"
 #include <linux/export.h>
-#include "link.h"
 
 #define SS_LISTENING	-1	/* socket is listening */
 #define SS_READY	-2	/* socket is connectionless */
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 405f3c4..29c8675 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -162,6 +162,12 @@
 	  and includes code to query that database.  This is an alternative
 	  to using CRDA for defining regulatory rules for the kernel.
 
+	  Using this option requires parsing db.txt at build time. The
+	  parser will be kept up to date with the latest wireless-regdb
+	  updates, but older wireless-regdb formats will be ignored. The
+	  parser may later be replaced to avoid conflicts between
+	  wireless-regdb versions.
+
 	  For details see:
 
 	  http://wireless.kernel.org/en/developers/Regulatory
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
index 40c37fc..baf2426 100644
--- a/net/wireless/genregdb.awk
+++ b/net/wireless/genregdb.awk
@@ -51,32 +51,41 @@
 
 function parse_reg_rule()
 {
+	flag_starts_at = 7
+
 	start = $1
 	sub(/\(/, "", start)
 	end = $3
 	bw = $5
 	sub(/\),/, "", bw)
-	gain = $6
-	sub(/\(/, "", gain)
-	sub(/,/, "", gain)
-	power = $7
-	sub(/\)/, "", power)
-	sub(/,/, "", power)
+	gain = 0
+	power = $6
 	# power might be in mW...
-	units = $8
+	units = $7
+	dfs_cac = 0
+
+	sub(/\(/, "", power)
+	sub(/\),/, "", power)
+	sub(/\),/, "", units)
 	sub(/\)/, "", units)
-	sub(/,/, "", units)
-	dfs_cac = $9
+
 	if (units == "mW") {
+		flag_starts_at = 8
 		power = 10 * log(power)/log(10)
+		if ($8 ~ /[[:digit:]]/) {
+			flag_starts_at = 9
+			dfs_cac = $8
+		}
 	} else {
-		dfs_cac = $8
+		if ($7 ~ /[[:digit:]]/) {
+			flag_starts_at = 8
+			dfs_cac = $7
+		}
 	}
-	sub(/,/, "", dfs_cac)
 	sub(/\(/, "", dfs_cac)
-	sub(/\)/, "", dfs_cac)
+	sub(/\),/, "", dfs_cac)
 	flagstr = ""
-	for (i=8; i<=NF; i++)
+	for (i=flag_starts_at; i<=NF; i++)
 		flagstr = flagstr $i
 	split(flagstr, flagarray, ",")
 	flags = ""
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 082f5c6..df7b133 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3814,7 +3814,8 @@
 {
 	if (params->listen_interval != -1)
 		return -EINVAL;
-	if (params->aid)
+	if (params->aid &&
+	    !(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
 		return -EINVAL;
 
 	/* When you run into this, adjust the code below for the new flag */
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 85474ee..0c524cd 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2049,7 +2049,8 @@
 		MAC_ASSIGN(addr, addr);
 		__entry->key_type = key_type;
 		__entry->key_id = key_id;
-		memcpy(__entry->tsc, tsc, 6);
+		if (tsc)
+			memcpy(__entry->tsc, tsc, 6);
 	),
 	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
 		  NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index a8ef510..0525d78 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2097,6 +2097,8 @@
 				goto no_transform;
 			}
 
+			dst_hold(&xdst->u.dst);
+			xdst->u.dst.flags |= DST_NOCACHE;
 			route = xdst->route;
 		}
 	}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 412d9dc..d4db6eb 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -177,9 +177,7 @@
 		    attrs[XFRMA_ALG_AEAD]	||
 		    attrs[XFRMA_ALG_CRYPT]	||
 		    attrs[XFRMA_ALG_COMP]	||
-		    attrs[XFRMA_TFCPAD]		||
-		    (ntohl(p->id.spi) >= 0x10000))
-
+		    attrs[XFRMA_TFCPAD])
 			goto out;
 		break;
 
@@ -207,7 +205,8 @@
 		    attrs[XFRMA_ALG_AUTH]	||
 		    attrs[XFRMA_ALG_AUTH_TRUNC]	||
 		    attrs[XFRMA_ALG_CRYPT]	||
-		    attrs[XFRMA_TFCPAD])
+		    attrs[XFRMA_TFCPAD]		||
+		    (ntohl(p->id.spi) >= 0x10000))
 			goto out;
 		break;
 
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 6af50eb..70faa3a 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -379,11 +379,11 @@
 	struct special_params *params = bebob->maudio_special_quirk;
 	int err, id;
 
-	mutex_lock(&bebob->mutex);
-
 	id = uval->value.enumerated.item[0];
 	if (id >= ARRAY_SIZE(special_clk_labels))
-		return 0;
+		return -EINVAL;
+
+	mutex_lock(&bebob->mutex);
 
 	err = avc_maudio_set_special_clk(bebob, id,
 					 params->dig_in_fmt,
@@ -391,7 +391,10 @@
 					 params->clk_lock);
 	mutex_unlock(&bebob->mutex);
 
-	return err >= 0;
+	if (err >= 0)
+		err = 1;
+
+	return err;
 }
 static struct snd_kcontrol_new special_clk_ctl = {
 	.name	= "Clock Source",
@@ -434,8 +437,8 @@
 	.get	= special_sync_ctl_get,
 };
 
-/* Digital interface control for special firmware */
-static char *const special_dig_iface_labels[] = {
+/* Digital input interface control for special firmware */
+static char *const special_dig_in_iface_labels[] = {
 	"S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
 };
 static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@
 {
 	einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
 	einf->count = 1;
-	einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels);
+	einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
 
 	if (einf->value.enumerated.item >= einf->value.enumerated.items)
 		einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
 	strcpy(einf->value.enumerated.name,
-	       special_dig_iface_labels[einf->value.enumerated.item]);
+	       special_dig_in_iface_labels[einf->value.enumerated.item]);
 
 	return 0;
 }
@@ -491,26 +494,36 @@
 	unsigned int id, dig_in_fmt, dig_in_iface;
 	int err;
 
-	mutex_lock(&bebob->mutex);
-
 	id = uval->value.enumerated.item[0];
+	if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
+		return -EINVAL;
 
 	/* decode user value */
 	dig_in_fmt = (id >> 1) & 0x01;
 	dig_in_iface = id & 0x01;
 
+	mutex_lock(&bebob->mutex);
+
 	err = avc_maudio_set_special_clk(bebob,
 					 params->clk_src,
 					 dig_in_fmt,
 					 params->dig_out_fmt,
 					 params->clk_lock);
-	if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */
+	if (err < 0)
 		goto end;
 
+	/* For ADAT, only the optical interface is available. */
+	if (params->dig_in_fmt > 0) {
+		err = 1;
+		goto end;
+	}
+
+	/* For S/PDIF, optical/coaxial interfaces are selectable. */
 	err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
 	if (err < 0)
 		dev_err(&bebob->unit->device,
 			"fail to set digital input interface: %d\n", err);
+	err = 1;
 end:
 	special_stream_formation_set(bebob);
 	mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@
 	.put	= special_dig_in_iface_ctl_set
 };
 
+/* Digital output interface control for special firmware */
+static char *const special_dig_out_iface_labels[] = {
+	"S/PDIF Optical and Coaxial", "ADAT Optical"
+};
 static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
 					  struct snd_ctl_elem_info *einf)
 {
 	einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
 	einf->count = 1;
-	einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1;
+	einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
 
 	if (einf->value.enumerated.item >= einf->value.enumerated.items)
 		einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
 	strcpy(einf->value.enumerated.name,
-	       special_dig_iface_labels[einf->value.enumerated.item + 1]);
+	       special_dig_out_iface_labels[einf->value.enumerated.item]);
 
 	return 0;
 }
@@ -558,16 +575,20 @@
 	unsigned int id;
 	int err;
 
-	mutex_lock(&bebob->mutex);
-
 	id = uval->value.enumerated.item[0];
+	if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
+		return -EINVAL;
+
+	mutex_lock(&bebob->mutex);
 
 	err = avc_maudio_set_special_clk(bebob,
 					 params->clk_src,
 					 params->dig_in_fmt,
 					 id, params->clk_lock);
-	if (err >= 0)
+	if (err >= 0) {
 		special_stream_formation_set(bebob);
+		err = 1;
+	}
 
 	mutex_unlock(&bebob->mutex);
 	return err;
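
The bebob changes above converge on the usual ALSA control .put convention: reject out-of-range enumerated items with -EINVAL before taking the device mutex, propagate negative errors, and return 1 once a value has been applied so the core emits a change notification. A minimal sketch of that shape, with the example_* names purely illustrative:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <sound/control.h>

/* Illustrative only: example_device, example_labels and
 * example_apply_setting() stand in for driver-specific pieces.
 */
struct example_device {
	struct mutex mutex;
	/* ... */
};

static const char *const example_labels[] = { "A", "B" };

static int example_apply_setting(struct example_device *dev, unsigned int id);

static int example_enum_ctl_put(struct snd_kcontrol *kctl,
				struct snd_ctl_elem_value *uval)
{
	struct example_device *dev = snd_kcontrol_chip(kctl);
	unsigned int id = uval->value.enumerated.item[0];
	int err;

	/* Validate before locking, as the hunks above now do. */
	if (id >= ARRAY_SIZE(example_labels))
		return -EINVAL;

	mutex_lock(&dev->mutex);
	err = example_apply_setting(dev, id);
	mutex_unlock(&dev->mutex);

	if (err < 0)
		return err;

	return 1;	/* value changed; triggers a control notification */
}
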
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 56ff9be..476d3bf 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1526,17 +1526,33 @@
 		goto out_unmap;
 	}
 
-	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
-		 vctrl_res.start, vgic_maint_irq);
-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
 	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
 		kvm_err("Cannot obtain VCPU resource\n");
 		ret = -ENXIO;
 		goto out_unmap;
 	}
+
+	if (!PAGE_ALIGNED(vcpu_res.start)) {
+		kvm_err("GICV physical address 0x%llx not page aligned\n",
+			(unsigned long long)vcpu_res.start);
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+
+	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+			(unsigned long long)resource_size(&vcpu_res),
+			PAGE_SIZE);
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+
 	vgic_vcpu_base = vcpu_res.start;
 
+	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+		 vctrl_res.start, vgic_maint_irq);
+	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
 	goto out;
 
 out_unmap: