Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem

Conflicts:
	drivers/net/wireless/iwlwifi/iwl-testmode.c
diff --git a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
index 2a7f9a0..e960cd0 100644
--- a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
+++ b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
@@ -1,5 +1,5 @@
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/interface_capabilities
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/device_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/*/interface_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/*/device_capabilities
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
@@ -12,8 +12,8 @@
 		The files are read only.
 
 
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/usb488_interface_capabilities
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/usb488_device_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/*/usb488_interface_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/*/usb488_device_capabilities
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
@@ -27,7 +27,7 @@
 		The files are read only.
 
 
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/TermChar
+What:		/sys/bus/usb/drivers/usbtmc/*/TermChar
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
@@ -40,7 +40,7 @@
 		sent to the device or not.
 
 
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/TermCharEnabled
+What:		/sys/bus/usb/drivers/usbtmc/*/TermCharEnabled
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
@@ -51,7 +51,7 @@
 		published by the USB-IF.
 
 
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/auto_abort
+What:		/sys/bus/usb/drivers/usbtmc/*/auto_abort
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
diff --git a/Documentation/ABI/testing/sysfs-block-rssd b/Documentation/ABI/testing/sysfs-block-rssd
new file mode 100644
index 0000000..d535757
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-block-rssd
@@ -0,0 +1,18 @@
+What:           /sys/block/rssd*/registers
+Date:           March 2012
+KernelVersion:  3.3
+Contact:        Asai Thambi S P <asamymuthupa@micron.com>
+Description:    This is a read-only file. It dumps the driver information and
+                hardware registers listed below:
+                    - S ACTive
+                    - Command Issue
+                    - Allocated
+                    - Completed
+                    - PORT IRQ STAT
+                    - HOST IRQ STAT
+
+What:           /sys/block/rssd*/status
+Date:           April 2012
+KernelVersion:  3.4
+Contact:        Asai Thambi S P <asamymuthupa@micron.com>
+Description:    This is a read-only file. Indicates the status of the device.
diff --git a/Documentation/ABI/testing/sysfs-cfq-target-latency b/Documentation/ABI/testing/sysfs-cfq-target-latency
new file mode 100644
index 0000000..df0f782
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-cfq-target-latency
@@ -0,0 +1,8 @@
+What:		/sys/block/<device>/iosched/target_latency
+Date:		March 2012
+Contact:	Tao Ma <boyu.mt@taobao.com>
+Description:
+		The /sys/block/<device>/iosched/target_latency file only
+		exists when cfq is selected in /sys/block/<device>/scheduler.
+		It contains the estimated latency target for cfq, which cfq
+		uses to calculate the time slice allotted to every task.
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh
index b218e0f..c81fe89 100644
--- a/Documentation/ABI/testing/sysfs-class-net-mesh
+++ b/Documentation/ABI/testing/sysfs-class-net-mesh
@@ -14,6 +14,15 @@
                 mesh will be sent using multiple interfaces at the
                 same time (if available).
 
+What:           /sys/class/net/<mesh_iface>/mesh/bridge_loop_avoidance
+Date:           November 2011
+Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Description:
+                Indicates whether the bridge loop avoidance feature
+                is enabled. This feature detects and avoids loops
+                between the mesh and devices bridged with the soft
+                interface <mesh_iface>.
+
 What:           /sys/class/net/<mesh_iface>/mesh/fragmentation
 Date:           October 2010
 Contact:        Andreas Langer <an.langer@gmx.de>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml b/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
index 3fd3ce5..5274c24 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
@@ -1,6 +1,6 @@
     <refentry id="V4L2-PIX-FMT-NV12M">
       <refmeta>
-	<refentrytitle>V4L2_PIX_FMT_NV12M ('NV12M')</refentrytitle>
+	<refentrytitle>V4L2_PIX_FMT_NV12M ('NM12')</refentrytitle>
 	&manvol;
       </refmeta>
       <refnamediv>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
index 9957863..60308f1 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
@@ -1,6 +1,6 @@
     <refentry id="V4L2-PIX-FMT-YUV420M">
       <refmeta>
-	<refentrytitle>V4L2_PIX_FMT_YUV420M ('YU12M')</refentrytitle>
+	<refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12')</refentrytitle>
 	&manvol;
       </refmeta>
       <refnamediv>
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 4c95c00..9b1067a 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -34,8 +34,7 @@
 
 Features:
  - accounting anonymous pages, file caches, swap caches usage and limiting them.
- - private LRU and reclaim routine. (system's global LRU and private LRU
-   work independently from each other)
+ - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
  - optionally, memory+swap usage can be accounted and limited.
  - hierarchical accounting
  - soft limit
@@ -154,7 +153,7 @@
 2.2.1 Accounting details
 
 All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
-Some pages which are never reclaimable and will not be on the global LRU
+Some pages which are never reclaimable and will not be on the LRU
 are not accounted. We just account pages under usual VM management.
 
 RSS pages are accounted at page_fault unless they've already been accounted
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 709e08e..03ca210 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -531,3 +531,11 @@
 	of ASLR. It was only ever intended for debugging, so it should be
 	removed.
 Who:	Kees Cook <keescook@chromium.org>
+
+----------------------------
+
+What:	setitimer accepts user NULL pointer (value)
+When:	3.6
+Why:	setitimer is not returning -EFAULT if the user pointer is NULL. This
+	violates the spec.
+Who:	Sasikantha Babu <sasikanth.v19@gmail.com>
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index e916e3d..0d04920 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -114,7 +114,7 @@
 struct file_system_type {
 	const char *name;
 	int fs_flags;
-        struct dentry (*mount) (struct file_system_type *, int,
+        struct dentry *(*mount) (struct file_system_type *, int,
                        const char *, void *);
         void (*kill_sb) (struct super_block *);
         struct module *owner;
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 221ad0c..220a58c 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -67,18 +67,18 @@
 All  mesh  wide  settings  can be found in batman's own interface
 folder:
 
-#  ls  /sys/class/net/bat0/mesh/
-# aggregated_ogms   fragmentation gw_sel_class   vis_mode
-# ap_isolation      gw_bandwidth  hop_penalty
-# bonding           gw_mode       orig_interval
+# ls /sys/class/net/bat0/mesh/
+# aggregated_ogms        fragmentation          hop_penalty
+# ap_isolation           gw_bandwidth           log_level
+# bonding                gw_mode                orig_interval
+# bridge_loop_avoidance  gw_sel_class           vis_mode
 
 
 There is a special folder for debugging information:
 
 #  ls /sys/kernel/debug/batman_adv/bat0/
-#  gateways     socket        transtable_global  vis_data
-#  originators  softif_neigh  transtable_local
-
+# bla_claim_table    log                socket             transtable_local
+# gateways           originators        transtable_global  vis_data
 
 Some of the files contain all sorts of status information regard-
 ing  the  mesh  network.  For  example, you can view the table of
@@ -202,12 +202,13 @@
 1 - Enable messages related to routing / flooding / broadcasting
 2 - Enable messages related to route added / changed / deleted
 4 - Enable messages related to translation table operations
-7 - Enable all messages
+8 - Enable messages related to bridge loop avoidance
+15 - Enable all messages
 
 The debug output can be changed at runtime  using  the  file
 /sys/class/net/bat0/mesh/log_level. e.g.
 
-# echo 2 > /sys/class/net/bat0/mesh/log_level
+# echo 6 > /sys/class/net/bat0/mesh/log_level
 
 will enable debug messages for when routes change.
 
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index d0aeead..ab1e8d7 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -111,11 +111,12 @@
 	int phy_addr;
 	int interface;
 	struct stmmac_mdio_bus_data *mdio_bus_data;
-	int pbl;
+	struct stmmac_dma_cfg *dma_cfg;
 	int clk_csr;
 	int has_gmac;
 	int enh_desc;
 	int tx_coe;
+	int rx_coe;
 	int bugged_jumbo;
 	int pmt;
 	int force_sf_dma_mode;
@@ -136,10 +137,12 @@
 o pbl: the Programmable Burst Length is the maximum number of beats to
        be transferred in one DMA transaction.
        GMAC also enables the 4xPBL by default.
- o clk_csr: CSR Clock range selection.
+ o clk_csr: fixed CSR Clock range selection.
  o has_gmac: uses the GMAC core.
 o enh_desc: if set, the MAC will use the enhanced descriptor structure.
  o tx_coe: core is able to perform the tx csum in HW.
+ o rx_coe: the core supports three checksum offloading engine types:
+	   type_1, type_2 (full csum) and no RX coe.
  o bugged_jumbo: some HWs are not able to perform the csum in HW for
 		over-sized frames due to limited buffer sizes.
 		Setting this flag the csum will be done in SW on
@@ -160,7 +163,7 @@
  o custom_cfg: this is a custom configuration that can be passed while
 	      initialising the resources.
 
-The we have:
+For the MDIO bus we have:
 
  struct stmmac_mdio_bus_data {
 	int bus_id;
@@ -177,10 +180,28 @@
  o irqs: list of IRQs, one per PHY.
  o probed_phy_irq: if irqs is NULL, use this for probed PHY.
 
+
+For the DMA engine we have the following internal fields that should be
+tuned according to the HW capabilities.
+
+struct stmmac_dma_cfg {
+	int pbl;
+	int fixed_burst;
+	int burst_len_supported;
+};
+
+Where:
+ o pbl: Programmable Burst Length
+ o fixed_burst: program the DMA to use the fixed burst mode
+ o burst_len: this is the value written to the register; the
+	      supported values are provided as macros in the
+	      linux/stmmac.h header file.
+
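+As a minimal illustrative sketch only (the structure names and values
+below are hypothetical and must reflect the real DMA capabilities of
+the platform), a board file could wire this up as:
+
+ static struct stmmac_dma_cfg my_dma_cfg = {
+	.pbl = 32,
+	.fixed_burst = 1,
+	/* .burst_len_supported: pick one of the macros from linux/stmmac.h */
+ };
+
+ static struct plat_stmmacenet_data my_platform_data = {
+	.has_gmac = 1,
+	.dma_cfg = &my_dma_cfg,
+ };
+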
+---
+
 Below is an example of how the structures above are used on ST platforms.
 
  static struct plat_stmmacenet_data stxYYY_ethernet_platform_data = {
-	.pbl = 32,
 	.has_gmac = 0,
 	.enh_desc = 0,
 	.fix_mac_speed = stxYYY_ethernet_fix_mac_speed,
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index d97d992..03f7897 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -43,7 +43,9 @@
 
 ALC882/883/885/888/889
 ======================
-  N/A
+  acer-aspire-4930g	Acer Aspire 4930G/5930G/6530G/6930G/7730G
+  acer-aspire-8930g	Acer Aspire 8330G/6935G
+  acer-aspire		Acer Aspire others
 
 ALC861/660
 ==========
diff --git a/Documentation/usb/URB.txt b/Documentation/usb/URB.txt
index 8ffce74..00d2c64 100644
--- a/Documentation/usb/URB.txt
+++ b/Documentation/usb/URB.txt
@@ -168,6 +168,28 @@
 they will get a -EPERM error.  Thus you can be sure that when
 usb_kill_urb() returns, the URB is totally idle.
 
+There is a lifetime issue to consider.  An URB may complete at any
+time, and the completion handler may free the URB.  If this happens
+while usb_unlink_urb or usb_kill_urb is running, it will cause a
+memory-access violation.  The driver is responsible for avoiding this,
+which often means some sort of lock will be needed to prevent the URB
+from being deallocated while it is still in use.
+
+On the other hand, since usb_unlink_urb may end up calling the
+completion handler, the handler must not take any lock that is held
+when usb_unlink_urb is invoked.  The general solution to this problem
+is to increment the URB's reference count while holding the lock, then
+drop the lock and call usb_unlink_urb or usb_kill_urb, and then
+decrement the URB's reference count.  You increment the reference
+count by calling
+
+	struct urb *usb_get_urb(struct urb *urb)
+
+(ignore the return value; it is the same as the argument) and
+decrement the reference count by calling usb_free_urb.  Of course,
+none of this is necessary if there's no danger of the URB being freed
+by the completion handler.
+
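+A minimal sketch of this pattern, assuming a hypothetical driver-private
+structure (only the usb_* calls are part of the USB API; every other name
+below is illustrative):
+
+	struct my_dev {			/* hypothetical driver state */
+		spinlock_t lock;	/* protects 'urb' */
+		struct urb *urb;
+	};
+
+	static void my_dev_cancel(struct my_dev *dev)
+	{
+		struct urb *urb;
+
+		spin_lock_irq(&dev->lock);
+		urb = dev->urb;
+		if (urb)
+			usb_get_urb(urb);	/* take our own reference */
+		spin_unlock_irq(&dev->lock);
+
+		if (urb) {
+			usb_kill_urb(urb);	/* completion handler may run here */
+			usb_free_urb(urb);	/* drop the reference taken above */
+		}
+	}
+
+Note that the lock is dropped before usb_kill_urb() is called, so the
+completion handler remains free to take it.
+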
 
 1.7. What about the completion handler?
 
diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
index 5335fa8..c42bb9c 100644
--- a/Documentation/usb/usbmon.txt
+++ b/Documentation/usb/usbmon.txt
@@ -183,10 +183,10 @@
 d5ea89a0 3575914555 S Ci:1:001:0 s a3 00 0000 0003 0004 4 <
 d5ea89a0 3575914560 C Ci:1:001:0 0 4 = 01050000
 
-An output bulk transfer to send a SCSI command 0x5E in a 31-byte Bulk wrapper
-to a storage device at address 5:
+An output bulk transfer to send a SCSI command 0x28 (READ_10) in a 31-byte
+Bulk wrapper to a storage device at address 5:
 
-dd65f0e8 4128379752 S Bo:1:005:2 -115 31 = 55534243 5e000000 00000000 00000600 00000000 00000000 00000000 000000
+dd65f0e8 4128379752 S Bo:1:005:2 -115 31 = 55534243 ad000000 00800000 80010a28 20000000 20000040 00000000 000000
 dd65f0e8 4128379808 C Bo:1:005:2 0 31 >
 
 * Raw binary format and API
diff --git a/MAINTAINERS b/MAINTAINERS
index fa96459..a6fcc1e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -228,7 +228,7 @@
 L:	linux-acpi@vger.kernel.org
 W:	http://www.lesswatts.org/projects/acpi/
 Q:	http://patchwork.kernel.org/project/linux-acpi/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
 S:	Supported
 F:	drivers/acpi/
 F:	drivers/pnp/pnpacpi/
@@ -1431,6 +1431,7 @@
 BATMAN ADVANCED
 M:	Marek Lindner <lindner_marek@yahoo.de>
 M:	Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+M:	Antonio Quartulli <ordex@autistici.org>
 L:	b.a.t.m.a.n@lists.open-mesh.org
 W:	http://www.open-mesh.org/
 S:	Maintained
@@ -2321,9 +2322,9 @@
 F:	drivers/acpi/dock.c
 
 DOCUMENTATION
-M:	Randy Dunlap <rdunlap@xenotime.net>
+M:	Rob Landley <rob@landley.net>
 L:	linux-doc@vger.kernel.org
-T:	quilt http://xenotime.net/kernel-doc-patches/current/
+T:	TBD
 S:	Maintained
 F:	Documentation/
 
@@ -2450,17 +2451,17 @@
 
 EDAC-CORE
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Supported
 F:	Documentation/edac.txt
-F:	drivers/edac/edac_*
+F:	drivers/edac/
 F:	include/linux/edac.h
 
 EDAC-AMD64
 M:	Doug Thompson <dougthompson@xmission.com>
 M:	Borislav Petkov <borislav.petkov@amd.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Supported
 F:	drivers/edac/amd64_edac*
@@ -2468,35 +2469,35 @@
 EDAC-E752X
 M:	Mark Gross <mark.gross@intel.com>
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/e752x_edac.c
 
 EDAC-E7XXX
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/e7xxx_edac.c
 
 EDAC-I82443BXGX
 M:	Tim Small <tim@buttersideup.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i82443bxgx_edac.c
 
 EDAC-I3000
 M:	Jason Uhlenkott <juhlenko@akamai.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i3000_edac.c
 
 EDAC-I5000
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i5000_edac.c
@@ -2525,21 +2526,21 @@
 EDAC-I82975X
 M:	Ranganathan Desikan <ravi@jetztechnologies.com>
 M:	"Arvind R." <arvino55@gmail.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i82975x_edac.c
 
 EDAC-PASEMI
 M:	Egor Martovetsky <egor@pasemi.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/pasemi_edac.c
 
 EDAC-R82600
 M:	Tim Small <tim@buttersideup.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/r82600_edac.c
@@ -3519,12 +3520,6 @@
 S:	Maintained
 F:	drivers/char/hw_random/ixp4xx-rng.c
 
-INTEL IXP2000 ETHERNET DRIVER
-M:	Lennert Buytenhek <kernel@wantstofly.org>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/xscale/ixp2000/
-
 INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 M:	Jesse Brandeburg <jesse.brandeburg@intel.com>
@@ -3592,6 +3587,7 @@
 F:	drivers/net/wireless/iwlegacy/
 
 INTEL WIRELESS WIFI LINK (iwlwifi)
+M:	Johannes Berg <johannes.berg@intel.com>
 M:	Wey-Yi Guy <wey-yi.w.guy@intel.com>
 M:	Intel Linux Wireless <ilw@linux.intel.com>
 L:	linux-wireless@vger.kernel.org
@@ -4533,8 +4529,7 @@
 F:	drivers/net/ethernet/myricom/myri10ge/
 
 NATSEMI ETHERNET DRIVER (DP8381x)
-M:	Tim Hockin <thockin@hockin.org>
-S:	Maintained
+S:	Orphan
 F:	drivers/net/ethernet/natsemi/natsemi.c
 
 NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
@@ -4803,6 +4798,7 @@
 F:	arch/arm/mach-omap2/clockdomain44xx.c
 
 OMAP AUDIO SUPPORT
+M:	Peter Ujfalusi <peter.ujfalusi@ti.com>
 M:	Jarkko Nikula <jarkko.nikula@bitmer.com>
 L:	alsa-devel@alsa-project.org (subscribers-only)
 L:	linux-omap@vger.kernel.org
@@ -5117,6 +5113,11 @@
 F:	include/linux/i2c-algo-pca.h
 F:	include/linux/i2c-pca-platform.h
 
+PCDP - PRIMARY CONSOLE AND DEBUG PORT
+M:	Khalid Aziz <khalid.aziz@hp.com>
+S:	Maintained
+F:	drivers/firmware/pcdp.*
+
 PCI ERROR RECOVERY
 M:     Linas Vepstas <linasvepstas@gmail.com>
 L:	linux-pci@vger.kernel.org
@@ -5202,7 +5203,7 @@
 F:	include/linux/personality.h
 
 PHONET PROTOCOL
-M:	Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+M:	Remi Denis-Courmont <courmisch@gmail.com>
 S:	Supported
 F:	Documentation/networking/phonet.txt
 F:	include/linux/phonet.h
@@ -6466,6 +6467,7 @@
 F:	drivers/staging/olpc_dcon/
 
 STAGING - OZMO DEVICES USB OVER WIFI DRIVER
+M:	Rupesh Gujare <rgujare@ozmodevices.com>
 M:	Chris Kelly <ckelly@ozmodevices.com>
 S:	Maintained
 F:	drivers/staging/ozwpan/
@@ -7454,8 +7456,7 @@
 
 WOLFSON MICROELECTRONICS DRIVERS
 M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
-M:	Ian Lartey <ian@opensource.wolfsonmicro.com>
-M:	Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+L:	patches@opensource.wolfsonmicro.com
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-asoc
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
 W:	http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
@@ -7566,8 +7567,8 @@
 F:	fs/xfs/
 
 XILINX AXI ETHERNET DRIVER
-M:	Ariane Keller <ariane.keller@tik.ee.ethz.ch>
-M:	Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch>
+M:	Anirudha Sarangi <anirudh@xilinx.com>
+M:	John Linn <John.Linn@xilinx.com>
 S:	Maintained
 F:	drivers/net/ethernet/xilinx/xilinx_axienet*
 
diff --git a/Makefile b/Makefile
index 5e637c2..afc868e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc4
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index f62251e..3bb7ffe 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <asm/barrier.h>
+#include <asm/cmpxchg.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -168,73 +169,6 @@
 	return result;
 }
 
-/*
- * Atomic exchange routines.
- */
-
-#define __ASM__MB
-#define ____xchg(type, args...)		__xchg ## type ## _local(args)
-#define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
-#include <asm/xchg.h>
-
-#define xchg_local(ptr,x)						\
-  ({									\
-     __typeof__(*(ptr)) _x_ = (x);					\
-     (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_,	\
-				       sizeof(*(ptr)));			\
-  })
-
-#define cmpxchg_local(ptr, o, n)					\
-  ({									\
-     __typeof__(*(ptr)) _o_ = (o);					\
-     __typeof__(*(ptr)) _n_ = (n);					\
-     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
-					  (unsigned long)_n_,		\
-					  sizeof(*(ptr)));		\
-  })
-
-#define cmpxchg64_local(ptr, o, n)					\
-  ({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
-  })
-
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB	"\tmb\n"
-#endif
-#undef ____xchg
-#undef ____cmpxchg
-#define ____xchg(type, args...)		__xchg ##type(args)
-#define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
-#include <asm/xchg.h>
-
-#define xchg(ptr,x)							\
-  ({									\
-     __typeof__(*(ptr)) _x_ = (x);					\
-     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_,		\
-				 sizeof(*(ptr)));			\
-  })
-
-#define cmpxchg(ptr, o, n)						\
-  ({									\
-     __typeof__(*(ptr)) _o_ = (o);					\
-     __typeof__(*(ptr)) _n_ = (n);					\
-     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		\
-				    (unsigned long)_n_,	sizeof(*(ptr)));\
-  })
-
-#define cmpxchg64(ptr, o, n)						\
-  ({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg((ptr), (o), (n));					\
-  })
-
-#undef __ASM__MB
-#undef ____cmpxchg
-
-#define __HAVE_ARCH_CMPXCHG 1
-
 #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
new file mode 100644
index 0000000..429e8cd
--- /dev/null
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -0,0 +1,71 @@
+#ifndef _ALPHA_CMPXCHG_H
+#define _ALPHA_CMPXCHG_H
+
+/*
+ * Atomic exchange routines.
+ */
+
+#define __ASM__MB
+#define ____xchg(type, args...)		__xchg ## type ## _local(args)
+#define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
+#include <asm/xchg.h>
+
+#define xchg_local(ptr, x)						\
+({									\
+	__typeof__(*(ptr)) _x_ = (x);					\
+	(__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_,	\
+				       sizeof(*(ptr)));			\
+})
+
+#define cmpxchg_local(ptr, o, n)					\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
+					  (unsigned long)_n_,		\
+					  sizeof(*(ptr)));		\
+})
+
+#define cmpxchg64_local(ptr, o, n)					\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg_local((ptr), (o), (n));					\
+})
+
+#ifdef CONFIG_SMP
+#undef __ASM__MB
+#define __ASM__MB	"\tmb\n"
+#endif
+#undef ____xchg
+#undef ____cmpxchg
+#define ____xchg(type, args...)		__xchg ##type(args)
+#define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
+#include <asm/xchg.h>
+
+#define xchg(ptr, x)							\
+({									\
+	__typeof__(*(ptr)) _x_ = (x);					\
+	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_,		\
+				 sizeof(*(ptr)));			\
+})
+
+#define cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
+				    (unsigned long)_n_,	sizeof(*(ptr)));\
+})
+
+#define cmpxchg64(ptr, o, n)						\
+({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg((ptr), (o), (n));					\
+})
+
+#undef __ASM__MB
+#undef ____cmpxchg
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+#endif /* _ALPHA_CMPXCHG_H */
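For reference, a minimal and purely illustrative use of the interfaces
collected in this header; the helper below is hypothetical and is not part
of the patch:

	/*
	 * Atomically claim a flag: returns non-zero only for the caller
	 * that transitions *flag from 0 to 1.
	 */
	static inline int try_claim(unsigned long *flag)
	{
		return cmpxchg(flag, 0UL, 1UL) == 0UL;
	}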
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 1d1b436..0ca9724 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -1,10 +1,10 @@
-#ifndef _ALPHA_ATOMIC_H
+#ifndef _ALPHA_CMPXCHG_H
 #error Do not include xchg.h directly!
 #else
 /*
  * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
  * except that local version do not have the expensive memory barrier.
- * So this file is included twice from asm/system.h.
+ * So this file is included twice from asm/cmpxchg.h.
  */
 
 /*
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index 6ce11c4..797f04b 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -77,6 +77,8 @@
 		} else if (atag->hdr.tag == ATAG_MEM) {
 			if (memcount >= sizeof(mem_reg_property)/4)
 				continue;
+			if (!atag->u.mem.size)
+				continue;
 			mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
 			mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
 		} else if (atag->hdr.tag == ATAG_INITRD2) {
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 5f6045f..dc7e8ce 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -273,7 +273,7 @@
 		add	r0, r0, #0x100
 		mov	r1, r6
 		sub	r2, sp, r6
-		blne	atags_to_fdt
+		bleq	atags_to_fdt
 
 		ldmfd	sp!, {r0-r3, ip, lr}
 		sub	sp, sp, #0x10000
diff --git a/arch/arm/boot/dts/at91sam9g20.dtsi b/arch/arm/boot/dts/at91sam9g20.dtsi
index 799ad18..773ef48 100644
--- a/arch/arm/boot/dts/at91sam9g20.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20.dtsi
@@ -55,7 +55,6 @@
 				#interrupt-cells = <2>;
 				compatible = "atmel,at91rm9200-aic";
 				interrupt-controller;
-				interrupt-parent;
 				reg = <0xfffff000 0x200>;
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 9e6eb6e..c804214 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -56,7 +56,6 @@
 				#interrupt-cells = <2>;
 				compatible = "atmel,at91rm9200-aic";
 				interrupt-controller;
-				interrupt-parent;
 				reg = <0xfffff000 0x200>;
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 70ab3a4..dd4ed74 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -54,7 +54,6 @@
 				#interrupt-cells = <2>;
 				compatible = "atmel,at91rm9200-aic";
 				interrupt-controller;
-				interrupt-parent;
 				reg = <0xfffff000 0x200>;
 			};
 
diff --git a/arch/arm/boot/dts/db8500.dtsi b/arch/arm/boot/dts/db8500.dtsi
index d73dce6..14bc307 100644
--- a/arch/arm/boot/dts/db8500.dtsi
+++ b/arch/arm/boot/dts/db8500.dtsi
@@ -24,7 +24,6 @@
 			#interrupt-cells = <3>;
 			#address-cells = <1>;
 			interrupt-controller;
-			interrupt-parent;
 			reg = <0xa0411000 0x1000>,
 			      <0xa0410100 0x100>;
 		};
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 37c0ff9c..83e7229 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -89,7 +89,6 @@
 			#size-cells = <0>;
 			#address-cells = <1>;
 			interrupt-controller;
-			interrupt-parent;
 			reg = <0xfff11000 0x1000>,
 			      <0xfff10100 0x100>;
 		};
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 7a66311..7e288f9 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -427,19 +427,18 @@
 
 /*
  * Handle each interrupt in a single VIC.  Returns non-zero if we've
- * handled at least one interrupt.  This does a single read of the
- * status register and handles all interrupts in order from LSB first.
+ * handled at least one interrupt.  This reads the status register
+ * before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
  */
 static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
 {
 	u32 stat, irq;
 	int handled = 0;
 
-	stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
-	while (stat) {
+	while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
 		irq = ffs(stat) - 1;
 		handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
-		stat &= ~(1 << irq);
 		handled = 1;
 	}
 
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index b5ac644..6b31cb6 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -112,6 +112,7 @@
 CONFIG_IMX2_WDT=y
 CONFIG_MFD_MC13XXX=y
 CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_MC13783=y
 CONFIG_REGULATOR_MC13892=y
 CONFIG_FB=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 889d73a..7e84f45 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -8,8 +8,6 @@
 # CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_U8500=y
-CONFIG_UX500_SOC_DB5500=y
-CONFIG_UX500_SOC_DB8500=y
 CONFIG_MACH_HREFV60=y
 CONFIG_MACH_SNOWBALL=y
 CONFIG_MACH_U5500=y
@@ -39,7 +37,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_MISC_DEVICES=y
 CONFIG_AB8500_PWM=y
 CONFIG_SENSORS_BH1780=y
 CONFIG_NETDEVICES=y
@@ -65,16 +62,18 @@
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_NOMADIK=y
-CONFIG_I2C=y
-CONFIG_I2C_NOMADIK=y
 CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_STMPE=y
 CONFIG_GPIO_TC3589X=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_AB8500_BM=y
+CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL=y
 CONFIG_MFD_STMPE=y
 CONFIG_MFD_TC3589X=y
 CONFIG_AB5500_CORE=y
 CONFIG_AB8500_CORE=y
+CONFIG_REGULATOR=y
 CONFIG_REGULATOR_AB8500=y
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 5c5ca2e..bfc198c 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -14,7 +14,7 @@
 #define JUMP_LABEL_NOP	"nop"
 #endif
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("1:\n\t"
 		 JUMP_LABEL_NOP "\n\t"
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b914113..ebfac78 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -523,7 +523,21 @@
 	 */
 	size -= start & ~PAGE_MASK;
 	bank->start = PAGE_ALIGN(start);
-	bank->size  = size & PAGE_MASK;
+
+#ifndef CONFIG_LPAE
+	if (bank->start + size < bank->start) {
+		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
+			"32-bit physical address space\n", (long long)start);
+		/*
+		 * To ensure bank->start + bank->size is representable in
+		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
+		 * This means we lose a page after masking.
+		 */
+		size = ULONG_MAX - bank->start;
+	}
+#endif
+
+	bank->size = size & PAGE_MASK;
 
 	/*
 	 * Check whether this memory region has non-zero size or
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 99ce5c9..05774e5 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -1173,7 +1173,6 @@
 		printk(KERN_INFO "AT91: No default serial console defined.\n");
 }
 #else
-void __init __deprecated at91_init_serial(struct at91_uart_config *config) {}
 void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
 void __init at91_set_serial_console(unsigned portnr) {}
 void __init at91_add_device_serial(void) {}
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index dd7f782..104ca40 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/clockchips.h>
+#include <linux/export.h>
 
 #include <asm/mach/time.h>
 
@@ -176,6 +177,7 @@
 };
 
 void __iomem *at91_st_base;
+EXPORT_SYMBOL_GPL(at91_st_base);
 
 void __init at91rm9200_ioremap_st(u32 addr)
 {
diff --git a/arch/arm/mach-at91/board-rm9200ek.c b/arch/arm/mach-at91/board-rm9200ek.c
index 11cbaa8..b2e4fe2 100644
--- a/arch/arm/mach-at91/board-rm9200ek.c
+++ b/arch/arm/mach-at91/board-rm9200ek.c
@@ -117,7 +117,7 @@
 };
 
 #define EK_FLASH_BASE	AT91_CHIPSELECT_0
-#define EK_FLASH_SIZE	SZ_2M
+#define EK_FLASH_SIZE	SZ_8M
 
 static struct physmap_flash_data ek_flash_data = {
 	.width		= 2,
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index c3f9944..065fed3 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -85,8 +85,6 @@
 		.flags	= IORESOURCE_MEM
 	},
 	[2] = {
-		.start	= AT91_PIN_PC11,
-		.end	= AT91_PIN_PC11,
 		.flags	= IORESOURCE_IRQ
 			| IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
 	}
@@ -130,6 +128,8 @@
 
 static void __init ek_add_device_dm9000(void)
 {
+	struct resource *r = &dm9000_resource[2];
+
 	/* Configure chip-select 2 (DM9000) */
 	sam9_smc_configure(0, 2, &dm9000_smc_config);
 
@@ -139,6 +139,7 @@
 	/* Configure Interrupt pin as input, no pull-up */
 	at91_set_gpio_input(AT91_PIN_PC11, 0);
 
+	r->start = r->end = gpio_to_irq(AT91_PIN_PC11);
 	platform_device_register(&dm9000_device);
 }
 #else
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index a0f4d74..6b69282 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -35,6 +35,7 @@
 #include "generic.h"
 
 void __iomem *at91_pmc_base;
+EXPORT_SYMBOL_GPL(at91_pmc_base);
 
 /*
  * There's a lot more which can be done with clocks, including cpufreq
diff --git a/arch/arm/mach-at91/include/mach/at91_pmc.h b/arch/arm/mach-at91/include/mach/at91_pmc.h
index 3660478..ea2c57a 100644
--- a/arch/arm/mach-at91/include/mach/at91_pmc.h
+++ b/arch/arm/mach-at91/include/mach/at91_pmc.h
@@ -25,7 +25,7 @@
 #define at91_pmc_write(field, value) \
 	__raw_writel(value, at91_pmc_base + field)
 #else
-.extern at91_aic_base
+.extern at91_pmc_base
 #endif
 
 #define	AT91_PMC_SCER		0x00			/* System Clock Enable Register */
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index 97cc04d..f44a2e7 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -54,6 +54,7 @@
 }
 
 void __iomem *at91_ramc_base[2];
+EXPORT_SYMBOL_GPL(at91_ramc_base);
 
 void __init at91_ioremap_ramc(int id, u32 addr, u32 size)
 {
@@ -292,6 +293,7 @@
 }
 
 void __iomem *at91_matrix_base;
+EXPORT_SYMBOL_GPL(at91_matrix_base);
 
 void __init at91_ioremap_matrix(u32 base_addr)
 {
diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c
index 22e4e0a..adbfb19 100644
--- a/arch/arm/mach-bcmring/core.c
+++ b/arch/arm/mach-bcmring/core.c
@@ -52,8 +52,8 @@
 #include <mach/csp/chipcHw_inline.h>
 #include <mach/csp/tmrHw_reg.h>
 
-static AMBA_APB_DEVICE(uartA, "uarta", MM_ADDR_IO_UARTA, { IRQ_UARTA }, NULL);
-static AMBA_APB_DEVICE(uartB, "uartb", MM_ADDR_IO_UARTB, { IRQ_UARTB }, NULL);
+static AMBA_APB_DEVICE(uartA, "uartA", 0, MM_ADDR_IO_UARTA, {IRQ_UARTA}, NULL);
+static AMBA_APB_DEVICE(uartB, "uartB", 0, MM_ADDR_IO_UARTB, {IRQ_UARTB}, NULL);
 
 static struct clk pll1_clk = {
 	.name = "PLL1",
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 0491cee..e81c35f 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -368,6 +368,7 @@
 
 config MACH_EXYNOS4_DT
 	bool "Samsung Exynos4 Machine using device tree"
+	depends on ARCH_EXYNOS4
 	select CPU_EXYNOS4210
 	select USE_OF
 	select ARM_AMBA
@@ -380,6 +381,7 @@
 
 config MACH_EXYNOS5_DT
 	bool "SAMSUNG EXYNOS5 Machine using device tree"
+	depends on ARCH_EXYNOS5
 	select SOC_EXYNOS5250
 	select USE_OF
 	select ARM_AMBA
diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h
index 9bee853..591e785 100644
--- a/arch/arm/mach-exynos/include/mach/irqs.h
+++ b/arch/arm/mach-exynos/include/mach/irqs.h
@@ -212,6 +212,8 @@
 #define IRQ_MFC				EXYNOS4_IRQ_MFC
 #define IRQ_SDO				EXYNOS4_IRQ_SDO
 
+#define IRQ_I2S0			EXYNOS4_IRQ_I2S0
+
 #define IRQ_ADC				EXYNOS4_IRQ_ADC0
 #define IRQ_TC				EXYNOS4_IRQ_PEN0
 
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index 024d38f..6e6d11f 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -89,6 +89,10 @@
 #define EXYNOS4_PA_MDMA1		0x12840000
 #define EXYNOS4_PA_PDMA0		0x12680000
 #define EXYNOS4_PA_PDMA1		0x12690000
+#define EXYNOS5_PA_MDMA0		0x10800000
+#define EXYNOS5_PA_MDMA1		0x11C10000
+#define EXYNOS5_PA_PDMA0		0x121A0000
+#define EXYNOS5_PA_PDMA1		0x121B0000
 
 #define EXYNOS4_PA_SYSMMU_MDMA		0x10A40000
 #define EXYNOS4_PA_SYSMMU_SSS		0x10A50000
diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h
index e141c1f..d9578a5 100644
--- a/arch/arm/mach-exynos/include/mach/regs-clock.h
+++ b/arch/arm/mach-exynos/include/mach/regs-clock.h
@@ -255,9 +255,15 @@
 
 /* For EXYNOS5250 */
 
+#define EXYNOS5_APLL_LOCK			EXYNOS_CLKREG(0x00000)
 #define EXYNOS5_APLL_CON0			EXYNOS_CLKREG(0x00100)
 #define EXYNOS5_CLKSRC_CPU			EXYNOS_CLKREG(0x00200)
+#define EXYNOS5_CLKMUX_STATCPU			EXYNOS_CLKREG(0x00400)
 #define EXYNOS5_CLKDIV_CPU0			EXYNOS_CLKREG(0x00500)
+#define EXYNOS5_CLKDIV_CPU1			EXYNOS_CLKREG(0x00504)
+#define EXYNOS5_CLKDIV_STATCPU0			EXYNOS_CLKREG(0x00600)
+#define EXYNOS5_CLKDIV_STATCPU1			EXYNOS_CLKREG(0x00604)
+
 #define EXYNOS5_MPLL_CON0			EXYNOS_CLKREG(0x04100)
 #define EXYNOS5_CLKSRC_CORE1			EXYNOS_CLKREG(0x04204)
 
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 0d26f50..4711c89 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -45,7 +45,7 @@
 				"exynos4210-uart.3", NULL),
 	OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
 	OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
-	OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.2", NULL),
+	OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
 	{},
 };
 
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index b3982c8..b4f1f90 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -307,49 +307,7 @@
 };
 
 /* TSP */
-static u8 mxt_init_vals[] = {
-	/* MXT_GEN_COMMAND(6) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* MXT_GEN_POWER(7) */
-	0x20, 0xff, 0x32,
-	/* MXT_GEN_ACQUIRE(8) */
-	0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
-	/* MXT_TOUCH_MULTI(9) */
-	0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
-	0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00,
-	/* MXT_TOUCH_KEYARRAY(15) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
-	0x00,
-	/* MXT_SPT_GPIOPWM(19) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* MXT_PROCI_GRIPFACE(20) */
-	0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
-	0x0f, 0x0a,
-	/* MXT_PROCG_NOISE(22) */
-	0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
-	0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
-	/* MXT_TOUCH_PROXIMITY(23) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00,
-	/* MXT_PROCI_ONETOUCH(24) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* MXT_SPT_SELFTEST(25) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00,
-	/* MXT_PROCI_TWOTOUCH(27) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* MXT_SPT_CTECONFIG(28) */
-	0x00, 0x00, 0x02, 0x08, 0x10, 0x00,
-};
-
 static struct mxt_platform_data mxt_platform_data = {
-	.config			= mxt_init_vals,
-	.config_length		= ARRAY_SIZE(mxt_init_vals),
-
 	.x_line			= 18,
 	.y_line			= 11,
 	.x_size			= 1024,
@@ -571,7 +529,7 @@
 
 static struct regulator_init_data __initdata max8997_ldo8_data = {
 	.constraints	= {
-		.name		= "VUSB/VDAC_3.3V_C210",
+		.name		= "VUSB+VDAC_3.3V_C210",
 		.min_uV		= 3300000,
 		.max_uV		= 3300000,
 		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
@@ -1347,6 +1305,7 @@
 
 static void __init nuri_map_io(void)
 {
+	clk_xusbxti.rate = 24000000;
 	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(nuri_uartcfgs, ARRAY_SIZE(nuri_uartcfgs));
@@ -1379,7 +1338,6 @@
 	nuri_camera_init();
 
 	nuri_ehci_init();
-	clk_xusbxti.rate = 24000000;
 
 	/* Last */
 	platform_add_devices(nuri_devices, ARRAY_SIZE(nuri_devices));
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 6bb9dbd..7ebf79c 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -29,6 +29,7 @@
 #include <asm/mach-types.h>
 
 #include <plat/regs-serial.h>
+#include <plat/clock.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/iic.h>
@@ -1057,6 +1058,7 @@
 
 static void __init universal_map_io(void)
 {
+	clk_xusbxti.rate = 24000000;
 	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
diff --git a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c
index 861ceb8..ed38d03 100644
--- a/arch/arm/mach-imx/imx27-dt.c
+++ b/arch/arm/mach-imx/imx27-dt.c
@@ -35,7 +35,7 @@
 static int __init imx27_avic_add_irq_domain(struct device_node *np,
 				struct device_node *interrupt_parent)
 {
-	irq_domain_add_simple(np, 0);
+	irq_domain_add_legacy(np, 64, 0, 0, &irq_domain_simple_ops, NULL);
 	return 0;
 }
 
@@ -44,7 +44,9 @@
 {
 	static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
-	irq_domain_add_simple(np, gpio_irq_base);
+	gpio_irq_base -= 32;
+	irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops,
+				NULL);
 
 	return 0;
 }
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index 05250ae..e10f391 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -35,7 +35,7 @@
 	}
 	clk_enable(gpc_dvfs_clk);
 	mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
-	if (tzic_enable_wake() != 0)
+	if (!tzic_enable_wake())
 		cpu_do_idle();
 	clk_disable(gpc_dvfs_clk);
 }
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
index 292d55e..cf03614 100644
--- a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
+++ b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
@@ -75,4 +75,7 @@
 #define TX_SNAPSHOT_LOCKED (1<<0)
 #define RX_SNAPSHOT_LOCKED (1<<1)
 
+/* The ptp_ixp46x module will set this variable */
+extern int ixp46x_phc_index;
+
 #endif
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 3698a37..26aac36 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -86,9 +86,6 @@
 static void __init halibut_fixup(struct tag *tags, char **cmdline,
 				 struct meminfo *mi)
 {
-	mi->nr_banks=1;
-	mi->bank[0].start = PHYS_OFFSET;
-	mi->bank[0].size = (101*1024*1024);
 }
 
 static void __init halibut_map_io(void)
diff --git a/arch/arm/mach-msm/board-trout-panel.c b/arch/arm/mach-msm/board-trout-panel.c
index 25105c1..89bf6b4 100644
--- a/arch/arm/mach-msm/board-trout-panel.c
+++ b/arch/arm/mach-msm/board-trout-panel.c
@@ -12,6 +12,7 @@
 
 #include <asm/io.h>
 #include <asm/mach-types.h>
+#include <asm/system_info.h>
 
 #include <mach/msm_fb.h>
 #include <mach/vreg.h>
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index 5414f76..d4060a3 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/clkdev.h>
 
+#include <asm/system_info.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-msm/proc_comm.c b/arch/arm/mach-msm/proc_comm.c
index 67e701c..9980dc7 100644
--- a/arch/arm/mach-msm/proc_comm.c
+++ b/arch/arm/mach-msm/proc_comm.c
@@ -121,7 +121,7 @@
  * and unknown state. This function should be called early to
  * wait on the ARM9.
  */
-void __init proc_comm_boot_wait(void)
+void __devinit proc_comm_boot_wait(void)
 {
 	void __iomem *base = MSM_SHARED_RAM_BASE;
  
diff --git a/arch/arm/mach-omap1/mux.c b/arch/arm/mach-omap1/mux.c
index 087dba0..e9cc52d 100644
--- a/arch/arm/mach-omap1/mux.c
+++ b/arch/arm/mach-omap1/mux.c
@@ -27,6 +27,7 @@
 #include <linux/io.h>
 #include <linux/spinlock.h>
 
+#include <mach/hardware.h>
 
 #include <plat/mux.h>
 
diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c
index 6e90665..fb202af 100644
--- a/arch/arm/mach-omap1/timer.c
+++ b/arch/arm/mach-omap1/timer.c
@@ -47,9 +47,9 @@
 	int n = (pdev->id - 1) << 1;
 	u32 l;
 
-	l = __raw_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
+	l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
 	l |= source << n;
-	__raw_writel(l, MOD_CONF_CTRL_1);
+	omap_writel(l, MOD_CONF_CTRL_1);
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index a39fc4b..130ab00 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -20,6 +20,7 @@
 #include <linux/usb/otg.h>
 #include <linux/spi/spi.h>
 #include <linux/i2c/twl.h>
+#include <linux/mfd/twl6040.h>
 #include <linux/gpio_keys.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/fixed.h>
@@ -560,7 +561,7 @@
 	},
 };
 
-static struct twl4030_codec_data twl6040_codec = {
+static struct twl6040_codec_data twl6040_codec = {
 	/* single-step ramp for headset and handsfree */
 	.hs_left_step	= 0x0f,
 	.hs_right_step	= 0x0f,
@@ -568,7 +569,7 @@
 	.hf_right_step	= 0x1d,
 };
 
-static struct twl4030_vibra_data twl6040_vibra = {
+static struct twl6040_vibra_data twl6040_vibra = {
 	.vibldrv_res = 8,
 	.vibrdrv_res = 3,
 	.viblmotor_res = 10,
@@ -577,16 +578,14 @@
 	.vddvibr_uV = 0,	/* fixed volt supply - VBAT */
 };
 
-static struct twl4030_audio_data twl6040_audio = {
+static struct twl6040_platform_data twl6040_data = {
 	.codec		= &twl6040_codec,
 	.vibra		= &twl6040_vibra,
 	.audpwron_gpio	= 127,
-	.naudint_irq	= OMAP44XX_IRQ_SYS_2N,
 	.irq_base	= TWL6040_CODEC_IRQ_BASE,
 };
 
 static struct twl4030_platform_data sdp4430_twldata = {
-	.audio		= &twl6040_audio,
 	/* Regulators */
 	.vusim		= &sdp4430_vusim,
 	.vaux1		= &sdp4430_vaux1,
@@ -617,7 +616,8 @@
 			TWL_COMMON_REGULATOR_VCXIO |
 			TWL_COMMON_REGULATOR_VUSB |
 			TWL_COMMON_REGULATOR_CLK32KG);
-	omap4_pmic_init("twl6030", &sdp4430_twldata);
+	omap4_pmic_init("twl6030", &sdp4430_twldata,
+			&twl6040_data, OMAP44XX_IRQ_SYS_2N);
 	omap_register_i2c_bus(2, 400, NULL, 0);
 	omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo,
 				ARRAY_SIZE(sdp4430_i2c_3_boardinfo));
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 74e1687..098d183 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -137,7 +137,7 @@
 
 static void __init omap4_i2c_init(void)
 {
-	omap4_pmic_init("twl6030", &sdp4430_twldata);
+	omap4_pmic_init("twl6030", &sdp4430_twldata, NULL, 0);
 }
 
 static void __init omap4_init(void)
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index d8c0e89..1b782ba 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -25,6 +25,7 @@
 #include <linux/gpio.h>
 #include <linux/usb/otg.h>
 #include <linux/i2c/twl.h>
+#include <linux/mfd/twl6040.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/fixed.h>
 #include <linux/wl12xx.h>
@@ -284,7 +285,7 @@
 	return 0;
 }
 
-static struct twl4030_codec_data twl6040_codec = {
+static struct twl6040_codec_data twl6040_codec = {
 	/* single-step ramp for headset and handsfree */
 	.hs_left_step	= 0x0f,
 	.hs_right_step	= 0x0f,
@@ -292,17 +293,14 @@
 	.hf_right_step	= 0x1d,
 };
 
-static struct twl4030_audio_data twl6040_audio = {
+static struct twl6040_platform_data twl6040_data = {
 	.codec		= &twl6040_codec,
 	.audpwron_gpio	= 127,
-	.naudint_irq	= OMAP44XX_IRQ_SYS_2N,
 	.irq_base	= TWL6040_CODEC_IRQ_BASE,
 };
 
 /* Panda board uses the common PMIC configuration */
-static struct twl4030_platform_data omap4_panda_twldata = {
-	.audio		= &twl6040_audio,
-};
+static struct twl4030_platform_data omap4_panda_twldata;
 
 /*
  * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM
@@ -326,7 +324,8 @@
 			TWL_COMMON_REGULATOR_VCXIO |
 			TWL_COMMON_REGULATOR_VUSB |
 			TWL_COMMON_REGULATOR_CLK32KG);
-	omap4_pmic_init("twl6030", &omap4_panda_twldata);
+	omap4_pmic_init("twl6030", &omap4_panda_twldata,
+			&twl6040_data, OMAP44XX_IRQ_SYS_2N);
 	omap_register_i2c_bus(2, 400, NULL, 0);
 	/*
 	 * Bus 3 is attached to the DVI port where devices like the pico DLP
diff --git a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
index 7072e0d..3d9d746 100644
--- a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
+++ b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
@@ -165,83 +165,3 @@
 
 	return 0;
 }
-
-#ifdef CONFIG_CPU_FREQ
-/*
- * Walk PRCM rate table and fillout cpufreq freq_table
- * XXX This should be replaced by an OPP layer in the near future
- */
-static struct cpufreq_frequency_table *freq_table;
-
-void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
-{
-	const struct prcm_config *prcm;
-	int i = 0;
-	int tbl_sz = 0;
-
-	if (!cpu_is_omap24xx())
-		return;
-
-	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
-		if (!(prcm->flags & cpu_mask))
-			continue;
-		if (prcm->xtal_speed != sclk->rate)
-			continue;
-
-		/* don't put bypass rates in table */
-		if (prcm->dpll_speed == prcm->xtal_speed)
-			continue;
-
-		tbl_sz++;
-	}
-
-	/*
-	 * XXX Ensure that we're doing what CPUFreq expects for this error
-	 * case and the following one
-	 */
-	if (tbl_sz == 0) {
-		pr_warning("%s: no matching entries in rate_table\n",
-			   __func__);
-		return;
-	}
-
-	/* Include the CPUFREQ_TABLE_END terminator entry */
-	tbl_sz++;
-
-	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * tbl_sz,
-			     GFP_ATOMIC);
-	if (!freq_table) {
-		pr_err("%s: could not kzalloc frequency table\n", __func__);
-		return;
-	}
-
-	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
-		if (!(prcm->flags & cpu_mask))
-			continue;
-		if (prcm->xtal_speed != sclk->rate)
-			continue;
-
-		/* don't put bypass rates in table */
-		if (prcm->dpll_speed == prcm->xtal_speed)
-			continue;
-
-		freq_table[i].index = i;
-		freq_table[i].frequency = prcm->mpu_speed / 1000;
-		i++;
-	}
-
-	freq_table[i].index = i;
-	freq_table[i].frequency = CPUFREQ_TABLE_END;
-
-	*table = &freq_table[0];
-}
-
-void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
-{
-	if (!cpu_is_omap24xx())
-		return;
-
-	kfree(freq_table);
-}
-
-#endif
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index f57ed5b..d9f4931 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -536,10 +536,5 @@
 	.clk_set_rate		= omap2_clk_set_rate,
 	.clk_set_parent		= omap2_clk_set_parent,
 	.clk_disable_unused	= omap2_clk_disable_unused,
-#ifdef CONFIG_CPU_FREQ
-	/* These will be removed when the OPP code is integrated */
-	.clk_init_cpufreq_table	= omap2_clk_init_cpufreq_table,
-	.clk_exit_cpufreq_table	= omap2_clk_exit_cpufreq_table,
-#endif
 };
 
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index b8c2a68..a1bb23a 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -146,14 +146,6 @@
 extern const struct clksel_rate gfx_l3_rates[];
 extern const struct clksel_rate dsp_ick_rates[];
 
-#if defined(CONFIG_ARCH_OMAP2) && defined(CONFIG_CPU_FREQ)
-extern void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
-extern void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
-#else
-#define omap2_clk_init_cpufreq_table	0
-#define omap2_clk_exit_cpufreq_table	0
-#endif
-
 extern const struct clkops clkops_omap2_iclk_dflt_wait;
 extern const struct clkops clkops_omap2_iclk_dflt;
 extern const struct clkops clkops_omap2_iclk_idle_only;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 2c27fdb..7144ae6 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1422,6 +1422,9 @@
 		goto dis_opt_clks;
 	_write_sysconfig(v, oh);
 
+	if (oh->class->sysc->srst_udelay)
+		udelay(oh->class->sysc->srst_udelay);
+
 	if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
 		omap_test_timeout((omap_hwmod_read(oh,
 						    oh->class->sysc->syss_offs)
@@ -1903,10 +1906,20 @@
  */
 int omap_hwmod_softreset(struct omap_hwmod *oh)
 {
-	if (!oh)
+	u32 v;
+	int ret;
+
+	if (!oh || !(oh->_sysc_cache))
 		return -EINVAL;
 
-	return _ocp_softreset(oh);
+	v = oh->_sysc_cache;
+	ret = _set_softreset(oh, &v);
+	if (ret)
+		goto error;
+	_write_sysconfig(v, oh);
+
+error:
+	return ret;
 }
 
 /**
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index a5409ce..a6bde34 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -1000,7 +1000,6 @@
 			.flags	= OMAP_FIREWALL_L4,
 		}
 	},
-	.flags		= OCPIF_SWSUP_IDLE,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index c4f56cb..04a3885 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -1049,7 +1049,6 @@
 	.slave		= &omap2430_dss_venc_hwmod,
 	.clk		= "dss_ick",
 	.addr		= omap2_dss_venc_addrs,
-	.flags		= OCPIF_SWSUP_IDLE,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 34b9766d..db86ce9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1676,7 +1676,6 @@
 			.flags	= OMAP_FIREWALL_L4,
 		}
 	},
-	.flags		= OCPIF_SWSUP_IDLE,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index cc9bd10..6abc757 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2594,6 +2594,15 @@
 static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = {
 	.rev_offs	= 0x0000,
 	.sysc_offs	= 0x0010,
+	/*
+	 * ISS needs 100 OCP clk cycles delay after a softreset before
+	 * accessing sysconfig again.
+	 * The lowest frequency at the moment for L3 bus is 100 MHz, so
+	 * 1usec delay is needed. Add an x2 margin to be safe (2 usecs).
+	 *
+	 * TODO: Indicate errata when available.
+	 */
+	.srst_udelay	= 2,
 	.sysc_flags	= (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
 			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
 	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 0cdd359..9fc2f44 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -108,8 +108,14 @@
 static void omap_uart_set_smartidle(struct platform_device *pdev)
 {
 	struct omap_device *od = to_omap_device(pdev);
+	u8 idlemode;
 
-	omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_SMART);
+	if (od->hwmods[0]->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+		idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+	else
+		idlemode = HWMOD_IDLEMODE_SMART;
+
+	omap_hwmod_set_slave_idlemode(od->hwmods[0], idlemode);
 }
 
 #else
@@ -120,124 +126,8 @@
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_OMAP_MUX
-static struct omap_device_pad default_uart1_pads[] __initdata = {
-	{
-		.name	= "uart1_cts.uart1_cts",
-		.enable	= OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "uart1_rts.uart1_rts",
-		.enable	= OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "uart1_tx.uart1_tx",
-		.enable	= OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "uart1_rx.uart1_rx",
-		.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
-		.enable	= OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
-		.idle	= OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
-	},
-};
-
-static struct omap_device_pad default_uart2_pads[] __initdata = {
-	{
-		.name	= "uart2_cts.uart2_cts",
-		.enable	= OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "uart2_rts.uart2_rts",
-		.enable	= OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "uart2_tx.uart2_tx",
-		.enable	= OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "uart2_rx.uart2_rx",
-		.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
-		.enable	= OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
-		.idle	= OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
-	},
-};
-
-static struct omap_device_pad default_uart3_pads[] __initdata = {
-	{
-		.name	= "uart3_cts_rctx.uart3_cts_rctx",
-		.enable	= OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "uart3_rts_sd.uart3_rts_sd",
-		.enable	= OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "uart3_tx_irtx.uart3_tx_irtx",
-		.enable	= OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "uart3_rx_irrx.uart3_rx_irrx",
-		.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
-		.enable	= OMAP_PIN_INPUT | OMAP_MUX_MODE0,
-		.idle	= OMAP_PIN_INPUT | OMAP_MUX_MODE0,
-	},
-};
-
-static struct omap_device_pad default_omap36xx_uart4_pads[] __initdata = {
-	{
-		.name   = "gpmc_wait2.uart4_tx",
-		.enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "gpmc_wait3.uart4_rx",
-		.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
-		.enable	= OMAP_PIN_INPUT | OMAP_MUX_MODE2,
-		.idle	= OMAP_PIN_INPUT | OMAP_MUX_MODE2,
-	},
-};
-
-static struct omap_device_pad default_omap4_uart4_pads[] __initdata = {
-	{
-		.name	= "uart4_tx.uart4_tx",
-		.enable	= OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
-	},
-	{
-		.name	= "uart4_rx.uart4_rx",
-		.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
-		.enable	= OMAP_PIN_INPUT | OMAP_MUX_MODE0,
-		.idle	= OMAP_PIN_INPUT | OMAP_MUX_MODE0,
-	},
-};
-
 static void omap_serial_fill_default_pads(struct omap_board_data *bdata)
 {
-	switch (bdata->id) {
-	case 0:
-		bdata->pads = default_uart1_pads;
-		bdata->pads_cnt = ARRAY_SIZE(default_uart1_pads);
-		break;
-	case 1:
-		bdata->pads = default_uart2_pads;
-		bdata->pads_cnt = ARRAY_SIZE(default_uart2_pads);
-		break;
-	case 2:
-		bdata->pads = default_uart3_pads;
-		bdata->pads_cnt = ARRAY_SIZE(default_uart3_pads);
-		break;
-	case 3:
-		if (cpu_is_omap44xx()) {
-			bdata->pads = default_omap4_uart4_pads;
-			bdata->pads_cnt =
-				ARRAY_SIZE(default_omap4_uart4_pads);
-		} else if (cpu_is_omap3630()) {
-			bdata->pads = default_omap36xx_uart4_pads;
-			bdata->pads_cnt =
-				ARRAY_SIZE(default_omap36xx_uart4_pads);
-		}
-		break;
-	default:
-		break;
-	}
 }
 #else
 static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {}
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 4b57757..7a7b893 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -37,6 +37,16 @@
 	.flags		= I2C_CLIENT_WAKE,
 };
 
+static struct i2c_board_info __initdata omap4_i2c1_board_info[] = {
+	{
+		.addr		= 0x48,
+		.flags		= I2C_CLIENT_WAKE,
+	},
+	{
+		I2C_BOARD_INFO("twl6040", 0x4b),
+	},
+};
+
 void __init omap_pmic_init(int bus, u32 clkrate,
 			   const char *pmic_type, int pmic_irq,
 			   struct twl4030_platform_data *pmic_data)
@@ -49,14 +59,31 @@
 	omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
 }
 
+void __init omap4_pmic_init(const char *pmic_type,
+		    struct twl4030_platform_data *pmic_data,
+		    struct twl6040_platform_data *twl6040_data, int twl6040_irq)
+{
+	/* PMIC part*/
+	strncpy(omap4_i2c1_board_info[0].type, pmic_type,
+		sizeof(omap4_i2c1_board_info[0].type));
+	omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;
+	omap4_i2c1_board_info[0].platform_data = pmic_data;
+
+	/* TWL6040 audio IC part */
+	omap4_i2c1_board_info[1].irq = twl6040_irq;
+	omap4_i2c1_board_info[1].platform_data = twl6040_data;
+
+	omap_register_i2c_bus(1, 400, omap4_i2c1_board_info, 2);
+
+}
+
 void __init omap_pmic_late_init(void)
 {
 	/* Init the OMAP TWL parameters (if PMIC has been registerd) */
-	if (!pmic_i2c_board_info.irq)
-		return;
-
-	omap3_twl_init();
-	omap4_twl_init();
+	if (pmic_i2c_board_info.irq)
+		omap3_twl_init();
+	if (omap4_i2c1_board_info[0].irq)
+		omap4_twl_init();
 }
 
 #if defined(CONFIG_ARCH_OMAP3)
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
index 275dde8..0962748 100644
--- a/arch/arm/mach-omap2/twl-common.h
+++ b/arch/arm/mach-omap2/twl-common.h
@@ -29,6 +29,7 @@
 
 
 struct twl4030_platform_data;
+struct twl6040_platform_data;
 
 void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
 		    struct twl4030_platform_data *pmic_data);
@@ -46,12 +47,9 @@
 	omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data);
 }
 
-static inline void omap4_pmic_init(const char *pmic_type,
-				   struct twl4030_platform_data *pmic_data)
-{
-	/* Phoenix Audio IC needs I2C1 to start with 400 KHz or less */
-	omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data);
-}
+void omap4_pmic_init(const char *pmic_type,
+		    struct twl4030_platform_data *pmic_data,
+		    struct twl6040_platform_data *audio_data, int twl6040_irq);
 
 void omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
 			   u32 pdata_flags, u32 regulators_flags);
diff --git a/arch/arm/mach-s5pv210/dma.c b/arch/arm/mach-s5pv210/dma.c
index 86ce62f..b8337e2 100644
--- a/arch/arm/mach-s5pv210/dma.c
+++ b/arch/arm/mach-s5pv210/dma.c
@@ -33,8 +33,6 @@
 #include <mach/irqs.h>
 #include <mach/dma.h>
 
-static u64 dma_dmamask = DMA_BIT_MASK(32);
-
 static u8 pdma0_peri[] = {
 	DMACH_UART0_RX,
 	DMACH_UART0_TX,
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index a9ea64e..48d018f 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -484,8 +484,8 @@
 	.gpio_defaults[8] = 0x0100,
 	.gpio_defaults[9] = 0x0100,
 	.gpio_defaults[10] = 0x0100,
-	.ldo[0]	= { S5PV210_MP03(6), NULL, &wm8994_ldo1_data },	/* XM0FRNB_2 */
-	.ldo[1]	= { 0, NULL, &wm8994_ldo2_data },
+	.ldo[0]	= { S5PV210_MP03(6), &wm8994_ldo1_data },	/* XM0FRNB_2 */
+	.ldo[1]	= { 0, &wm8994_ldo2_data },
 };
 
 /* GPIO I2C PMIC */
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 2cf5ed7..a8933de 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -674,8 +674,8 @@
 	.gpio_defaults[8] = 0x0100,
 	.gpio_defaults[9] = 0x0100,
 	.gpio_defaults[10] = 0x0100,
-	.ldo[0]	= { S5PV210_MP03(6), NULL, &wm8994_ldo1_data },	/* XM0FRNB_2 */
-	.ldo[1]	= { 0, NULL, &wm8994_ldo2_data },
+	.ldo[0]	= { S5PV210_MP03(6), &wm8994_ldo1_data },	/* XM0FRNB_2 */
+	.ldo[1]	= { 0, &wm8994_ldo2_data },
 };
 
 /* GPIO I2C PMIC */
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 880d02e..ef7099e 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -17,6 +17,7 @@
 config UX500_SOC_DB8500
 	bool
 	select MFD_DB8500_PRCMU
+	select REGULATOR
 	select REGULATOR_DB8500_PRCMU
 	select CPU_FREQ_TABLE if CPU_FREQ
 
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index d2058ef..eff5842 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -99,7 +99,7 @@
 	 */
 	write_pen_release(cpu_logical_map(cpu));
 
-	gic_raise_softirq(cpumask_of(cpu), 1);
+	smp_send_reschedule(cpu);
 
 	timeout = jiffies + (1 * HZ);
 	while (time_before(jiffies, timeout)) {
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7edef91..7c8a7d8 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -723,7 +723,7 @@
 	bool "Select the High exception vector"
 	help
 	  Say Y here to select high exception vector(0xFFFF0000~).
-	  The exception vector can be vary depending on the platform
+	  The exception vector can vary depending on the platform
 	  design in nommu mode. If your platform needs to select
 	  high exception vector, say Y.
 	  Otherwise or if you are unsure, say N, and the low exception
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9055b5a..f074675 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -320,7 +320,7 @@
 	 */
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 6486d2f..d51225f 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -13,6 +13,7 @@
 #include <asm/sections.h>
 #include <asm/page.h>
 #include <asm/setup.h>
+#include <asm/traps.h>
 #include <asm/mach/arch.h>
 
 #include "mm.h"
@@ -39,6 +40,7 @@
  */
 void __init paging_init(struct machine_desc *mdesc)
 {
+	early_trap_init((void *)CONFIG_VECTORS_BASE);
 	bootmem_init();
 }
 
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index f1c8486..c2e2b66 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -255,6 +255,18 @@
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
 #endif
+#ifndef CONFIG_ARM_THUMBEE
+	mrc	p15, 0, r0, c0, c1, 0		@ read ID_PFR0 for ThumbEE
+	and	r0, r0, #(0xf << 12)		@ ThumbEE enabled field
+	teq	r0, #(1 << 12)			@ check if ThumbEE is present
+	bne	1f
+	mov	r5, #0
+	mcr	p14, 6, r5, c1, c0, 0		@ Initialize TEEHBR to 0
+	mrc	p14, 6, r0, c0, c0, 0		@ load TEECR
+	orr	r0, r0, #1			@ set the 1st bit in order to
+	mcr	p14, 6, r0, c0, c0, 0		@ stop userspace TEEHBR access
+1:
+#endif
 	adr	r5, v7_crval
 	ldmia	r5, {r5, r6}
 #ifdef CONFIG_CPU_ENDIAN_BE8
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 8506cbb..62ec5c4 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -398,32 +398,6 @@
 	.ops	= &clkops_null,
 };
 
-#ifdef CONFIG_CPU_FREQ
-void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
-{
-	unsigned long flags;
-
-	if (!arch_clock || !arch_clock->clk_init_cpufreq_table)
-		return;
-
-	spin_lock_irqsave(&clockfw_lock, flags);
-	arch_clock->clk_init_cpufreq_table(table);
-	spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-
-void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
-{
-	unsigned long flags;
-
-	if (!arch_clock || !arch_clock->clk_exit_cpufreq_table)
-		return;
-
-	spin_lock_irqsave(&clockfw_lock, flags);
-	arch_clock->clk_exit_cpufreq_table(table);
-	spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-#endif
-
 /*
  *
  */
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 240a7b9..d0ef57c 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -272,8 +272,6 @@
 #endif
 };
 
-struct cpufreq_frequency_table;
-
 struct clk_functions {
 	int		(*clk_enable)(struct clk *clk);
 	void		(*clk_disable)(struct clk *clk);
@@ -283,10 +281,6 @@
 	void		(*clk_allow_idle)(struct clk *clk);
 	void		(*clk_deny_idle)(struct clk *clk);
 	void		(*clk_disable_unused)(struct clk *clk);
-#ifdef CONFIG_CPU_FREQ
-	void		(*clk_init_cpufreq_table)(struct cpufreq_frequency_table **);
-	void		(*clk_exit_cpufreq_table)(struct cpufreq_frequency_table **);
-#endif
 };
 
 extern int mpurate;
@@ -301,10 +295,6 @@
 extern unsigned long followparent_recalc(struct clk *clk);
 extern void clk_enable_init_clocks(void);
 unsigned long omap_fixed_divisor_recalc(struct clk *clk);
-#ifdef CONFIG_CPU_FREQ
-extern void clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
-extern void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
-#endif
 extern struct clk *omap_clk_get_by_name(const char *name);
 extern int omap_clk_enable_autoidle_all(void);
 extern int omap_clk_disable_autoidle_all(void);
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 8070145..3f26db4 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -305,6 +305,7 @@
  * @rev_offs: IP block revision register offset (from module base addr)
  * @sysc_offs: OCP_SYSCONFIG register offset (from module base addr)
  * @syss_offs: OCP_SYSSTATUS register offset (from module base addr)
+ * @srst_udelay: Delay needed after doing a softreset in usecs
  * @idlemodes: One or more of {SIDLE,MSTANDBY}_{OFF,FORCE,SMART}
  * @sysc_flags: SYS{C,S}_HAS* flags indicating SYSCONFIG bits supported
  * @clockact: the default value of the module CLOCKACTIVITY bits
@@ -330,9 +331,10 @@
 	u16 sysc_offs;
 	u16 syss_offs;
 	u16 sysc_flags;
+	struct omap_hwmod_sysc_fields *sysc_fields;
+	u8 srst_udelay;
 	u8 idlemodes;
 	u8 clockact;
-	struct omap_hwmod_sysc_fields *sysc_fields;
 };
 
 /**
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index eec98af..f9a8c53 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -348,7 +348,6 @@
 			sdrc_actim_ctrl_b_1, sdrc_mr_1);
 }
 
-#ifdef CONFIG_PM
 void omap3_sram_restore_context(void)
 {
 	omap_sram_ceil = omap_sram_base + omap_sram_size;
@@ -358,17 +357,18 @@
 			       omap3_sram_configure_core_dpll_sz);
 	omap_push_sram_idle();
 }
-#endif /* CONFIG_PM */
-
-#endif /* CONFIG_ARCH_OMAP3 */
 
 static inline int omap34xx_sram_init(void)
 {
-#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
 	omap3_sram_restore_context();
-#endif
 	return 0;
 }
+#else
+static inline int omap34xx_sram_init(void)
+{
+	return 0;
+}
+#endif /* CONFIG_ARCH_OMAP3 */
 
 static inline int am33xx_sram_init(void)
 {
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 71553f4..a0ffc77d 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -302,6 +302,7 @@
 config SAMSUNG_PM_DEBUG
 	bool "S3C2410 PM Suspend debug"
 	depends on PM
+	select DEBUG_LL
 	help
 	  Say Y here if you want verbose debugging from the PM Suspend and
 	  Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
index f13b78d..ab4577f 100644
--- a/arch/c6x/include/asm/irq.h
+++ b/arch/c6x/include/asm/irq.h
@@ -42,10 +42,6 @@
 /* This number is used when no interrupt has been assigned */
 #define NO_IRQ		0
 
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
-extern irq_hw_number_t virq_to_hw(unsigned int virq);
-
 extern void __init init_pic_c64xplus(void);
 
 extern void init_IRQ(void);
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
index 65b8ddf..c90fb5e 100644
--- a/arch/c6x/kernel/irq.c
+++ b/arch/c6x/kernel/irq.c
@@ -130,16 +130,3 @@
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
-
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
-	return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
-irq_hw_number_t virq_to_hw(unsigned int virq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(virq);
-	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
-}
-EXPORT_SYMBOL_GPL(virq_to_hw);
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
index 4c96187..4f37dbb 100644
--- a/arch/ia64/include/asm/cmpxchg.h
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -1 +1,147 @@
-#include <asm/intrinsics.h>
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+/*
+ * Compare/Exchange, forked from asm/intrinsics.h
+ * which was:
+ *
+ *	Copyright (C) 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalid xchg().
+ */
+extern void ia64_xchg_called_with_bad_pointer(void);
+
+#define __xchg(x, ptr, size)						\
+({									\
+	unsigned long __xchg_result;					\
+									\
+	switch (size) {							\
+	case 1:								\
+		__xchg_result = ia64_xchg1((__u8 *)ptr, x);		\
+		break;							\
+									\
+	case 2:								\
+		__xchg_result = ia64_xchg2((__u16 *)ptr, x);		\
+		break;							\
+									\
+	case 4:								\
+		__xchg_result = ia64_xchg4((__u32 *)ptr, x);		\
+		break;							\
+									\
+	case 8:								\
+		__xchg_result = ia64_xchg8((__u64 *)ptr, x);		\
+		break;							\
+	default:							\
+		ia64_xchg_called_with_bad_pointer();			\
+	}								\
+	__xchg_result;							\
+})
+
+#define xchg(ptr, x)							\
+((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern long ia64_cmpxchg_called_with_bad_pointer(void);
+
+#define ia64_cmpxchg(sem, ptr, old, new, size)				\
+({									\
+	__u64 _o_, _r_;							\
+									\
+	switch (size) {							\
+	case 1:								\
+		_o_ = (__u8) (long) (old);				\
+		break;							\
+	case 2:								\
+		_o_ = (__u16) (long) (old);				\
+		break;							\
+	case 4:								\
+		_o_ = (__u32) (long) (old);				\
+		break;							\
+	case 8:								\
+		_o_ = (__u64) (long) (old);				\
+		break;							\
+	default:							\
+		break;							\
+	}								\
+	switch (size) {							\
+	case 1:								\
+		_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_);	\
+		break;							\
+									\
+	case 2:								\
+		_r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_);	\
+		break;							\
+									\
+	case 4:								\
+		_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_);	\
+		break;							\
+									\
+	case 8:								\
+		_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_);	\
+		break;							\
+									\
+	default:							\
+		_r_ = ia64_cmpxchg_called_with_bad_pointer();		\
+		break;							\
+	}								\
+	(__typeof__(old)) _r_;						\
+})
+
+#define cmpxchg_acq(ptr, o, n)	\
+	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr, o, n)	\
+	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
+
+/* for compatibility with other platforms: */
+#define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
+
+#define cmpxchg_local		cmpxchg
+#define cmpxchg64_local		cmpxchg64
+
+#ifdef CONFIG_IA64_DEBUG_CMPXCHG
+# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
+# define CMPXCHG_BUGCHECK(v)						\
+do {									\
+	if (_cmpxchg_bugcheck_count-- <= 0) {				\
+		void *ip;						\
+		extern int printk(const char *fmt, ...);		\
+		ip = (void *) ia64_getreg(_IA64_REG_IP);		\
+		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));\
+		break;							\
+	}								\
+} while (0)
+#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
+# define CMPXCHG_BUGCHECK_DECL
+# define CMPXCHG_BUGCHECK(v)
+#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 0ab82cc..d2bf1fd 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -106,15 +106,16 @@
 		return -EFAULT;
 
 	{
-		register unsigned long r8 __asm ("r8") = 0;
+		register unsigned long r8 __asm ("r8");
 		unsigned long prev;
 		__asm__ __volatile__(
 			"	mf;;					\n"
-			"	mov ar.ccv=%3;;				\n"
-			"[1:]	cmpxchg4.acq %0=[%1],%2,ar.ccv		\n"
+			"	mov %0=r0				\n"
+			"	mov ar.ccv=%4;;				\n"
+			"[1:]	cmpxchg4.acq %1=[%2],%3,ar.ccv		\n"
 			"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"
 			"[2:]"
-			: "=r" (prev)
+			: "=r" (r8), "=r" (prev)
 			: "r" (uaddr), "r" (newval),
 			  "rO" ((long) (unsigned) oldval)
 			: "memory");
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index e4076b5..d129e36 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -18,6 +18,7 @@
 #else
 # include <asm/gcc_intrin.h>
 #endif
+#include <asm/cmpxchg.h>
 
 #define ia64_native_get_psr_i()	(ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)
 
@@ -81,119 +82,6 @@
 
 #define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, rel) + (i)) /* return new value */
 
-/*
- * This function doesn't exist, so you'll get a linker error if
- * something tries to do an invalid xchg().
- */
-extern void ia64_xchg_called_with_bad_pointer (void);
-
-#define __xchg(x,ptr,size)						\
-({									\
-	unsigned long __xchg_result;					\
-									\
-	switch (size) {							\
-	      case 1:							\
-		__xchg_result = ia64_xchg1((__u8 *)ptr, x);		\
-		break;							\
-									\
-	      case 2:							\
-		__xchg_result = ia64_xchg2((__u16 *)ptr, x);		\
-		break;							\
-									\
-	      case 4:							\
-		__xchg_result = ia64_xchg4((__u32 *)ptr, x);		\
-		break;							\
-									\
-	      case 8:							\
-		__xchg_result = ia64_xchg8((__u64 *)ptr, x);		\
-		break;							\
-	      default:							\
-		ia64_xchg_called_with_bad_pointer();			\
-	}								\
-	__xchg_result;							\
-})
-
-#define xchg(ptr,x)							     \
-  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern long ia64_cmpxchg_called_with_bad_pointer (void);
-
-#define ia64_cmpxchg(sem,ptr,old,new,size)						\
-({											\
-	__u64 _o_, _r_;									\
-											\
-	switch (size) {									\
-	      case 1: _o_ = (__u8 ) (long) (old); break;				\
-	      case 2: _o_ = (__u16) (long) (old); break;				\
-	      case 4: _o_ = (__u32) (long) (old); break;				\
-	      case 8: _o_ = (__u64) (long) (old); break;				\
-	      default: break;								\
-	}										\
-	switch (size) {									\
-	      case 1:									\
-	      	_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_);			\
-		break;									\
-											\
-	      case 2:									\
-	       _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_);			\
-		break;									\
-											\
-	      case 4:									\
-	      	_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_);			\
-		break;									\
-											\
-	      case 8:									\
-		_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_);			\
-		break;									\
-											\
-	      default:									\
-		_r_ = ia64_cmpxchg_called_with_bad_pointer();				\
-		break;									\
-	}										\
-	(__typeof__(old)) _r_;								\
-})
-
-#define cmpxchg_acq(ptr, o, n)	\
-	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
-#define cmpxchg_rel(ptr, o, n)	\
-	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
-
-/* for compatibility with other platforms: */
-#define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
-
-#define cmpxchg_local		cmpxchg
-#define cmpxchg64_local		cmpxchg64
-
-#ifdef CONFIG_IA64_DEBUG_CMPXCHG
-# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
-# define CMPXCHG_BUGCHECK(v)							\
-  do {										\
-	if (_cmpxchg_bugcheck_count-- <= 0) {					\
-		void *ip;							\
-		extern int printk(const char *fmt, ...);			\
-		ip = (void *) ia64_getreg(_IA64_REG_IP);			\
-		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));	\
-		break;								\
-	}									\
-  } while (0)
-#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
-# define CMPXCHG_BUGCHECK_DECL
-# define CMPXCHG_BUGCHECK(v)
-#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
-
 #endif
 
 #ifdef __KERNEL__
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9d0fd7d..f00ba02 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -604,12 +604,6 @@
 	spin_unlock(&(x)->ctx_lock);
 }
 
-static inline unsigned int
-pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
-{
-	return do_munmap(mm, addr, len);
-}
-
 static inline unsigned long 
 pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
 {
@@ -1458,8 +1452,9 @@
  * a PROTECT_CTX() section.
  */
 static int
-pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
+pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
 {
+	struct task_struct *task = current;
 	int r;
 
 	/* sanity checks */
@@ -1473,13 +1468,8 @@
 	/*
 	 * does the actual unmapping
 	 */
-	down_write(&task->mm->mmap_sem);
+	r = vm_munmap((unsigned long)vaddr, size);
 
-	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
-
-	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);
-
-	up_write(&task->mm->mmap_sem);
 	if (r !=0) {
 		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
 	}
@@ -1945,7 +1935,7 @@
 	 * because some VM function reenables interrupts.
 	 *
 	 */
-	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
+	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);
 
 	return 0;
 }
diff --git a/arch/m68k/configs/m5275evb_defconfig b/arch/m68k/configs/m5275evb_defconfig
index 33c32ae..a1230e8 100644
--- a/arch/m68k/configs/m5275evb_defconfig
+++ b/arch/m68k/configs/m5275evb_defconfig
@@ -49,7 +49,6 @@
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_FEC=y
-CONFIG_FEC2=y
 # CONFIG_NETDEV_1000 is not set
 # CONFIG_NETDEV_10000 is not set
 CONFIG_PPP=y
diff --git a/arch/m68k/platform/527x/config.c b/arch/m68k/platform/527x/config.c
index 7ed848c..f91a532 100644
--- a/arch/m68k/platform/527x/config.c
+++ b/arch/m68k/platform/527x/config.c
@@ -74,9 +74,7 @@
 	writew(par | 0xf00, MCF_IPSBAR + 0x100082);
 	v = readb(MCF_IPSBAR + 0x100078);
 	writeb(v | 0xc0, MCF_IPSBAR + 0x100078);
-#endif
 
-#ifdef CONFIG_FEC2
 	/* Set multi-function pins to ethernet mode for fec1 */
 	par = readw(MCF_IPSBAR + 0x100082);
 	writew(par | 0xa0, MCF_IPSBAR + 0x100082);
diff --git a/arch/m68k/platform/68EZ328/Makefile b/arch/m68k/platform/68EZ328/Makefile
index ee97735..b44d799 100644
--- a/arch/m68k/platform/68EZ328/Makefile
+++ b/arch/m68k/platform/68EZ328/Makefile
@@ -3,9 +3,3 @@
 #
 
 obj-y := config.o
-
-extra-y := bootlogo.rh
-
-$(obj)/bootlogo.rh: $(src)/bootlogo.h
-	perl $(src)/../68328/bootlogo.pl < $(src)/bootlogo.h \
-		> $(obj)/bootlogo.rh
diff --git a/arch/m68k/platform/68VZ328/Makefile b/arch/m68k/platform/68VZ328/Makefile
index 447ffa0..a49d75e 100644
--- a/arch/m68k/platform/68VZ328/Makefile
+++ b/arch/m68k/platform/68VZ328/Makefile
@@ -3,14 +3,9 @@
 #
 
 obj-y		:= config.o
-logo-$(UCDIMM)	:= bootlogo.rh
-logo-$(DRAGEN2)	:= screen.h
-extra-y		:= $(logo-y)
-
-$(obj)/bootlogo.rh: $(src)/../68EZ328/bootlogo.h
-	perl $(src)/bootlogo.pl < $(src)/../68328/bootlogo.h > $(obj)/bootlogo.rh
+extra-$(DRAGEN2):= screen.h
 
 $(obj)/screen.h: $(src)/screen.xbm $(src)/xbm2lcd.pl
 	perl $(src)/xbm2lcd.pl < $(src)/screen.xbm > $(obj)/screen.h
 
-clean-files := $(obj)/screen.h $(obj)/bootlogo.rh
+clean-files := $(obj)/screen.h
diff --git a/arch/m68k/platform/68EZ328/bootlogo.h b/arch/m68k/platform/68VZ328/bootlogo.h
similarity index 99%
rename from arch/m68k/platform/68EZ328/bootlogo.h
rename to arch/m68k/platform/68VZ328/bootlogo.h
index e842bda..b38e2b2 100644
--- a/arch/m68k/platform/68EZ328/bootlogo.h
+++ b/arch/m68k/platform/68VZ328/bootlogo.h
@@ -1,6 +1,6 @@
 #define splash_width 640
 #define splash_height 480
-static unsigned char splash_bits[] = {
+unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/arch/m68k/platform/coldfire/device.c b/arch/m68k/platform/coldfire/device.c
index fa50c48..7af9736 100644
--- a/arch/m68k/platform/coldfire/device.c
+++ b/arch/m68k/platform/coldfire/device.c
@@ -114,7 +114,7 @@
 
 static struct platform_device mcf_fec1 = {
 	.name			= "fec",
-	.id			= 0,
+	.id			= 1,
 	.num_resources		= ARRAY_SIZE(mcf_fec1_resources),
 	.resource		= mcf_fec1_resources,
 };
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi
new file mode 100644
index 0000000..1cf0b77
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi
@@ -0,0 +1,43 @@
+/*
+ * PQ3 MPIC Message (Group B) device tree stub [ controller @ offset 0x42400 ]
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+message@42400 {
+	compatible = "fsl,mpic-v3.1-msgr";
+	reg = <0x42400 0x200>;
+	interrupts = <
+		0xb4 2 0 0
+		0xb5 2 0 0
+		0xb6 2 0 0
+		0xb7 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
index fdedf7b..71c30eb 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
@@ -53,6 +53,16 @@
 		      3 0 3 0>;
 };
 
+message@41400 {
+	compatible = "fsl,mpic-v3.1-msgr";
+	reg = <0x41400 0x200>;
+	interrupts = <
+		0xb0 2 0 0
+		0xb1 2 0 0
+		0xb2 2 0 0
+		0xb3 2 0 0>;
+};
+
 msi@41600 {
 	compatible = "fsl,mpic-msi";
 	reg = <0x41600 0x80>;
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index cf417e51..e648af92 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -33,8 +33,6 @@
 /* Same thing, used by the generic IRQ code */
 #define NR_IRQS_LEGACY		NUM_ISA_INTERRUPTS
 
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
 extern irq_hw_number_t virq_to_hw(unsigned int virq);
 
 /**
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index c65b929..c9f698a 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -275,9 +275,6 @@
 	unsigned int		isu_mask;
 	/* Number of sources */
 	unsigned int		num_sources;
-	/* default senses array */
-	unsigned char		*senses;
-	unsigned int		senses_count;
 
 	/* vector numbers used for internal sources (ipi/timers) */
 	unsigned int		ipi_vecs[4];
@@ -415,21 +412,6 @@
 extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
 			    phys_addr_t phys_addr);
 
-/* Set default sense codes
- *
- * @mpic:	controller
- * @senses:	array of sense codes
- * @count:	size of above array
- *
- * Optionally provide an array (indexed on hardware interrupt numbers
- * for this MPIC) of default sense codes for the chip. Those are linux
- * sense codes IRQ_TYPE_*
- *
- * The driver gets ownership of the pointer, don't dispose of it or
- * anything like that. __init only.
- */
-extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count);
-
 
 /* Initialize the controller. After this has been called, none of the above
  * should be called again for this mpic
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
index 3ec37dc..326d33c 100644
--- a/arch/powerpc/include/asm/mpic_msgr.h
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/spinlock.h>
+#include <asm/smp.h>
 
 struct mpic_msgr {
 	u32 __iomem *base;
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index b86faa9..8a97aa7 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -15,11 +15,6 @@
 #ifndef __ASM_POWERPC_REG_BOOKE_H__
 #define __ASM_POWERPC_REG_BOOKE_H__
 
-#ifdef CONFIG_BOOKE_WDT
-extern u32 booke_wdt_enabled;
-extern u32 booke_wdt_period;
-#endif /* CONFIG_BOOKE_WDT */
-
 /* Machine State Register (MSR) Fields */
 #define MSR_GS		(1<<28) /* Guest state */
 #define MSR_UCLE	(1<<26)	/* User-mode cache lock enable */
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3e57a00..ba3aeb4 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -206,40 +206,43 @@
 	andi.	r10,r10,MSR_EE		/* Did EE change? */
 	beq	1f
 
-	/* Save handler and return address into the 2 unused words
-	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
-	 * else can be recovered from the pt_regs except r3 which for
-	 * normal interrupts has been set to pt_regs and for syscalls
-	 * is an argument, so we temporarily use ORIG_GPR3 to save it
-	 */
-	stw	r9,8(r1)
-	stw	r11,12(r1)
-	stw	r3,ORIG_GPR3(r1)
 	/*
 	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
 	 * If from user mode there is only one stack frame on the stack, and
 	 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
 	 * stack frame to make trace_hardirqs_off happy.
+	 *
+	 * This is handy because we also need to save a bunch of GPRs,
+	 * r3 can be different from GPR3(r1) at this point, r9 and r11
+	 * contains the old MSR and handler address respectively,
+	 * r4 & r5 can contain page fault arguments that need to be passed
+	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
+	 * they aren't useful past this point (aren't syscall arguments),
+	 * the rest is restored from the exception frame.
 	 */
+	stwu	r1,-32(r1)
+	stw	r9,8(r1)
+	stw	r11,12(r1)
+	stw	r3,16(r1)
+	stw	r4,20(r1)
+	stw	r5,24(r1)
 	andi.	r12,r12,MSR_PR
-	beq	11f
-	stwu	r1,-16(r1)
+	b	11f
 	bl	trace_hardirqs_off
-	addi	r1,r1,16
 	b	12f
-
 11:
 	bl	trace_hardirqs_off
 12:
+	lwz	r5,24(r1)
+	lwz	r4,20(r1)
+	lwz	r3,16(r1)
+	lwz	r11,12(r1)
+	lwz	r9,8(r1)
+	addi	r1,r1,32
 	lwz	r0,GPR0(r1)
-	lwz	r3,ORIG_GPR3(r1)
-	lwz	r4,GPR4(r1)
-	lwz	r5,GPR5(r1)
 	lwz	r6,GPR6(r1)
 	lwz	r7,GPR7(r1)
 	lwz	r8,GPR8(r1)
-	lwz	r9,8(r1)
-	lwz	r11,12(r1)
 1:	mtctr	r11
 	mtlr	r9
 	bctr				/* jump to handler */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 243dbab..5ec1b23 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -560,12 +560,6 @@
 	local_irq_restore(flags);
 }
 
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
-	return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f88698c..4937c96 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1235,7 +1235,7 @@
 	ctrl |= CTRL_RUNLATCH;
 	mtspr(SPRN_CTRLT, ctrl);
 
-	ti->local_flags |= TLF_RUNLATCH;
+	ti->local_flags |= _TLF_RUNLATCH;
 }
 
 /* Called with hard IRQs off */
@@ -1244,7 +1244,7 @@
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;
 
-	ti->local_flags &= ~TLF_RUNLATCH;
+	ti->local_flags &= ~_TLF_RUNLATCH;
 
 	ctrl = mfspr(SPRN_CTRLF);
 	ctrl &= ~CTRL_RUNLATCH;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9825f29..ec8a53f 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -150,6 +150,9 @@
 }
 
 #ifdef CONFIG_BOOKE_WDT
+extern u32 booke_wdt_enabled;
+extern u32 booke_wdt_period;
+
 /* Checks wdt=x and wdt_period=xx command-line option */
 notrace int __init early_parse_wdt(char *p)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index bed1279..e1b60f5 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -173,9 +173,9 @@
 
 static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 {
-	struct kvmppc_linear_info *ri;
+	struct kvmppc_linear_info *ri, *ret;
 
-	ri = NULL;
+	ret = NULL;
 	spin_lock(&linear_lock);
 	list_for_each_entry(ri, &free_linears, list) {
 		if (ri->type != type)
@@ -183,11 +183,12 @@
 
 		list_del(&ri->list);
 		atomic_inc(&ri->use_count);
+		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
+		ret = ri;
 		break;
 	}
 	spin_unlock(&linear_lock);
-	memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
-	return ri;
+	return ret;
 }
 
 static void kvm_release_linear(struct kvmppc_linear_info *ri)
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 3f7b674..d3fb4df 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -46,8 +46,10 @@
 	/* Save host state to the stack */
 	stdu	r1, -SWITCH_FRAME_SIZE(r1)
 
-	/* Save non-volatile registers (r14 - r31) */
+	/* Save non-volatile registers (r14 - r31) and CR */
 	SAVE_NVGPRS(r1)
+	mfcr	r3
+	std	r3, _CCR(r1)
 
 	/* Save host DSCR */
 BEGIN_FTR_SECTION
@@ -157,8 +159,10 @@
 	 * R13      = PACA
 	 */
 
-	/* Restore non-volatile host registers (r14 - r31) */
+	/* Restore non-volatile host registers (r14 - r31) and CR */
 	REST_NVGPRS(r1)
+	ld	r4, _CCR(r1)
+	mtcr	r4
 
 	addi    r1, r1, SWITCH_FRAME_SIZE
 	ld	r0, PPC_LR_STKOFF(r1)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 0a8515a..3e35383 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -84,6 +84,10 @@
 	/* Save non-volatile registers (r14 - r31) */
 	SAVE_NVGPRS(r1)
 
+	/* Save CR */
+	mfcr	r14
+	stw	r14, _CCR(r1)
+
 	/* Save LR */
 	PPC_STL	r0, _LINK(r1)
 
@@ -165,6 +169,9 @@
 	PPC_LL	r4, _LINK(r1)
 	mtlr	r4
 
+	lwz	r14, _CCR(r1)
+	mtcr	r14
+
 	/* Restore non-volatile host registers (r14 - r31) */
 	REST_NVGPRS(r1)
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 642d885..7759053 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -777,6 +777,7 @@
 	}
 	}
 
+	preempt_disable();
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
 		 * we aren't already exiting to userspace for some other
@@ -798,8 +799,6 @@
 			run->exit_reason = KVM_EXIT_INTR;
 			r = -EINTR;
 		} else {
-			preempt_disable();
-
 			/* In case an interrupt came in that was triggered
 			 * from userspace (like DEC), we need to check what
 			 * to inject now! */
@@ -881,7 +880,8 @@
 
 	switch (reg->id) {
 	case KVM_REG_PPC_HIOR:
-		r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		r = copy_to_user((u64 __user *)(long)reg->addr,
+				&to_book3s(vcpu)->hior, sizeof(u64));
 		break;
 	default:
 		break;
@@ -896,7 +896,8 @@
 
 	switch (reg->id) {
 	case KVM_REG_PPC_HIOR:
-		r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		r = copy_from_user(&to_book3s(vcpu)->hior,
+				   (u64 __user *)(long)reg->addr, sizeof(u64));
 		if (!r)
 			to_book3s(vcpu)->hior_explicit = true;
 		break;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 10d8ef6..c8c4b87 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -34,7 +34,8 @@
 /* r2 is special: it holds 'current', and it made nonvolatile in the
  * kernel with the -ffixed-r2 gcc option. */
 #define HOST_R2         12
-#define HOST_NV_GPRS    16
+#define HOST_CR         16
+#define HOST_NV_GPRS    20
 #define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
@@ -296,8 +297,10 @@
 
 	/* Return to kvm_vcpu_run(). */
 	lwz	r4, HOST_STACK_LR(r1)
+	lwz	r5, HOST_CR(r1)
 	addi	r1, r1, HOST_STACK_SIZE
 	mtlr	r4
+	mtcr	r5
 	/* r3 still contains the return code from kvmppc_handle_exit(). */
 	blr
 
@@ -314,6 +317,8 @@
 	stw	r3, HOST_RUN(r1)
 	mflr	r3
 	stw	r3, HOST_STACK_LR(r1)
+	mfcr	r5
+	stw	r5, HOST_CR(r1)
 
 	/* Save host non-volatile register state to stack. */
 	stw	r14, HOST_NV_GPR(r14)(r1)
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 9fef530..67dac22 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -21,6 +21,12 @@
 	{ .compatible = "fsl,qe", },
 	{ .compatible = "fsl,cpm2", },
 	{ .compatible = "fsl,srio", },
+	/* So that the DMA channel nodes can be probed individually: */
+	{ .compatible = "fsl,eloplus-dma", },
+	/* For the PMC driver */
+	{ .compatible = "fsl,mpc8548-guts", },
+	/* Probably unnecessary? */
+	{ .compatible = "gpio-leds", },
 	{},
 };
 
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 9a6f044..d208ebc 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -399,12 +399,6 @@
 machine_arch_initcall(mpc8568_mds, board_fixups);
 machine_arch_initcall(mpc8569_mds, board_fixups);
 
-static struct of_device_id mpc85xx_ids[] = {
-	{ .compatible = "fsl,mpc8548-guts", },
-	{ .compatible = "gpio-leds", },
-	{},
-};
-
 static int __init mpc85xx_publish_devices(void)
 {
 	if (machine_is(mpc8568_mds))
@@ -412,10 +406,7 @@
 	if (machine_is(mpc8569_mds))
 		simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio");
 
-	mpc85xx_common_publish_devices();
-	of_platform_bus_probe(NULL, mpc85xx_ids, NULL);
-
-	return 0;
+	return mpc85xx_common_publish_devices();
 }
 
 machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index e74b7cd..f700c81 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -460,18 +460,7 @@
 	pr_info("Freescale P1022 DS reference board\n");
 }
 
-static struct of_device_id __initdata p1022_ds_ids[] = {
-	/* So that the DMA channel nodes can be probed individually: */
-	{ .compatible = "fsl,eloplus-dma", },
-	{},
-};
-
-static int __init p1022_ds_publish_devices(void)
-{
-	mpc85xx_common_publish_devices();
-	return of_platform_bus_probe(NULL, p1022_ds_ids, NULL);
-}
-machine_device_initcall(p1022_ds, p1022_ds_publish_devices);
+machine_device_initcall(p1022_ds, mpc85xx_common_publish_devices);
 
 machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
 
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index db360fc..d09f3e8 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -392,7 +392,7 @@
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
+	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index e5c3a2c..f9a48af 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -239,7 +239,7 @@
 	ppc_md.get_irq = beatic_get_irq;
 
 	/* Allocate an irq host */
-	beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
+	beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL);
 	BUG_ON(beatic_host == NULL);
 	irq_set_default_host(beatic_host);
 }
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 996c5ff..03685a3 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -366,11 +366,20 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&host->lock, flags);
+
+	/*
+	 * If the timer is pending, that means we raced with the
+	 * irq, in which case we just return
+	 */
+	if (timer_pending(&host->timeout_timer))
+		goto skip;
+
 	kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
 	if (host->state != state_idle) {
 		host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
 		add_timer(&host->timeout_timer);
 	}
+ skip:
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index a81e5a8..b4ddaa3 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -192,7 +192,7 @@
 {
 	int rc = -ENOMEM;
 
-	psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL);
+	psurge_host = irq_domain_add_nomap(NULL, 0, &psurge_host_ops, NULL);
 
 	if (psurge_host)
 		psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 2a4ff86..5f3b232 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -753,9 +753,8 @@
 	unsigned cpu;
 	struct irq_domain *host;
 
-	host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL);
+	host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
 	irq_set_default_host(host);
-	irq_set_virq_count(PS3_PLUG_MAX + 1);
 
 	for_each_possible_cpu(cpu) {
 		struct ps3_private *pd = &per_cpu(ps3_private, cpu);
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 309d38e..a75e37d 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1076,7 +1076,7 @@
 	pr_debug("EEH: Adding device %s\n", pci_name(dev));
 
 	dn = pci_device_to_OF_node(dev);
-	edev = pci_dev_to_eeh_dev(dev);
+	edev = of_node_to_eeh_dev(dn);
 	if (edev->pdev == dev) {
 		pr_debug("EEH: Already referenced !\n");
 		return;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 9ac71eb..395af13 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -604,18 +604,14 @@
 }
 
 /* Determine if the linux irq is an IPI */
-static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq)
+static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int src)
 {
-	unsigned int src = virq_to_hw(irq);
-
 	return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
 }
 
 /* Determine if the linux irq is a timer */
-static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq)
+static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int src)
 {
-	unsigned int src = virq_to_hw(irq);
-
 	return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
 }
 
@@ -876,21 +872,45 @@
 	if (src >= mpic->num_sources)
 		return -EINVAL;
 
-	if (flow_type == IRQ_TYPE_NONE)
-		if (mpic->senses && src < mpic->senses_count)
-			flow_type = mpic->senses[src];
-	if (flow_type == IRQ_TYPE_NONE)
-		flow_type = IRQ_TYPE_LEVEL_LOW;
+	vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
 
+	/* We don't support "none" type */
+	if (flow_type == IRQ_TYPE_NONE)
+		flow_type = IRQ_TYPE_DEFAULT;
+
+	/* Default: read HW settings */
+	if (flow_type == IRQ_TYPE_DEFAULT) {
+		switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
+			       MPIC_INFO(VECPRI_SENSE_MASK))) {
+			case MPIC_INFO(VECPRI_SENSE_EDGE) |
+			     MPIC_INFO(VECPRI_POLARITY_POSITIVE):
+				flow_type = IRQ_TYPE_EDGE_RISING;
+				break;
+			case MPIC_INFO(VECPRI_SENSE_EDGE) |
+			     MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
+				flow_type = IRQ_TYPE_EDGE_FALLING;
+				break;
+			case MPIC_INFO(VECPRI_SENSE_LEVEL) |
+			     MPIC_INFO(VECPRI_POLARITY_POSITIVE):
+				flow_type = IRQ_TYPE_LEVEL_HIGH;
+				break;
+			case MPIC_INFO(VECPRI_SENSE_LEVEL) |
+			     MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
+				flow_type = IRQ_TYPE_LEVEL_LOW;
+				break;
+		}
+	}
+
+	/* Apply to irq desc */
 	irqd_set_trigger_type(d, flow_type);
 
+	/* Apply to HW */
 	if (mpic_is_ht_interrupt(mpic, src))
 		vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
 			MPIC_VECPRI_SENSE_EDGE;
 	else
 		vecpri = mpic_type_to_vecpri(mpic, flow_type);
 
-	vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
 	vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) |
 			MPIC_INFO(VECPRI_SENSE_MASK));
 	vnew |= vecpri;
@@ -1026,7 +1046,7 @@
 	irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
 
 	/* Set default irq type */
-	irq_set_irq_type(virq, IRQ_TYPE_NONE);
+	irq_set_irq_type(virq, IRQ_TYPE_DEFAULT);
 
 	/* If the MPIC was reset, then all vectors have already been
 	 * initialized.  Otherwise, a per source lazy initialization
@@ -1417,12 +1437,6 @@
 		mpic->num_sources = isu_first + mpic->isu_size;
 }
 
-void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
-{
-	mpic->senses = senses;
-	mpic->senses_count = count;
-}
-
 void __init mpic_init(struct mpic *mpic)
 {
 	int i, cpu;
@@ -1555,12 +1569,12 @@
 		return;
 
 	raw_spin_lock_irqsave(&mpic_lock, flags);
-	if (mpic_is_ipi(mpic, irq)) {
+	if (mpic_is_ipi(mpic, src)) {
 		reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
 			~MPIC_VECPRI_PRIORITY_MASK;
 		mpic_ipi_write(src - mpic->ipi_vecs[0],
 			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
-	} else if (mpic_is_tm(mpic, irq)) {
+	} else if (mpic_is_tm(mpic, src)) {
 		reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
 			~MPIC_VECPRI_PRIORITY_MASK;
 		mpic_tm_write(src - mpic->timer_vecs[0],
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 6e7fa38..483d8fa 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -27,6 +27,7 @@
 
 static struct mpic_msgr **mpic_msgrs;
 static unsigned int mpic_msgr_count;
+static DEFINE_RAW_SPINLOCK(msgrs_lock);
 
 static inline void _mpic_msgr_mer_write(struct mpic_msgr *msgr, u32 value)
 {
@@ -56,12 +57,11 @@
 	if (reg_num >= mpic_msgr_count)
 		return ERR_PTR(-ENODEV);
 
-	raw_spin_lock_irqsave(&msgr->lock, flags);
-	if (mpic_msgrs[reg_num]->in_use == MSGR_FREE) {
-		msgr = mpic_msgrs[reg_num];
+	raw_spin_lock_irqsave(&msgrs_lock, flags);
+	msgr = mpic_msgrs[reg_num];
+	if (msgr->in_use == MSGR_FREE)
 		msgr->in_use = MSGR_INUSE;
-	}
-	raw_spin_unlock_irqrestore(&msgr->lock, flags);
+	raw_spin_unlock_irqrestore(&msgrs_lock, flags);
 
 	return msgr;
 }
@@ -228,7 +228,7 @@
 
 		reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i;
 		msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE;
-		msgr->mer = msgr->base + MPIC_MSGR_MER_OFFSET;
+		msgr->mer = (u32 *)((u8 *)msgr->base + MPIC_MSGR_MER_OFFSET);
 		msgr->in_use = MSGR_FREE;
 		msgr->num = i;
 		raw_spin_lock_init(&msgr->lock);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2b7c0fb..9015060 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -90,7 +90,6 @@
 	select HAVE_KERNEL_XZ
 	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
-	select HAVE_RCU_TABLE_FREE if SMP
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 6cf8e26..1957a9d 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,8 +1,12 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_AUDIT=y
-CONFIG_RCU_TRACE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
@@ -14,16 +18,22 @@
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_BLK_CGROUP=y
+CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
+CONFIG_RD_LZO=y
+CONFIG_EXPERT=y
 # CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -34,18 +44,15 @@
 CONFIG_BINFMT_MISC=m
 CONFIG_CMM=m
 CONFIG_HZ_100=y
-CONFIG_KEXEC=y
-CONFIG_PM=y
+CONFIG_CRASH_DUMP=y
 CONFIG_HIBERNATION=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_NET_KEY=y
-CONFIG_AFIUCV=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 # CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
-CONFIG_NET_SCTPPROBE=m
 CONFIG_L2TP=m
 CONFIG_L2TP_DEBUGFS=m
 CONFIG_VLAN_8021Q=y
@@ -84,15 +91,14 @@
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_ZFCP=y
-CONFIG_ZFCP_DIF=y
 CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
 CONFIG_BONDING=m
+CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
 CONFIG_VIRTIO_NET=y
 CONFIG_RAW_DRIVER=m
+CONFIG_VIRTIO_BALLOON=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -103,27 +109,21 @@
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_IBM_PARTITION=y
-CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
 CONFIG_TIMER_STATS=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_PROVE_RCU=y
 CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
 CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_NOTIFIERS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_RCU_TRACE=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
 CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_DEBUG_PAGEALLOC=y
-# CONFIG_FTRACE is not set
+CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_STRICT_DEVMEM is not set
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -173,4 +173,3 @@
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRC7=m
-CONFIG_VIRTIO_BALLOON=y
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 1e5b27e..2ee66a6 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -38,12 +38,11 @@
 	unsigned long nr;
 
 	preempt_disable();
-	S390_lowcore.stfl_fac_list = 0;
 	asm volatile(
 		"	.insn s,0xb2b10000,0(0)\n" /* stfl */
 		"0:\n"
 		EX_TABLE(0b, 0b)
-		: "=m" (S390_lowcore.stfl_fac_list));
+		: "+m" (S390_lowcore.stfl_fac_list));
 	nr = 4; /* bytes stored by stfl */
 	memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
 	if (S390_lowcore.stfl_fac_list & 0x01000000) {
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 8eef9b5..78e3041 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,10 +22,7 @@
 
 unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
-void __tlb_remove_table(void *_table);
-#endif
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
index 6bdee21..a3e4ebb 100644
--- a/arch/s390/include/asm/swab.h
+++ b/arch/s390/include/asm/swab.h
@@ -77,7 +77,7 @@
 	
 	asm volatile(
 #ifndef __s390x__
-		"	icm	%0,2,%O+1(%R1)\n"
+		"	icm	%0,2,%O1+1(%R1)\n"
 		"	ic	%0,%1\n"
 		: "=&d" (result) : "Q" (*x) : "cc");
 #else /* __s390x__ */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c687a2c..775a5ee 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -30,14 +30,10 @@
 
 struct mmu_gather {
 	struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	struct mmu_table_batch *batch;
-#endif
 	unsigned int fullmm;
-	unsigned int need_flush;
 };
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 struct mmu_table_batch {
 	struct rcu_head		rcu;
 	unsigned int		nr;
@@ -49,7 +45,6 @@
 
 extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-#endif
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 				  struct mm_struct *mm,
@@ -57,29 +52,20 @@
 {
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
-	tlb->need_flush = 0;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
-#endif
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-	if (!tlb->need_flush)
-		return;
-	tlb->need_flush = 0;
-	__tlb_flush_mm(tlb->mm);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
-#endif
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
-	tlb_flush_mmu(tlb);
+	tlb_table_flush(tlb);
 }
 
 /*
@@ -105,10 +91,8 @@
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	if (!tlb->fullmm)
 		return page_table_free_rcu(tlb, (unsigned long *) pte);
-#endif
 	page_table_free(tlb->mm, (unsigned long *) pte);
 }
 
@@ -125,10 +109,8 @@
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	if (!tlb->fullmm)
 		return tlb_remove_table(tlb, pmd);
-#endif
 	crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
 }
@@ -146,10 +128,8 @@
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
 		return;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	if (!tlb->fullmm)
 		return tlb_remove_table(tlb, pud);
-#endif
 	crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
 }
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index c27a072..adccd90 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -474,9 +474,9 @@
 	stck	__LC_LAST_UPDATE_CLOCK
 	spt	5f-.LPG0(%r13)
 	mvc	__LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
+	xc	__LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
 #ifndef CONFIG_MARCH_G5
 	# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
-	xc	__LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
 	.insn	s,0xb2b10000,__LC_STFL_FAC_LIST	# store facility list
 	tm	__LC_STFL_FAC_LIST,0x01	# stfle available ?
 	jz	0f
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1c2cdd5..8a22c27 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -118,9 +118,10 @@
 				         "a" (__do_softirq)
 				     : "0", "1", "2", "3", "4", "5", "14",
 				       "cc", "memory" );
-		} else
+		} else {
 			/* We are already on the async stack. */
 			__do_softirq();
+		}
 	}
 
 	local_irq_restore(flags);
@@ -192,11 +193,12 @@
 	int index = ext_hash(code);
 
 	spin_lock_irqsave(&ext_int_hash_lock, flags);
-	list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
+	list_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
 		if (p->code == code && p->handler == handler) {
 			list_del_rcu(&p->entry);
 			kfree_rcu(p, rcu);
 		}
+	}
 	spin_unlock_irqrestore(&ext_int_hash_lock, flags);
 	return 0;
 }
@@ -211,9 +213,10 @@
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) {
 		/* Serve timer interrupts first. */
 		clock_comparator_work();
+	}
 	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
 	if (ext_code.code != 0x1004)
 		__get_cpu_var(s390_idle).nohz_delay = 1;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 4640508..cb019f4 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -178,7 +178,7 @@
 	err = lcctl(cpuhw->state);
 	if (err) {
 		pr_err("Enabling the performance measuring unit "
-		       "failed with rc=%lx\n", err);
+		       "failed with rc=%x\n", err);
 		return;
 	}
 
@@ -203,7 +203,7 @@
 	err = lcctl(inactive);
 	if (err) {
 		pr_err("Disabling the performance measuring unit "
-		       "failed with rc=%lx\n", err);
+		       "failed with rc=%x\n", err);
 		return;
 	}
 
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 7bb15fc..e1335dc 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -61,21 +61,14 @@
 	return copied < 0 ? -EFAULT : 0;
 }
 
-/*
- * Copy memory in real mode (kernel to kernel)
- */
-int memcpy_real(void *dest, void *src, size_t count)
+static int __memcpy_real(void *dest, void *src, size_t count)
 {
 	register unsigned long _dest asm("2") = (unsigned long) dest;
 	register unsigned long _len1 asm("3") = (unsigned long) count;
 	register unsigned long _src  asm("4") = (unsigned long) src;
 	register unsigned long _len2 asm("5") = (unsigned long) count;
-	unsigned long flags;
 	int rc = -EFAULT;
 
-	if (!count)
-		return 0;
-	flags = __arch_local_irq_stnsm(0xf8UL);
 	asm volatile (
 		"0:	mvcle	%1,%2,0x0\n"
 		"1:	jo	0b\n"
@@ -86,7 +79,23 @@
 		  "+d" (_len2), "=m" (*((long *) dest))
 		: "m" (*((long *) src))
 		: "cc", "memory");
-	arch_local_irq_restore(flags);
+	return rc;
+}
+
+/*
+ * Copy memory in real mode (kernel to kernel)
+ */
+int memcpy_real(void *dest, void *src, size_t count)
+{
+	unsigned long flags;
+	int rc;
+
+	if (!count)
+		return 0;
+	local_irq_save(flags);
+	__arch_local_irq_stnsm(0xfbUL);
+	rc = __memcpy_real(dest, src, count);
+	local_irq_restore(flags);
 	return rc;
 }
 
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 373adf6..6e765bf 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -678,8 +678,6 @@
 	}
 }
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
 static void __page_table_free_rcu(void *table, unsigned bit)
 {
 	struct page *page;
@@ -733,7 +731,66 @@
 		free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-#endif
+static void tlb_remove_table_smp_sync(void *arg)
+{
+	/* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+	/*
+	 * This isn't an RCU grace period and hence the page-tables cannot be
+	 * assumed to be actually RCU-freed.
+	 *
+	 * It is however sufficient for software page-table walkers that rely
+	 * on IRQ disabling. See the comment near struct mmu_table_batch.
+	 */
+	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	__tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+	struct mmu_table_batch *batch;
+	int i;
+
+	batch = container_of(head, struct mmu_table_batch, rcu);
+
+	for (i = 0; i < batch->nr; i++)
+		__tlb_remove_table(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch) {
+		__tlb_flush_mm(tlb->mm);
+		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		*batch = NULL;
+	}
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch == NULL) {
+		*batch = (struct mmu_table_batch *)
+			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+		if (*batch == NULL) {
+			__tlb_flush_mm(tlb->mm);
+			tlb_remove_table_one(table);
+			return;
+		}
+		(*batch)->nr = 0;
+	}
+	(*batch)->tables[(*batch)->nr++] = table;
+	if ((*batch)->nr == MAX_TABLE_BATCH)
+		tlb_table_flush(tlb);
+}
 
 /*
  * switch on pgstes for its userspace process (for kvm)
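A condensed, generic sketch of the scheme tlb_table_flush()/tlb_remove_table() above implement (not part of this patch, names hypothetical): objects are collected into a batch and only freed after an RCU-sched grace period, so software walkers that run with interrupts disabled never see a table freed underneath them.

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct free_batch {
		struct rcu_head rcu;
		unsigned int nr;
		void *objs[64];			/* illustrative capacity */
	};

	static void free_batch_rcu(struct rcu_head *head)
	{
		struct free_batch *b = container_of(head, struct free_batch, rcu);
		unsigned int i;

		for (i = 0; i < b->nr; i++)
			kfree(b->objs[i]);	/* stand-in for __tlb_remove_table() */
		kfree(b);
	}

	static void defer_free(struct free_batch *b)
	{
		/* grace period covers all preempt/irq-disabled regions */
		call_rcu_sched(&b->rcu, free_batch_rcu);
	}
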
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index c1d5a82..5f2bb42 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -61,6 +61,7 @@
 config DWARF_UNWINDER
 	bool "Enable the DWARF unwinder for stacktraces"
 	select FRAME_POINTER
+	depends on SUPERH32
 	default n
 	help
 	  Enabling this option will make stacktraces more accurate, at
diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c
index d879848..d0d6221 100644
--- a/arch/sh/boards/board-sh7785lcr.c
+++ b/arch/sh/boards/board-sh7785lcr.c
@@ -28,6 +28,7 @@
 #include <cpu/sh7785.h>
 #include <asm/heartbeat.h>
 #include <asm/clock.h>
+#include <asm/bl_bit.h>
 
 /*
  * NOTE: This board has 2 physical memory maps.
diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
index adc9b4b..8b50cf7 100644
--- a/arch/sh/boards/mach-hp6xx/pm.c
+++ b/arch/sh/boards/mach-hp6xx/pm.c
@@ -14,6 +14,7 @@
 #include <linux/gfp.h>
 #include <asm/io.h>
 #include <asm/hd64461.h>
+#include <asm/bl_bit.h>
 #include <mach/hp6xx.h>
 #include <cpu/dac.h>
 #include <asm/freq.h>
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index b1cb271..67ee956 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -54,7 +54,7 @@
 	if (unlikely(ret))
 		return ret;
 
-	return device_create_file(dma_subsys.dev_root, &dev_attr_devices.attr);
+	return device_create_file(dma_subsys.dev_root, &dev_attr_devices);
 }
 postcore_initcall(dma_subsys_init);
 
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index 7f1b70c..f8f7af5 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -2,6 +2,7 @@
 #include <linux/slab.h>
 #include <asm/processor.h>
 #include <asm/fpu.h>
+#include <asm/traps.h>
 
 int init_fpu(struct task_struct *tsk)
 {
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index 488d24e..98bbaa4 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -14,6 +14,7 @@
 #include <asm/processor.h>
 #include <asm/io.h>
 #include <asm/fpu.h>
+#include <asm/traps.h>
 
 /* The PR (precision) bit in the FP Status Register must be clear when
  * an frchg instruction is executed, otherwise the instruction is undefined.
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e74cd6c..69ab4d3 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -16,6 +16,7 @@
 #include <cpu/fpu.h>
 #include <asm/processor.h>
 #include <asm/fpu.h>
+#include <asm/traps.h>
 
 /* The PR (precision) bit in the FP Status Register must be clear when
  * an frchg instruction is executed, otherwise the instruction is undefined.
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index 5853989..04ab5ae 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -113,7 +113,7 @@
 	CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
 
 	/* MSTP32 clocks */
-	CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP004]),
+	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP004]),
 	CLKDEV_CON_ID("riic0", &mstp_clks[MSTP000]),
 	CLKDEV_CON_ID("riic1", &mstp_clks[MSTP000]),
 	CLKDEV_CON_ID("riic2", &mstp_clks[MSTP000]),
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index a6f95ae..08d27fa 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -16,6 +16,7 @@
 #include <asm/suspend.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
+#include <asm/bl_bit.h>
 
 /*
  * Notifier lists for pre/post sleep notification
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 64852ec..ee226e2 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -17,8 +17,8 @@
 #include <linux/irqflags.h>
 #include <linux/smp.h>
 #include <linux/cpuidle.h>
-#include <asm/pgalloc.h>
 #include <linux/atomic.h>
+#include <asm/pgalloc.h>
 #include <asm/smp.h>
 #include <asm/bl_bit.h>
 
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index efb6d39..b117781 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -14,6 +14,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <asm/cacheflush.h>
+#include <asm/traps.h>
 
 /* Macros for single step instruction identification */
 #define OPCODE_BT(op)		(((op) & 0xff00) == 0x8900)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index f72e3a9..94273aa 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -26,6 +26,7 @@
 #include <asm/mmu_context.h>
 #include <asm/fpu.h>
 #include <asm/syscalls.h>
+#include <asm/switch_to.h>
 
 void show_regs(struct pt_regs * regs)
 {
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index a17a14d..eaebdf6 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -27,6 +27,7 @@
 #include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
diff --git a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
index 555a64f..23af175 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
@@ -34,6 +34,41 @@
 1:	.short	__NR_rt_sigreturn
 .LEND_rt_sigreturn:
 	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
+	.previous
 
 	.section .eh_frame,"a",@progbits
+.LCIE1:
+	.ualong	.LCIE1_end - .LCIE1_start
+.LCIE1_start:
+	.ualong	0		/* CIE ID */
+	.byte	0x1		/* Version number */
+	.string	"zRS"		/* NUL-terminated augmentation string */
+	.uleb128 0x1		/* Code alignment factor */
+	.sleb128 -4		/* Data alignment factor */
+	.byte	0x11		/* Return address register column */
+	.uleb128 0x1		/* Augmentation length and data */
+	.byte 0x1b              /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+	.byte	0xc, 0xf, 0x0	/* DW_CFA_def_cfa: r15 ofs 0 */
+
+	.align 2
+.LCIE1_end:
+
+	.ualong	.LFDE0_end-.LFDE0_start	/* Length FDE0 */
+.LFDE0_start:
+	.ualong	.LFDE0_start-.LCIE1	/* CIE pointer */
+	.ualong	.LSTART_sigreturn-.	/* PC-relative start address */
+	.ualong	.LEND_sigreturn-.LSTART_sigreturn
+	.uleb128 0			/* Augmentation */
+	.align 2
+.LFDE0_end:
+
+	.ualong	.LFDE1_end-.LFDE1_start	/* Length FDE1 */
+.LFDE1_start:
+	.ualong	.LFDE1_start-.LCIE1	/* CIE pointer */
+	.ualong	.LSTART_rt_sigreturn-.	/* PC-relative start address */
+	.ualong	.LEND_rt_sigreturn-.LSTART_rt_sigreturn
+	.uleb128 0			/* Augmentation */
+	.align 2
+.LFDE1_end:
+
 	.previous
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
index 3e70f85..0eb74d0 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -3,37 +3,34 @@
 	.type __kernel_vsyscall,@function
 __kernel_vsyscall:
 .LSTART_vsyscall:
-	/* XXX: We'll have to do something here once we opt to use the vDSO
-	 * page for something other than the signal trampoline.. as well as
-	 * fill out .eh_frame -- PFM. */
+	trapa	#0x10
+	 nop
 .LEND_vsyscall:
 	.size __kernel_vsyscall,.-.LSTART_vsyscall
+	.previous
 
 	.section .eh_frame,"a",@progbits
-	.previous
 .LCIE:
 	.ualong	.LCIE_end - .LCIE_start
 .LCIE_start:
 	.ualong	0		/* CIE ID */
 	.byte	0x1		/* Version number */
-	.string	"zRS"		/* NUL-terminated augmentation string */
+	.string	"zR"		/* NUL-terminated augmentation string */
 	.uleb128 0x1		/* Code alignment factor */
 	.sleb128 -4		/* Data alignment factor */
 	.byte	0x11		/* Return address register column */
-				/* Augmentation length and data (none) */
-	.byte	0xc		/* DW_CFA_def_cfa */
-	.uleb128 0xf		/* r15 */
-	.uleb128 0x0		/* offset 0 */
-
+	.uleb128 0x1		/* Augmentation length and data */
+	.byte 0x1b              /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+	.byte	0xc,0xf,0x0	/* DW_CFA_def_cfa: r15 ofs 0 */
 	.align 2
 .LCIE_end:
 
 	.ualong	.LFDE_end-.LFDE_start	/* Length FDE */
 .LFDE_start:
-	.ualong	.LCIE			/* CIE pointer */
-	.ualong	.LSTART_vsyscall-.	/* start address */
+	.ualong	.LFDE_start-.LCIE	/* CIE pointer */
+	.ualong	.LSTART_vsyscall-.	/* PC-relative start address */
 	.ualong	.LEND_vsyscall-.LSTART_vsyscall
-	.uleb128 0
+	.uleb128 0			/* Augmentation */
 	.align 2
 .LFDE_end:
 	.previous
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 112fea1..0e52928 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -18,6 +18,7 @@
 #include <linux/highmem.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include <asm/cache_insns.h>
 #include <asm/cacheflush.h>
 
 /*
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
index 75a17f5..0b85dd9 100644
--- a/arch/sh/mm/flush-sh4.c
+++ b/arch/sh/mm/flush-sh4.c
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <asm/mmu_context.h>
+#include <asm/cache_insns.h>
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 
diff --git a/arch/sh/mm/sram.c b/arch/sh/mm/sram.c
index bc156ec..2d8fa71 100644
--- a/arch/sh/mm/sram.c
+++ b/arch/sh/mm/sram.c
@@ -9,6 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/errno.h>
 #include <asm/sram.h>
 
 /*
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6c0683d..5f6acce 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -30,6 +30,7 @@
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select GENERIC_PCI_IOMAP
 	select HAVE_NMI_WATCHDOG if SPARC64
+	select HAVE_BPF_JIT
 
 config SPARC32
 	def_bool !64BIT
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index eddcfb3..0e5de13 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -66,6 +66,7 @@
 
 core-y                 += arch/sparc/kernel/
 core-y                 += arch/sparc/mm/ arch/sparc/math-emu/
+core-y                 += arch/sparc/net/
 
 libs-y                 += arch/sparc/prom/
 libs-y                 += arch/sparc/lib/
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index fea13c7..b93c2c9 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1264,4 +1264,4 @@
 	return vio_register_driver(&ds_driver);
 }
 
-subsys_initcall(ds_init);
+fs_initcall(ds_init);
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index aba6b95..19f5605 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -45,7 +45,6 @@
 
 void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
 {
-	struct leon_pci_info *info = pbus->sysdata;
 	struct pci_dev *dev;
 	int i, has_io, has_mem;
 	u16 cmd;
@@ -111,18 +110,6 @@
 	return pci_enable_resources(dev, mask);
 }
 
-struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
-	/*
-	 * Currently the OpenBoot nodes are not connected with the PCI device,
-	 * this is because the LEON PROM does not create PCI nodes. Eventually
-	 * this will change and the same approach as pcic.c can be used to
-	 * match PROM nodes with pci devices.
-	 */
-	return NULL;
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
 void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
 {
 #ifdef CONFIG_PCI_DEBUG
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 1210fde..160cac9 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -23,6 +23,7 @@
 #include <linux/pm.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
+#include <linux/cpu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -78,6 +79,8 @@
 	local_flush_tlb_all();
 	leon_configure_cache_smp();
 
+	notify_cpu_starting(cpuid);
+
 	/* Get our local ticker going. */
 	smp_setup_percpu_timer();
 
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 77f1b95e..9171fc2 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -20,11 +20,6 @@
 
 		.text
 		.align			32
-__handle_softirq:
-		call			do_softirq
-		 nop
-		ba,a,pt			%xcc, __handle_softirq_continue
-		 nop
 __handle_preemption:
 		call			schedule
 		 wrpr			%g0, RTRAP_PSTATE, %pstate
@@ -89,9 +84,7 @@
 		cmp			%l1, 0
 
 		/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
-		bne,pn			%icc, __handle_softirq
 		 ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
-__handle_softirq_continue:
 rtrap_xcall:
 		sethi			%hi(0xf << 20), %l4
 		and			%l1, %l4, %l4
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 232df99..3ee51f1 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -566,15 +566,10 @@
 
 SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
 {
-	long ret;
-
 	if (invalid_64bit_range(addr, len))
 		return -EINVAL;
 
-	down_write(&current->mm->mmap_sem);
-	ret = do_munmap(current->mm, addr, len);
-	up_write(&current->mm->mmap_sem);
-	return ret;
+	return vm_munmap(addr, len);
 }
 
 extern unsigned long do_mremap(unsigned long addr,
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 7705c67..df3155a 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -225,6 +225,8 @@
 	unsigned long g2;
 	int from_user = !(regs->psr & PSR_PS);
 	int fault, code;
+	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+			      (write ? FAULT_FLAG_WRITE : 0));
 
 	if(text_fault)
 		address = regs->pc;
@@ -251,6 +253,7 @@
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	/*
@@ -289,7 +292,11 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -297,13 +304,29 @@
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
+
 	up_read(&mm->mmap_sem);
 	return;
 
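Both sparc fault handlers in this series adopt the same retry shape; a condensed sketch of it, not part of this patch (the helper name and the omitted vma lookup are placeholders):

	#include <linux/mm.h>
	#include <linux/sched.h>

	static void fault_retry_sketch(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long address, int write)
	{
		unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
				     (write ? FAULT_FLAG_WRITE : 0);
		int fault;

	retry:
		down_read(&mm->mmap_sem);
		/* ... locate and validate the vma here ... */
		fault = handle_mm_fault(mm, vma, address, flags);

		/* a killable wait already dropped mmap_sem for us */
		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
			return;

		if (fault & VM_FAULT_RETRY) {
			/* allow only one retry; __lock_page_or_retry released mmap_sem */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
		up_read(&mm->mmap_sem);
	}
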
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 504c062..1fe0429 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -279,6 +279,7 @@
 	unsigned int insn = 0;
 	int si_code, fault_code, fault;
 	unsigned long address, mm_rss;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	fault_code = get_thread_fault_code();
 
@@ -333,6 +334,8 @@
 			insn = get_fault_insn(regs, insn);
 			goto handle_kernel_fault;
 		}
+
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -423,7 +426,12 @@
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -431,12 +439,27 @@
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
 	up_read(&mm->mmap_sem);
 
diff --git a/arch/sparc/net/Makefile b/arch/sparc/net/Makefile
new file mode 100644
index 0000000..1306a58
--- /dev/null
+++ b/arch/sparc/net/Makefile
@@ -0,0 +1,4 @@
+#
+# Arch-specific network modules
+#
+obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
diff --git a/arch/sparc/net/bpf_jit.h b/arch/sparc/net/bpf_jit.h
new file mode 100644
index 0000000..33d6b37
--- /dev/null
+++ b/arch/sparc/net/bpf_jit.h
@@ -0,0 +1,68 @@
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+/* Conventions:
+ *  %g1 : temporary
+ *  %g2 : Secondary temporary used by SKB data helper stubs.
+ *  %g3 : packet offset passed into SKB data helper stubs.
+ *  %o0 : pointer to skb (first argument given to JIT function)
+ *  %o1 : BPF A accumulator
+ *  %o2 : BPF X accumulator
+ *  %o3 : Holds saved %o7 so we can call helper functions without needing
+ *        to allocate a register window.
+ *  %o4 : skb->len - skb->data_len
+ *  %o5 : skb->data
+ */
+
+#ifndef __ASSEMBLER__
+#define G0		0x00
+#define G1		0x01
+#define G3		0x03
+#define G6		0x06
+#define O0		0x08
+#define O1		0x09
+#define O2		0x0a
+#define O3		0x0b
+#define O4		0x0c
+#define O5		0x0d
+#define SP		0x0e
+#define O7		0x0f
+#define FP		0x1e
+
+#define r_SKB		O0
+#define r_A		O1
+#define r_X		O2
+#define r_saved_O7	O3
+#define r_HEADLEN	O4
+#define r_SKB_DATA	O5
+#define r_TMP		G1
+#define r_TMP2		G2
+#define r_OFF		G3
+
+/* assembly code in arch/sparc/net/bpf_jit_asm.S */
+extern u32 bpf_jit_load_word[];
+extern u32 bpf_jit_load_half[];
+extern u32 bpf_jit_load_byte[];
+extern u32 bpf_jit_load_byte_msh[];
+extern u32 bpf_jit_load_word_positive_offset[];
+extern u32 bpf_jit_load_half_positive_offset[];
+extern u32 bpf_jit_load_byte_positive_offset[];
+extern u32 bpf_jit_load_byte_msh_positive_offset[];
+extern u32 bpf_jit_load_word_negative_offset[];
+extern u32 bpf_jit_load_half_negative_offset[];
+extern u32 bpf_jit_load_byte_negative_offset[];
+extern u32 bpf_jit_load_byte_msh_negative_offset[];
+
+#else
+#define r_SKB		%o0
+#define r_A		%o1
+#define r_X		%o2
+#define r_saved_O7	%o3
+#define r_HEADLEN	%o4
+#define r_SKB_DATA	%o5
+#define r_TMP		%g1
+#define r_TMP2		%g2
+#define r_OFF		%g3
+#endif
+
+#endif /* _BPF_JIT_H */
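For context, the programs this JIT compiles are ordinary classic-BPF filters attached from userspace; a small illustration, not part of this patch (function name is hypothetical, offset 23 assumes an Ethernet-framed packet socket):

	#include <linux/filter.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	#ifndef SO_ATTACH_FILTER
	#define SO_ATTACH_FILTER 26	/* value from asm-generic/socket.h */
	#endif

	static int attach_udp_filter(int sock)
	{
		struct sock_filter code[] = {
			BPF_STMT(BPF_LD  | BPF_B   | BPF_ABS, 23),	/* A = IP protocol */
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_UDP, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, 0xffff),		/* accept packet */
			BPF_STMT(BPF_RET | BPF_K, 0),			/* drop packet   */
		};
		struct sock_fprog prog = {
			.len	= sizeof(code) / sizeof(code[0]),
			.filter	= code,
		};

		return setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
				  &prog, sizeof(prog));
	}
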
diff --git a/arch/sparc/net/bpf_jit_asm.S b/arch/sparc/net/bpf_jit_asm.S
new file mode 100644
index 0000000..9d016c7
--- /dev/null
+++ b/arch/sparc/net/bpf_jit_asm.S
@@ -0,0 +1,205 @@
+#include <asm/ptrace.h>
+
+#include "bpf_jit.h"
+
+#ifdef CONFIG_SPARC64
+#define SAVE_SZ		176
+#define SCRATCH_OFF	STACK_BIAS + 128
+#define BE_PTR(label)	be,pn %xcc, label
+#else
+#define SAVE_SZ		96
+#define SCRATCH_OFF	72
+#define BE_PTR(label)	be label
+#endif
+
+#define SKF_MAX_NEG_OFF	(-0x200000) /* SKF_LL_OFF from filter.h */
+
+	.text
+	.globl	bpf_jit_load_word
+bpf_jit_load_word:
+	cmp	r_OFF, 0
+	bl	bpf_slow_path_word_neg
+	 nop
+	.globl	bpf_jit_load_word_positive_offset
+bpf_jit_load_word_positive_offset:
+	sub	r_HEADLEN, r_OFF, r_TMP
+	cmp	r_TMP, 3
+	ble	bpf_slow_path_word
+	 add	r_SKB_DATA, r_OFF, r_TMP
+	andcc	r_TMP, 3, %g0
+	bne	load_word_unaligned
+	 nop
+	retl
+	 ld	[r_TMP], r_A
+load_word_unaligned:
+	ldub	[r_TMP + 0x0], r_OFF
+	ldub	[r_TMP + 0x1], r_TMP2
+	sll	r_OFF, 8, r_OFF
+	or	r_OFF, r_TMP2, r_OFF
+	ldub	[r_TMP + 0x2], r_TMP2
+	sll	r_OFF, 8, r_OFF
+	or	r_OFF, r_TMP2, r_OFF
+	ldub	[r_TMP + 0x3], r_TMP2
+	sll	r_OFF, 8, r_OFF
+	retl
+	 or	r_OFF, r_TMP2, r_A
+
+	.globl	bpf_jit_load_half
+bpf_jit_load_half:
+	cmp	r_OFF, 0
+	bl	bpf_slow_path_half_neg
+	 nop
+	.globl	bpf_jit_load_half_positive_offset
+bpf_jit_load_half_positive_offset:
+	sub	r_HEADLEN, r_OFF, r_TMP
+	cmp	r_TMP, 1
+	ble	bpf_slow_path_half
+	 add	r_SKB_DATA, r_OFF, r_TMP
+	andcc	r_TMP, 1, %g0
+	bne	load_half_unaligned
+	 nop
+	retl
+	 lduh	[r_TMP], r_A
+load_half_unaligned:
+	ldub	[r_TMP + 0x0], r_OFF
+	ldub	[r_TMP + 0x1], r_TMP2
+	sll	r_OFF, 8, r_OFF
+	retl
+	 or	r_OFF, r_TMP2, r_A
+
+	.globl	bpf_jit_load_byte
+bpf_jit_load_byte:
+	cmp	r_OFF, 0
+	bl	bpf_slow_path_byte_neg
+	 nop
+	.globl	bpf_jit_load_byte_positive_offset
+bpf_jit_load_byte_positive_offset:
+	cmp	r_OFF, r_HEADLEN
+	bge	bpf_slow_path_byte
+	 nop
+	retl
+	 ldub	[r_SKB_DATA + r_OFF], r_A
+
+	.globl	bpf_jit_load_byte_msh
+bpf_jit_load_byte_msh:
+	cmp	r_OFF, 0
+	bl	bpf_slow_path_byte_msh_neg
+	 nop
+	.globl	bpf_jit_load_byte_msh_positive_offset
+bpf_jit_load_byte_msh_positive_offset:
+	cmp	r_OFF, r_HEADLEN
+	bge	bpf_slow_path_byte_msh
+	 nop
+	ldub	[r_SKB_DATA + r_OFF], r_OFF
+	and	r_OFF, 0xf, r_OFF
+	retl
+	 sll	r_OFF, 2, r_X
+
+#define bpf_slow_path_common(LEN)	\
+	save	%sp, -SAVE_SZ, %sp;	\
+	mov	%i0, %o0;		\
+	mov	r_OFF, %o1;		\
+	add	%fp, SCRATCH_OFF, %o2;	\
+	call	skb_copy_bits;		\
+	 mov	(LEN), %o3;		\
+	cmp	%o0, 0;			\
+	restore;
+
+bpf_slow_path_word:
+	bpf_slow_path_common(4)
+	bl	bpf_error
+	 ld	[%sp + SCRATCH_OFF], r_A
+	retl
+	 nop
+bpf_slow_path_half:
+	bpf_slow_path_common(2)
+	bl	bpf_error
+	 lduh	[%sp + SCRATCH_OFF], r_A
+	retl
+	 nop
+bpf_slow_path_byte:
+	bpf_slow_path_common(1)
+	bl	bpf_error
+	 ldub	[%sp + SCRATCH_OFF], r_A
+	retl
+	 nop
+bpf_slow_path_byte_msh:
+	bpf_slow_path_common(1)
+	bl	bpf_error
+	 ldub	[%sp + SCRATCH_OFF], r_A
+	and	r_OFF, 0xf, r_OFF
+	retl
+	 sll	r_OFF, 2, r_X
+
+#define bpf_negative_common(LEN)			\
+	save	%sp, -SAVE_SZ, %sp;			\
+	mov	%i0, %o0;				\
+	mov	r_OFF, %o1;				\
+	call	bpf_internal_load_pointer_neg_helper;	\
+	 mov	(LEN), %o2;				\
+	mov	%o0, r_TMP;				\
+	cmp	%o0, 0;					\
+	BE_PTR(bpf_error);				\
+	 restore;
+
+bpf_slow_path_word_neg:
+	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
+	cmp	r_OFF, r_TMP
+	bl	bpf_error
+	 nop
+	.globl	bpf_jit_load_word_negative_offset
+bpf_jit_load_word_negative_offset:
+	bpf_negative_common(4)
+	andcc	r_TMP, 3, %g0
+	bne	load_word_unaligned
+	 nop
+	retl
+	 ld	[r_TMP], r_A
+
+bpf_slow_path_half_neg:
+	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
+	cmp	r_OFF, r_TMP
+	bl	bpf_error
+	 nop
+	.globl	bpf_jit_load_half_negative_offset
+bpf_jit_load_half_negative_offset:
+	bpf_negative_common(2)
+	andcc	r_TMP, 1, %g0
+	bne	load_half_unaligned
+	 nop
+	retl
+	 lduh	[r_TMP], r_A
+
+bpf_slow_path_byte_neg:
+	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
+	cmp	r_OFF, r_TMP
+	bl	bpf_error
+	 nop
+	.globl	bpf_jit_load_byte_negative_offset
+bpf_jit_load_byte_negative_offset:
+	bpf_negative_common(1)
+	retl
+	 ldub	[r_TMP], r_A
+
+bpf_slow_path_byte_msh_neg:
+	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
+	cmp	r_OFF, r_TMP
+	bl	bpf_error
+	 nop
+	.globl	bpf_jit_load_byte_msh_negative_offset
+bpf_jit_load_byte_msh_negative_offset:
+	bpf_negative_common(1)
+	ldub	[r_TMP], r_OFF
+	and	r_OFF, 0xf, r_OFF
+	retl
+	 sll	r_OFF, 2, r_X
+
+bpf_error:
+	/* Make the JIT program return zero.  The JIT prologue
+	 * stores away the original %o7 into r_saved_O7.  The
+	 * normal leaf function return is to use "retl", which
+	 * would evaluate to "jmpl %o7 + 8, %g0", but we want to
+	 * use the saved value, thus the sequence you see here.
+	 */
+	jmpl	r_saved_O7 + 8, %g0
+	 clr	%o0
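The slow paths above bounce through skb_copy_bits(); in rough C terms (a sketch, not part of this patch, helper name hypothetical) the word case amounts to:

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <asm/byteorder.h>

	static int bpf_slow_load_word_sketch(const struct sk_buff *skb,
					     int offset, u32 *val)
	{
		__be32 raw;

		if (skb_copy_bits(skb, offset, &raw, sizeof(raw)) < 0)
			return -EFAULT;		/* the stub branches to bpf_error */
		*val = be32_to_cpu(raw);	/* same value a big-endian ld yields */
		return 0;
	}
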
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
new file mode 100644
index 0000000..1a69244
--- /dev/null
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -0,0 +1,802 @@
+#include <linux/moduleloader.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/cache.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ptrace.h>
+
+#include "bpf_jit.h"
+
+int bpf_jit_enable __read_mostly;
+
+static inline bool is_simm13(unsigned int value)
+{
+	return value + 0x1000 < 0x2000;
+}
+
+static void bpf_flush_icache(void *start_, void *end_)
+{
+#ifdef CONFIG_SPARC64
+	/* Cheetah's I-cache is fully coherent.  */
+	if (tlb_type == spitfire) {
+		unsigned long start = (unsigned long) start_;
+		unsigned long end = (unsigned long) end_;
+
+		start &= ~7UL;
+		end = (end + 7UL) & ~7UL;
+		while (start < end) {
+			flushi(start);
+			start += 32;
+		}
+	}
+#endif
+}
+
+#define SEEN_DATAREF 1 /* might call external helpers */
+#define SEEN_XREG    2 /* %o2 (r_X) is used */
+#define SEEN_MEM     4 /* use mem[] for temporary storage */
+
+#define S13(X)		((X) & 0x1fff)
+#define IMMED		0x00002000
+#define RD(X)		((X) << 25)
+#define RS1(X)		((X) << 14)
+#define RS2(X)		((X))
+#define OP(X)		((X) << 30)
+#define OP2(X)		((X) << 22)
+#define OP3(X)		((X) << 19)
+#define COND(X)		((X) << 25)
+#define F1(X)		OP(X)
+#define F2(X, Y)	(OP(X) | OP2(Y))
+#define F3(X, Y)	(OP(X) | OP3(Y))
+
+#define CONDN		COND(0x0)
+#define CONDE		COND(0x1)
+#define CONDLE		COND(0x2)
+#define CONDL		COND(0x3)
+#define CONDLEU		COND(0x4)
+#define CONDCS		COND(0x5)
+#define CONDNEG		COND(0x6)
+#define CONDVC		COND(0x7)
+#define CONDA		COND(0x8)
+#define CONDNE		COND(0x9)
+#define CONDG		COND(0xa)
+#define CONDGE		COND(0xb)
+#define CONDGU		COND(0xc)
+#define CONDCC		COND(0xd)
+#define CONDPOS		COND(0xe)
+#define CONDVS		COND(0xf)
+
+#define CONDGEU		CONDCC
+#define CONDLU		CONDCS
+
+#define WDISP22(X)	(((X) >> 2) & 0x3fffff)
+
+#define BA		(F2(0, 2) | CONDA)
+#define BGU		(F2(0, 2) | CONDGU)
+#define BLEU		(F2(0, 2) | CONDLEU)
+#define BGEU		(F2(0, 2) | CONDGEU)
+#define BLU		(F2(0, 2) | CONDLU)
+#define BE		(F2(0, 2) | CONDE)
+#define BNE		(F2(0, 2) | CONDNE)
+
+#ifdef CONFIG_SPARC64
+#define BNE_PTR		(F2(0, 1) | CONDNE | (2 << 20))
+#else
+#define BNE_PTR		BNE
+#endif
+
+#define SETHI(K, REG)	\
+	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
+#define OR_LO(K, REG)	\
+	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))
+
+#define ADD		F3(2, 0x00)
+#define AND		F3(2, 0x01)
+#define ANDCC		F3(2, 0x11)
+#define OR		F3(2, 0x02)
+#define SUB		F3(2, 0x04)
+#define SUBCC		F3(2, 0x14)
+#define MUL		F3(2, 0x0a)	/* umul */
+#define DIV		F3(2, 0x0e)	/* udiv */
+#define SLL		F3(2, 0x25)
+#define SRL		F3(2, 0x26)
+#define JMPL		F3(2, 0x38)
+#define CALL		F1(1)
+#define BR		F2(0, 0x01)
+#define RD_Y		F3(2, 0x28)
+#define WR_Y		F3(2, 0x30)
+
+#define LD32		F3(3, 0x00)
+#define LD8		F3(3, 0x01)
+#define LD16		F3(3, 0x02)
+#define LD64		F3(3, 0x0b)
+#define ST32		F3(3, 0x04)
+
+#ifdef CONFIG_SPARC64
+#define LDPTR		LD64
+#define BASE_STACKFRAME	176
+#else
+#define LDPTR		LD32
+#define BASE_STACKFRAME	96
+#endif
+
+#define LD32I		(LD32 | IMMED)
+#define LD8I		(LD8 | IMMED)
+#define LD16I		(LD16 | IMMED)
+#define LD64I		(LD64 | IMMED)
+#define LDPTRI		(LDPTR | IMMED)
+#define ST32I		(ST32 | IMMED)
+
+#define emit_nop()		\
+do {				\
+	*prog++ = SETHI(0, G0);	\
+} while (0)
+
+#define emit_neg()					\
+do {	/* sub %g0, r_A, r_A */				\
+	*prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A);	\
+} while (0)
+
+#define emit_reg_move(FROM, TO)				\
+do {	/* or %g0, FROM, TO */				\
+	*prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO);	\
+} while (0)
+
+#define emit_clear(REG)					\
+do {	/* or %g0, %g0, REG */				\
+	*prog++ = OR | RS1(G0) | RS2(G0) | RD(REG);	\
+} while (0)
+
+#define emit_set_const(K, REG)					\
+do {	/* sethi %hi(K), REG */					\
+	*prog++ = SETHI(K, REG);				\
+	/* or REG, %lo(K), REG */				\
+	*prog++ = OR_LO(K, REG);				\
+} while (0)
+
+	/* Emit
+	 *
+	 *	OP	r_A, r_X, r_A
+	 */
+#define emit_alu_X(OPCODE)					\
+do {								\
+	seen |= SEEN_XREG;					\
+	*prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A);	\
+} while (0)
+
+	/* Emit either:
+	 *
+	 *	OP	r_A, K, r_A
+	 *
+	 * or
+	 *
+	 *	sethi	%hi(K), r_TMP
+	 *	or	r_TMP, %lo(K), r_TMP
+	 *	OP	r_A, r_TMP, r_A
+	 *
+	 * depending upon whether K fits in a signed 13-bit
+	 * immediate instruction field.  Emit nothing if K
+	 * is zero.
+	 */
+#define emit_alu_K(OPCODE, K)					\
+do {								\
+	if (K) {						\
+		unsigned int _insn = OPCODE;			\
+		_insn |= RS1(r_A) | RD(r_A);			\
+		if (is_simm13(K)) {				\
+			*prog++ = _insn | IMMED | S13(K);	\
+		} else {					\
+			emit_set_const(K, r_TMP);		\
+			*prog++ = _insn | RS2(r_TMP);		\
+		}						\
+	}							\
+} while (0)
+
+#define emit_loadimm(K, DEST)						\
+do {									\
+	if (is_simm13(K)) {						\
+		/* or %g0, K, DEST */					\
+		*prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST);	\
+	} else {							\
+		emit_set_const(K, DEST);				\
+	}								\
+} while (0)
+
+#define emit_loadptr(BASE, STRUCT, FIELD, DEST)				\
+do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
+	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *));	\
+	*prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST);		\
+} while (0)
+
+#define emit_load32(BASE, STRUCT, FIELD, DEST)				\
+do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
+	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32));	\
+	*prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST);		\
+} while (0)
+
+#define emit_load16(BASE, STRUCT, FIELD, DEST)				\
+do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
+	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16));	\
+	*prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST);		\
+} while (0)
+
+#define __emit_load8(BASE, STRUCT, FIELD, DEST)				\
+do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
+	*prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST);		\
+} while (0)
+
+#define emit_load8(BASE, STRUCT, FIELD, DEST)				\
+do {	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8));	\
+	__emit_load8(BASE, STRUCT, FIELD, DEST);			\
+} while (0)
+
+#define emit_ldmem(OFF, DEST)					\
+do {	*prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST);	\
+} while (0)
+
+#define emit_stmem(OFF, SRC)					\
+do {	*prog++ = ST32I | RS1(FP) | S13(-(OFF)) | RD(SRC);	\
+} while (0)
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_SPARC64
+#define emit_load_cpu(REG)						\
+	emit_load16(G6, struct thread_info, cpu, REG)
+#else
+#define emit_load_cpu(REG)						\
+	emit_load32(G6, struct thread_info, cpu, REG)
+#endif
+#else
+#define emit_load_cpu(REG)	emit_clear(REG)
+#endif
+
+#define emit_skb_loadptr(FIELD, DEST) \
+	emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST)
+#define emit_skb_load32(FIELD, DEST) \
+	emit_load32(r_SKB, struct sk_buff, FIELD, DEST)
+#define emit_skb_load16(FIELD, DEST) \
+	emit_load16(r_SKB, struct sk_buff, FIELD, DEST)
+#define __emit_skb_load8(FIELD, DEST) \
+	__emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
+#define emit_skb_load8(FIELD, DEST) \
+	emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
+
+#define emit_jmpl(BASE, IMM_OFF, LREG) \
+	*prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG))
+
+#define emit_call(FUNC)					\
+do {	void *_here = image + addrs[i] - 8;		\
+	unsigned int _off = (void *)(FUNC) - _here;	\
+	*prog++ = CALL | (((_off) >> 2) & 0x3fffffff);	\
+	emit_nop();					\
+} while (0)
+
+#define emit_branch(BR_OPC, DEST)			\
+do {	unsigned int _here = addrs[i] - 8;		\
+	*prog++ = BR_OPC | WDISP22((DEST) - _here);	\
+} while (0)
+
+#define emit_branch_off(BR_OPC, OFF)			\
+do {	*prog++ = BR_OPC | WDISP22(OFF);		\
+} while (0)
+
+#define emit_jump(DEST)		emit_branch(BA, DEST)
+
+#define emit_read_y(REG)	*prog++ = RD_Y | RD(REG)
+#define emit_write_y(REG)	*prog++ = WR_Y | IMMED | RS1(REG) | S13(0)
+
+#define emit_cmp(R1, R2) \
+	*prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0))
+
+#define emit_cmpi(R1, IMM) \
+	*prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
+
+#define emit_btst(R1, R2) \
+	*prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0))
+
+#define emit_btsti(R1, IMM) \
+	*prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
+
+#define emit_sub(R1, R2, R3) \
+	*prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3))
+
+#define emit_subi(R1, IMM, R3) \
+	*prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))
+
+#define emit_add(R1, R2, R3) \
+	*prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3))
+
+#define emit_addi(R1, IMM, R3) \
+	*prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))
+
+#define emit_alloc_stack(SZ) \
+	*prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))
+
+#define emit_release_stack(SZ) \
+	*prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))
+
+/* A note about branch offset calculations.  The addrs[] array,
+ * indexed by BPF instruction, records the address after all the
+ * sparc instructions emitted for that BPF instruction.
+ *
+ * The most common case is to emit a branch at the end of such
+ * a code sequence.  So this would be two instructions, the
+ * branch and its delay slot.
+ *
+ * Therefore by default the branch emitters calculate the branch
+ * offset field as:
+ *
+ *	destination - (addrs[i] - 8)
+ *
+ * This "addrs[i] - 8" is the address of the branch itself or
+ * what "." would be in assembler notation.  The "8" part is
+ * how we take into consideration the branch and its delay
+ * slot mentioned above.
+ *
+ * Sometimes we need to emit a branch earlier in the code
+ * sequence.  And in these situations we adjust "destination"
+ * to accommodate this difference.  For example, if we needed
+ * to emit a branch (and its delay slot) right before the
+ * final instruction emitted for a BPF opcode, we'd use
+ * "destination + 4" instead of just plain "destination" above.
+ *
+ * This is why you see all of these funny emit_branch() and
+ * emit_jump() calls with adjusted offsets.
+ */
+
+void bpf_jit_compile(struct sk_filter *fp)
+{
+	unsigned int cleanup_addr, proglen, oldproglen = 0;
+	u32 temp[8], *prog, *func, seen = 0, pass;
+	const struct sock_filter *filter = fp->insns;
+	int i, flen = fp->len, pc_ret0 = -1;
+	unsigned int *addrs;
+	void *image;
+
+	if (!bpf_jit_enable)
+		return;
+
+	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
+	if (addrs == NULL)
+		return;
+
+	/* Before the first pass, make a rough estimate of addrs[]:
+	 * each BPF instruction is translated to less than 64 bytes.
+	 */
+	for (proglen = 0, i = 0; i < flen; i++) {
+		proglen += 64;
+		addrs[i] = proglen;
+	}
+	cleanup_addr = proglen; /* epilogue address */
+	image = NULL;
+	for (pass = 0; pass < 10; pass++) {
+		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
+
+		/* no prologue/epilogue for trivial filters (RET something) */
+		proglen = 0;
+		prog = temp;
+
+		/* Prologue */
+		if (seen_or_pass0) {
+			if (seen_or_pass0 & SEEN_MEM) {
+				unsigned int sz = BASE_STACKFRAME;
+				sz += BPF_MEMWORDS * sizeof(u32);
+				emit_alloc_stack(sz);
+			}
+
+			/* Make sure we don't leak kernel memory. */
+			if (seen_or_pass0 & SEEN_XREG)
+				emit_clear(r_X);
+
+			/* If this filter needs to access skb data,
+			 * load %o4 and %o5 with:
+			 *  %o4 = skb->len - skb->data_len
+			 *  %o5 = skb->data
+			 * And also back up %o7 into r_saved_O7 so we can
+			 * invoke the stubs using 'call'.
+			 */
+			if (seen_or_pass0 & SEEN_DATAREF) {
+				emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
+				emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
+				emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
+				emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
+			}
+		}
+		emit_reg_move(O7, r_saved_O7);
+
+		switch (filter[0].code) {
+		case BPF_S_RET_K:
+		case BPF_S_LD_W_LEN:
+		case BPF_S_ANC_PROTOCOL:
+		case BPF_S_ANC_PKTTYPE:
+		case BPF_S_ANC_IFINDEX:
+		case BPF_S_ANC_MARK:
+		case BPF_S_ANC_RXHASH:
+		case BPF_S_ANC_CPU:
+		case BPF_S_ANC_QUEUE:
+		case BPF_S_LD_W_ABS:
+		case BPF_S_LD_H_ABS:
+		case BPF_S_LD_B_ABS:
+			/* The first instruction sets the A register (or is
+			 * a "RET 'constant'")
+			 */
+			break;
+		default:
+			/* Make sure we don't leak kernel information to the
+			 * user.
+			 */
+			emit_clear(r_A); /* A = 0 */
+		}
+
+		for (i = 0; i < flen; i++) {
+			unsigned int K = filter[i].k;
+			unsigned int t_offset;
+			unsigned int f_offset;
+			u32 t_op, f_op;
+			int ilen;
+
+			switch (filter[i].code) {
+			case BPF_S_ALU_ADD_X:	/* A += X; */
+				emit_alu_X(ADD);
+				break;
+			case BPF_S_ALU_ADD_K:	/* A += K; */
+				emit_alu_K(ADD, K);
+				break;
+			case BPF_S_ALU_SUB_X:	/* A -= X; */
+				emit_alu_X(SUB);
+				break;
+			case BPF_S_ALU_SUB_K:	/* A -= K */
+				emit_alu_K(SUB, K);
+				break;
+			case BPF_S_ALU_AND_X:	/* A &= X */
+				emit_alu_X(AND);
+				break;
+			case BPF_S_ALU_AND_K:	/* A &= K */
+				emit_alu_K(AND, K);
+				break;
+			case BPF_S_ALU_OR_X:	/* A |= X */
+				emit_alu_X(OR);
+				break;
+			case BPF_S_ALU_OR_K:	/* A |= K */
+				emit_alu_K(OR, K);
+				break;
+			case BPF_S_ALU_LSH_X:	/* A <<= X */
+				emit_alu_X(SLL);
+				break;
+			case BPF_S_ALU_LSH_K:	/* A <<= K */
+				emit_alu_K(SLL, K);
+				break;
+			case BPF_S_ALU_RSH_X:	/* A >>= X */
+				emit_alu_X(SRL);
+				break;
+			case BPF_S_ALU_RSH_K:	/* A >>= K */
+				emit_alu_K(SRL, K);
+				break;
+			case BPF_S_ALU_MUL_X:	/* A *= X; */
+				emit_alu_X(MUL);
+				break;
+			case BPF_S_ALU_MUL_K:	/* A *= K */
+				emit_alu_K(MUL, K);
+				break;
+			case BPF_S_ALU_DIV_K:	/* A /= K */
+				emit_alu_K(MUL, K);
+				emit_read_y(r_A);
+				break;
+			case BPF_S_ALU_DIV_X:	/* A /= X; */
+				emit_cmpi(r_X, 0);
+				if (pc_ret0 > 0) {
+					t_offset = addrs[pc_ret0 - 1];
+#ifdef CONFIG_SPARC32
+					emit_branch(BE, t_offset + 20);
+#else
+					emit_branch(BE, t_offset + 8);
+#endif
+					emit_nop(); /* delay slot */
+				} else {
+					emit_branch_off(BNE, 16);
+					emit_nop();
+#ifdef CONFIG_SPARC32
+					emit_jump(cleanup_addr + 20);
+#else
+					emit_jump(cleanup_addr + 8);
+#endif
+					emit_clear(r_A);
+				}
+				emit_write_y(G0);
+#ifdef CONFIG_SPARC32
+				/* The Sparc v8 architecture requires
+				 * three instructions between a %y
+				 * register write and the first use.
+				 */
+				emit_nop();
+				emit_nop();
+				emit_nop();
+#endif
+				emit_alu_X(DIV);
+				break;
+			case BPF_S_ALU_NEG:
+				emit_neg();
+				break;
+			case BPF_S_RET_K:
+				if (!K) {
+					if (pc_ret0 == -1)
+						pc_ret0 = i;
+					emit_clear(r_A);
+				} else {
+					emit_loadimm(K, r_A);
+				}
+				/* Fallthrough */
+			case BPF_S_RET_A:
+				if (seen_or_pass0) {
+					if (i != flen - 1) {
+						emit_jump(cleanup_addr);
+						emit_nop();
+						break;
+					}
+					if (seen_or_pass0 & SEEN_MEM) {
+						unsigned int sz = BASE_STACKFRAME;
+						sz += BPF_MEMWORDS * sizeof(u32);
+						emit_release_stack(sz);
+					}
+				}
+				/* jmpl %r_saved_O7 + 8, %g0 */
+				emit_jmpl(r_saved_O7, 8, G0);
+				emit_reg_move(r_A, O0); /* delay slot */
+				break;
+			case BPF_S_MISC_TAX:
+				seen |= SEEN_XREG;
+				emit_reg_move(r_A, r_X);
+				break;
+			case BPF_S_MISC_TXA:
+				seen |= SEEN_XREG;
+				emit_reg_move(r_X, r_A);
+				break;
+			case BPF_S_ANC_CPU:
+				emit_load_cpu(r_A);
+				break;
+			case BPF_S_ANC_PROTOCOL:
+				emit_skb_load16(protocol, r_A);
+				break;
+#if 0
+				/* GCC won't let us take the address of
+				 * a bit field even though we very much
+				 * know what we are doing here.
+				 */
+			case BPF_S_ANC_PKTTYPE:
+				__emit_skb_load8(pkt_type, r_A);
+				emit_alu_K(SRL, 5);
+				break;
+#endif
+			case BPF_S_ANC_IFINDEX:
+				emit_skb_loadptr(dev, r_A);
+				emit_cmpi(r_A, 0);
+				emit_branch(BNE_PTR, cleanup_addr + 4);
+				emit_nop();
+				emit_load32(r_A, struct net_device, ifindex, r_A);
+				break;
+			case BPF_S_ANC_MARK:
+				emit_skb_load32(mark, r_A);
+				break;
+			case BPF_S_ANC_QUEUE:
+				emit_skb_load16(queue_mapping, r_A);
+				break;
+			case BPF_S_ANC_HATYPE:
+				emit_skb_loadptr(dev, r_A);
+				emit_cmpi(r_A, 0);
+				emit_branch(BNE_PTR, cleanup_addr + 4);
+				emit_nop();
+				emit_load16(r_A, struct net_device, type, r_A);
+				break;
+			case BPF_S_ANC_RXHASH:
+				emit_skb_load32(rxhash, r_A);
+				break;
+
+			case BPF_S_LD_IMM:
+				emit_loadimm(K, r_A);
+				break;
+			case BPF_S_LDX_IMM:
+				emit_loadimm(K, r_X);
+				break;
+			case BPF_S_LD_MEM:
+				emit_ldmem(K * 4, r_A);
+				break;
+			case BPF_S_LDX_MEM:
+				emit_ldmem(K * 4, r_X);
+				break;
+			case BPF_S_ST:
+				emit_stmem(K * 4, r_A);
+				break;
+			case BPF_S_STX:
+				emit_stmem(K * 4, r_X);
+				break;
+
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+
+			case BPF_S_LD_W_ABS:
+				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
+common_load:			seen |= SEEN_DATAREF;
+				emit_loadimm(K, r_OFF);
+				emit_call(func);
+				break;
+			case BPF_S_LD_H_ABS:
+				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
+				goto common_load;
+			case BPF_S_LD_B_ABS:
+				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
+				goto common_load;
+			case BPF_S_LDX_B_MSH:
+				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
+				goto common_load;
+			case BPF_S_LD_W_IND:
+				func = bpf_jit_load_word;
+common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
+				if (K) {
+					if (is_simm13(K)) {
+						emit_addi(r_X, K, r_OFF);
+					} else {
+						emit_loadimm(K, r_TMP);
+						emit_add(r_X, r_TMP, r_OFF);
+					}
+				} else {
+					emit_reg_move(r_X, r_OFF);
+				}
+				emit_call(func);
+				break;
+			case BPF_S_LD_H_IND:
+				func = bpf_jit_load_half;
+				goto common_load_ind;
+			case BPF_S_LD_B_IND:
+				func = bpf_jit_load_byte;
+				goto common_load_ind;
+			case BPF_S_JMP_JA:
+				emit_jump(addrs[i + K]);
+				emit_nop();
+				break;
+
+#define COND_SEL(CODE, TOP, FOP)	\
+	case CODE:			\
+		t_op = TOP;		\
+		f_op = FOP;		\
+		goto cond_branch
+
+			COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU);
+			COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU);
+			COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE);
+			COND_SEL(BPF_S_JMP_JSET_K, BNE, BE);
+			COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU);
+			COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU);
+			COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE);
+			COND_SEL(BPF_S_JMP_JSET_X, BNE, BE);
+
+cond_branch:			f_offset = addrs[i + filter[i].jf];
+				t_offset = addrs[i + filter[i].jt];
+
+				/* same targets, can avoid doing the test :) */
+				if (filter[i].jt == filter[i].jf) {
+					emit_jump(t_offset);
+					emit_nop();
+					break;
+				}
+
+				switch (filter[i].code) {
+				case BPF_S_JMP_JGT_X:
+				case BPF_S_JMP_JGE_X:
+				case BPF_S_JMP_JEQ_X:
+					seen |= SEEN_XREG;
+					emit_cmp(r_A, r_X);
+					break;
+				case BPF_S_JMP_JSET_X:
+					seen |= SEEN_XREG;
+					emit_btst(r_A, r_X);
+					break;
+				case BPF_S_JMP_JEQ_K:
+				case BPF_S_JMP_JGT_K:
+				case BPF_S_JMP_JGE_K:
+					if (is_simm13(K)) {
+						emit_cmpi(r_A, K);
+					} else {
+						emit_loadimm(K, r_TMP);
+						emit_cmp(r_A, r_TMP);
+					}
+					break;
+				case BPF_S_JMP_JSET_K:
+					if (is_simm13(K)) {
+						emit_btsti(r_A, K);
+					} else {
+						emit_loadimm(K, r_TMP);
+						emit_btst(r_A, r_TMP);
+					}
+					break;
+				}
+				if (filter[i].jt != 0) {
+					if (filter[i].jf)
+						t_offset += 8;
+					emit_branch(t_op, t_offset);
+					emit_nop(); /* delay slot */
+					if (filter[i].jf) {
+						emit_jump(f_offset);
+						emit_nop();
+					}
+					break;
+				}
+				emit_branch(f_op, f_offset);
+				emit_nop(); /* delay slot */
+				break;
+
+			default:
+				/* hmm, too complex filter, give up with jit compiler */
+				goto out;
+			}
+			ilen = (void *) prog - (void *) temp;
+			if (image) {
+				if (unlikely(proglen + ilen > oldproglen)) {
+					pr_err("bpf_jit_compile fatal error\n");
+					kfree(addrs);
+					module_free(NULL, image);
+					return;
+				}
+				memcpy(image + proglen, temp, ilen);
+			}
+			proglen += ilen;
+			addrs[i] = proglen;
+			prog = temp;
+		}
+		/* The last BPF instruction is always a RET:
+		 * use it to give the cleanup instruction(s) address.
+		 */
+		cleanup_addr = proglen - 8; /* jmpl; mov r_A,%o0; */
+		if (seen_or_pass0 & SEEN_MEM)
+			cleanup_addr -= 4; /* add %sp, X, %sp; */
+
+		if (image) {
+			if (proglen != oldproglen)
+				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n",
+				       proglen, oldproglen);
+			break;
+		}
+		if (proglen == oldproglen) {
+			image = module_alloc(max_t(unsigned int,
+						   proglen,
+						   sizeof(struct work_struct)));
+			if (!image)
+				goto out;
+		}
+		oldproglen = proglen;
+	}
+
+	if (bpf_jit_enable > 1)
+		pr_err("flen=%d proglen=%u pass=%d image=%p\n",
+		       flen, proglen, pass, image);
+
+	if (image) {
+		if (bpf_jit_enable > 1)
+			print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
+				       16, 1, image, proglen, false);
+		bpf_flush_icache(image, image + proglen);
+		fp->bpf_func = (void *)image;
+	}
+out:
+	kfree(addrs);
+	return;
+}
+
+static void jit_free_defer(struct work_struct *arg)
+{
+	module_free(NULL, arg);
+}
+
+/* Run from softirq context, so we must use a work_struct to call
+ * module_free() from process context.
+ */
+void bpf_jit_free(struct sk_filter *fp)
+{
+	if (fp->bpf_func != sk_run_filter) {
+		struct work_struct *work = (struct work_struct *)fp->bpf_func;
+
+		INIT_WORK(work, jit_free_defer);
+		schedule_work(work);
+	}
+}
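A small standalone sketch of the immediate handling used by emit_alu_K() and emit_set_const() above (not part of this patch, names hypothetical): SPARC arithmetic instructions carry a signed 13-bit immediate, so constants in [-4096, 4095] are encoded directly and anything larger is first materialized with sethi (upper 22 bits) plus or (low 10 bits).

	#include <stdbool.h>
	#include <stdint.h>

	static bool fits_simm13(uint32_t value)
	{
		/* same wrap trick as is_simm13(): true iff (int32_t)value is in [-4096, 4095] */
		return value + 0x1000 < 0x2000;
	}

	static void split_hi_lo(uint32_t k, uint32_t *hi22, uint32_t *lo10)
	{
		*hi22 = k >> 10;	/* field for "sethi %hi(k), reg"   */
		*lo10 = k & 0x3ff;	/* field for "or reg, %lo(k), reg" */
		/* (*hi22 << 10) | *lo10 == k for any 32-bit k, so the pair
		 * reassembles the constant exactly.
		 */
	}
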
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 11270ca..96033e2 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -12,7 +12,7 @@
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_IRQ_SHOW
 	select SYS_HYPERVISOR
-	select ARCH_HAVE_NMI_SAFE_CMPXCHG if !M386
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
 # FIXME: investigate whether we need/want these options.
 #	select HAVE_IOREMAP_PROT
@@ -69,6 +69,9 @@
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool y
 
+config NEED_DMA_MAP_STATE
+	def_bool y
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
@@ -118,7 +121,7 @@
 
 config ARCH_DEFCONFIG
 	string
-	default "arch/tile/configs/tile_defconfig" if !TILEGX
+	default "arch/tile/configs/tilepro_defconfig" if !TILEGX
 	default "arch/tile/configs/tilegx_defconfig" if TILEGX
 
 source "init/Kconfig"
@@ -240,6 +243,7 @@
 
 config PAGE_OFFSET
 	hex
+	depends on !64BIT
 	default 0xF0000000 if VMSPLIT_3_75G
 	default 0xE0000000 if VMSPLIT_3_5G
 	default 0xB0000000 if VMSPLIT_2_75G
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 17acce7..9520bc5 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -30,7 +30,8 @@
 KBUILD_CFLAGS   += $(CONFIG_DEBUG_EXTRA_FLAGS)
 endif
 
-LIBGCC_PATH     := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+LIBGCC_PATH     := \
+  $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
 
 # Provide the path to use for "make defconfig".
 KBUILD_DEFCONFIG := $(ARCH)_defconfig
@@ -53,8 +54,6 @@
 # See arch/tile/Kbuild for content of core part of the kernel
 core-y		+= arch/tile/
 
-core-$(CONFIG_KVM) += arch/tile/kvm/
-
 ifdef TILERA_ROOT
 INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
 endif
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index f548efe..d6ba449 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -60,8 +60,8 @@
 	_concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
 #define SPR_IPI_EVENT_RESET_K \
 	_concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_MASK_SET_K \
-	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_SET_K \
+	_concat4(SPR_IPI_EVENT_SET_, CONFIG_KERNEL_PL,,)
 #define INT_IPI_K \
 	_concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
 
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index bb696da..f246142 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -17,6 +17,8 @@
 #ifndef _ASM_TILE_ATOMIC_H
 #define _ASM_TILE_ATOMIC_H
 
+#include <asm/cmpxchg.h>
+
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
@@ -121,54 +123,6 @@
  */
 #define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
 
-/* Nonexistent functions intended to cause link errors. */
-extern unsigned long __xchg_called_with_bad_pointer(void);
-extern unsigned long __cmpxchg_called_with_bad_pointer(void);
-
-#define xchg(ptr, x)							\
-	({								\
-		typeof(*(ptr)) __x;					\
-		switch (sizeof(*(ptr))) {				\
-		case 4:							\
-			__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
-				(atomic_t *)(ptr),			\
-				(u32)(typeof((x)-(x)))(x));		\
-			break;						\
-		case 8:							\
-			__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
-				(atomic64_t *)(ptr),			\
-				(u64)(typeof((x)-(x)))(x));		\
-			break;						\
-		default:						\
-			__xchg_called_with_bad_pointer();		\
-		}							\
-		__x;							\
-	})
-
-#define cmpxchg(ptr, o, n)						\
-	({								\
-		typeof(*(ptr)) __x;					\
-		switch (sizeof(*(ptr))) {				\
-		case 4:							\
-			__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
-				(atomic_t *)(ptr),			\
-				(u32)(typeof((o)-(o)))(o),		\
-				(u32)(typeof((n)-(n)))(n));		\
-			break;						\
-		case 8:							\
-			__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
-				(atomic64_t *)(ptr),			\
-				(u64)(typeof((o)-(o)))(o),		\
-				(u64)(typeof((n)-(n)))(n));		\
-			break;						\
-		default:						\
-			__cmpxchg_called_with_bad_pointer();		\
-		}							\
-		__x;							\
-	})
-
-#define tas(ptr) (xchg((ptr), 1))
-
 #endif /* __ASSEMBLY__ */
 
 #ifndef __tilegx__
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 466dc4a..54d1da8 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -200,7 +200,7 @@
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
+ * Returns non-zero if @v was not @u, and zero otherwise.
  */
 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 {
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index 58d021a..60b87ee 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -38,10 +38,10 @@
 
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
-	unsigned long old, mask = (1UL << (nr % BITS_PER_LONG));
-	long guess, oldval;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	unsigned long guess, oldval;
 	addr += nr / BITS_PER_LONG;
-	old = *addr;
+	oldval = *addr;
 	do {
 		guess = oldval;
 		oldval = atomic64_cmpxchg((atomic64_t *)addr,
@@ -85,7 +85,7 @@
 				      volatile unsigned long *addr)
 {
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
-	long guess, oldval = *addr;
+	unsigned long guess, oldval;
 	addr += nr / BITS_PER_LONG;
 	oldval = *addr;
 	do {
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
new file mode 100644
index 0000000..276f067
--- /dev/null
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -0,0 +1,73 @@
+/*
+ * cmpxchg.h -- forked from asm/atomic.h with this copyright:
+ *
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ */
+
+#ifndef _ASM_TILE_CMPXCHG_H
+#define _ASM_TILE_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+/* Nonexistent functions intended to cause link errors. */
+extern unsigned long __xchg_called_with_bad_pointer(void);
+extern unsigned long __cmpxchg_called_with_bad_pointer(void);
+
+#define xchg(ptr, x)							\
+	({								\
+		typeof(*(ptr)) __x;					\
+		switch (sizeof(*(ptr))) {				\
+		case 4:							\
+			__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
+				(atomic_t *)(ptr),			\
+				(u32)(typeof((x)-(x)))(x));		\
+			break;						\
+		case 8:							\
+			__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
+				(atomic64_t *)(ptr),			\
+				(u64)(typeof((x)-(x)))(x));		\
+			break;						\
+		default:						\
+			__xchg_called_with_bad_pointer();		\
+		}							\
+		__x;							\
+	})
+
+#define cmpxchg(ptr, o, n)						\
+	({								\
+		typeof(*(ptr)) __x;					\
+		switch (sizeof(*(ptr))) {				\
+		case 4:							\
+			__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
+				(atomic_t *)(ptr),			\
+				(u32)(typeof((o)-(o)))(o),		\
+				(u32)(typeof((n)-(n)))(n));		\
+			break;						\
+		case 8:							\
+			__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
+				(atomic64_t *)(ptr),			\
+				(u64)(typeof((o)-(o)))(o),		\
+				(u64)(typeof((n)-(n)))(n));		\
+			break;						\
+		default:						\
+			__cmpxchg_called_with_bad_pointer();		\
+		}							\
+		__x;							\
+	})
+
+#define tas(ptr) (xchg((ptr), 1))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_TILE_CMPXCHG_H */
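The macros moved into this new header dispatch on sizeof(*(ptr)) and fall back to a deliberately undefined function, so an unsupported operand width becomes a link error rather than silent misbehavior. As a rough sketch (not part of this patch; counter_add() is a hypothetical helper), a caller might use cmpxchg() for a lock-free read-modify-write like this:

    /* Minimal sketch: lock-free add built on the cmpxchg() macro above. */
    static inline int counter_add(int *counter, int delta)
    {
            int old, new;

            do {
                    old = *counter;          /* snapshot the current value */
                    new = old + delta;       /* compute the desired value  */
            } while (cmpxchg(counter, old, new) != old);   /* retry if raced */

            return new;
    }

A counter of an unsupported size (say, a 2-byte field) would instead fail to link via __cmpxchg_called_with_bad_pointer().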
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index f80f8ce..33cff9a3 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -21,7 +21,7 @@
 #define NR_IRQS 32
 
 /* IRQ numbers used for linux IPIs. */
-#define IRQ_RESCHEDULE 1
+#define IRQ_RESCHEDULE 0
 
 #define irq_canonicalize(irq)   (irq)
 
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 72be590..5f8b6a0 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -137,7 +137,7 @@
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__insn_mf();
-	rw->lock = 0;
+	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index 4d97a2d..0e9d382a 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -25,7 +25,6 @@
 struct KBacktraceIterator {
 	BacktraceIterator it;
 	struct task_struct *task;     /* task we are backtracing */
-	pte_t *pgtable;		      /* page table for user space access */
 	int end;		      /* iteration complete. */
 	int new_context;              /* new context is starting */
 	int profile;                  /* profiling, so stop on async intrpt */
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index 5f20f92..e28c3df4 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -64,7 +64,11 @@
 
 
 #ifdef __tilegx__
+/* kernel/single_step.c */
 void gx_singlestep_handle(struct pt_regs *, int fault_num);
+
+/* kernel/intvec_64.S */
+void fill_ra_stack(void);
 #endif
 
-#endif /* _ASM_TILE_SYSCALLS_H */
+#endif /* _ASM_TILE_TRAPS_H */
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 431e9ae6..ec91568 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -85,6 +85,7 @@
 /* Loop forever on a nap during SMP boot. */
 STD_ENTRY(smp_nap)
 	nap
+	nop       /* avoid provoking the icache prefetch with a jump */
 	j smp_nap /* we are not architecturally guaranteed not to exit nap */
 	jrp lr    /* clue in the backtracer */
 	STD_ENDPROC(smp_nap)
@@ -105,5 +106,6 @@
 	.global _cpu_idle_nap
 _cpu_idle_nap:
 	nap
+	nop       /* avoid provoking the icache prefetch with a jump */
 	jrp lr
 	STD_ENDPROC(_cpu_idle)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index aecc8ed..5d56a1e 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -799,6 +799,10 @@
  * This routine takes a boolean in r30 indicating if this is an NMI.
  * If so, we also expect a boolean in r31 indicating whether to
  * re-enable the oprofile interrupts.
+ *
+ * Note that .Lresume_userspace is jumped to directly in several
+ * places, and we need to make sure r30 is set correctly in those
+ * callers as well.
  */
 STD_ENTRY(interrupt_return)
 	/* If we're resuming to kernel space, don't check thread flags. */
@@ -1237,7 +1241,10 @@
 	bzt     r30, 1f
 	jal	do_syscall_trace
 	FEEDBACK_REENTER(handle_syscall)
-1:	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+1:	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Linvalid_syscall:
 	/* Report an invalid syscall back to the user program */
@@ -1246,7 +1253,10 @@
 	 movei  r28, -ENOSYS
 	}
 	sw      r29, r28
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(handle_syscall)
 
 	/* Return the address for oprofile to suppress in backtraces. */
@@ -1262,7 +1272,10 @@
 	jal     sim_notify_fork
 	jal     schedule_tail
 	FEEDBACK_REENTER(ret_from_fork)
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(ret_from_fork)
 
 	/*
@@ -1376,7 +1389,10 @@
 
 	jal     send_sigtrap    /* issue a SIGTRAP */
 	FEEDBACK_REENTER(handle_ill)
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Ldispatch_normal_ill:
 	{
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 79c93e1..49d9d66 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -22,6 +22,7 @@
 #include <asm/irqflags.h>
 #include <asm/asm-offsets.h>
 #include <asm/types.h>
+#include <asm/signal.h>
 #include <hv/hypervisor.h>
 #include <arch/abi.h>
 #include <arch/interrupts.h>
@@ -605,6 +606,10 @@
  * This routine takes a boolean in r30 indicating if this is an NMI.
  * If so, we also expect a boolean in r31 indicating whether to
  * re-enable the oprofile interrupts.
+ *
+ * Note that .Lresume_userspace is jumped to directly in several
+ * places, and we need to make sure r30 is set correctly in those
+ * callers as well.
  */
 STD_ENTRY(interrupt_return)
 	/* If we're resuming to kernel space, don't check thread flags. */
@@ -1039,11 +1044,28 @@
 
 	/* Do syscall trace again, if requested. */
 	ld	r30, r31
-	andi    r30, r30, _TIF_SYSCALL_TRACE
-	beqzt	r30, 1f
+	andi    r0, r30, _TIF_SYSCALL_TRACE
+	{
+	 andi    r0, r30, _TIF_SINGLESTEP
+	 beqzt   r0, 1f
+	}
 	jal	do_syscall_trace
 	FEEDBACK_REENTER(handle_syscall)
-1:	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	andi    r0, r30, _TIF_SINGLESTEP
+
+1:	beqzt	r0, 2f
+
+	/* Single stepping -- notify ptrace. */
+	{
+	 movei   r0, SIGTRAP
+	 jal     ptrace_notify
+	}
+	FEEDBACK_REENTER(handle_syscall)
+
+2:	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Lcompat_syscall:
 	/*
@@ -1077,7 +1099,10 @@
 	 movei  r28, -ENOSYS
 	}
 	st      r29, r28
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(handle_syscall)
 
 	/* Return the address for oprofile to suppress in backtraces. */
@@ -1093,7 +1118,10 @@
 	jal     sim_notify_fork
 	jal     schedule_tail
 	FEEDBACK_REENTER(ret_from_fork)
-	j       .Lresume_userspace
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(ret_from_fork)
 
 /* Various stub interrupt handlers and syscall handlers */
@@ -1156,6 +1184,18 @@
 	push_extra_callee_saves r0
 	j       do_trap
 
+/* Fill the return address stack with nonzero entries. */
+STD_ENTRY(fill_ra_stack)
+	{
+	 move	r0, lr
+	 jal	1f
+	}
+1:	jal	2f
+2:	jal	3f
+3:	jal	4f
+4:	jrp	r0
+	STD_ENDPROC(fill_ra_stack)
+
 /* Include .intrpt1 array of interrupt vectors */
 	.section ".intrpt1", "ax"
 
@@ -1166,7 +1206,7 @@
 #define do_hardwall_trap bad_intr
 #endif
 
-	int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
+	int_hand     INT_MEM_ERROR, MEM_ERROR, do_trap
 	int_hand     INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
 #if CONFIG_KERNEL_PL == 2
 	int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index b90ab99..98d4769 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -67,6 +67,8 @@
 	area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
 	if (!area)
 		goto error;
+	area->nr_pages = npages;
+	area->pages = pages;
 
 	if (map_vm_area(area, prot_rwx, &pages)) {
 		vunmap(area->addr);
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 7a93270..446a7f5 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -146,7 +146,6 @@
 	},
 	{}
 };
-#endif
 
 static struct ctl_path tile_path[] = {
 	{ .procname = "tile" },
@@ -155,10 +154,9 @@
 
 static int __init proc_sys_tile_init(void)
 {
-#ifndef __tilegx__  /* FIXME: GX: no support for unaligned access yet */
 	register_sysctl_paths(tile_path, unaligned_table);
-#endif
 	return 0;
 }
 
 arch_initcall(proc_sys_tile_init);
+#endif
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 30caeca..2d5ef61 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -28,6 +28,7 @@
 #include <linux/tracehook.h>
 #include <linux/signal.h>
 #include <asm/stack.h>
+#include <asm/switch_to.h>
 #include <asm/homecache.h>
 #include <asm/syscalls.h>
 #include <asm/traps.h>
@@ -285,7 +286,7 @@
 	static struct task_struct corrupt = { .comm = "<corrupt>" };
 	struct task_struct *tsk = current;
 	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
-		     (void *)tsk > high_memory ||
+		     (high_memory && (void *)tsk > high_memory) ||
 		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
 		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
 		tsk = &corrupt;
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 92a94f4..bff23f4 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -103,13 +103,11 @@
 
 static int __init setup_maxmem(char *str)
 {
-	long maxmem_mb;
-	if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
-	    maxmem_mb == 0)
+	unsigned long long maxmem;
+	if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
 		return -EINVAL;
 
-	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
-		(HPAGE_SHIFT - PAGE_SHIFT);
+	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used to no more than %dMB\n",
 	       maxmem_pfn >> (20 - PAGE_SHIFT));
 	return 0;
@@ -119,14 +117,15 @@
 static int __init setup_maxnodemem(char *str)
 {
 	char *endp;
-	long maxnodemem_mb, node;
+	unsigned long long maxnodemem;
+	long node;
 
 	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
-	if (node >= MAX_NUMNODES || *endp != ':' ||
-	    strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
+	if (node >= MAX_NUMNODES || *endp != ':')
 		return -EINVAL;
 
-	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
+	maxnodemem = memparse(endp+1, NULL);
+	maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
 		(HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
 	       node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
@@ -913,6 +912,13 @@
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
+/*
+ * Note that the kernel can potentially support compression formats
+ * other than gzip, though we don't do so by default.  If we ever
+ * decide to do so, we can either look for other filename extensions,
+ * or just allow a file with this name to be compressed with an
+ * arbitrary compressor (somewhat counterintuitively).
+ */
 static int __initdata set_initramfs_file;
 static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
 
@@ -928,9 +934,9 @@
 early_param("initramfs_file", setup_initramfs_file);
 
 /*
- * We look for an additional "initramfs.cpio.gz" file in the hvfs.
+ * We look for an "initramfs.cpio.gz" file in the hvfs.
  * If there is one, we allocate some memory for it and it will be
- * unpacked to the initramfs after any built-in initramfs_data.
+ * unpacked to the initramfs.
  */
 static void __init load_hv_initrd(void)
 {
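A note on the setup.c hunks above: switching from a hand-rolled megabyte count to memparse() changes the accepted syntax for maxmem/maxnodemem. The fragment below is only an illustration of that difference using the kernel's memparse() helper; it is not part of the patch.

    /* Illustration: memparse() scales by an optional k/m/g suffix, so both a
     * raw byte count and a human-readable size are accepted on the command
     * line, where the old code understood only a bare number of megabytes.
     */
    unsigned long long bytes = memparse("4g", NULL);   /* 4 << 30 bytes */
    unsigned long pfn_limit =
            (bytes >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);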
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index bc1eb58..89529c9 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -153,6 +153,25 @@
 	if (((unsigned long)addr % size) == 0)
 		return bundle;
 
+	/*
+	 * Return SIGBUS with the unaligned address, if requested.
+	 * Note that we return SIGBUS even for completely invalid addresses
+	 * as long as they are in fact unaligned; this matches what the
+	 * tilepro hardware would be doing, if it could provide us with the
+	 * actual bad address in an SPR, which it doesn't.
+	 */
+	if (unaligned_fixup == 0) {
+		siginfo_t info = {
+			.si_signo = SIGBUS,
+			.si_code = BUS_ADRALN,
+			.si_addr = addr
+		};
+		trace_unhandled_signal("unaligned trap", regs,
+				       (unsigned long)addr, SIGBUS);
+		force_sig_info(info.si_signo, &info, current);
+		return (tilepro_bundle_bits) 0;
+	}
+
 #ifndef __LITTLE_ENDIAN
 # error We assume little-endian representation with copy_xx_user size 2 here
 #endif
@@ -192,18 +211,6 @@
 		return (tile_bundle_bits) 0;
 	}
 
-	if (unaligned_fixup == 0) {
-		siginfo_t info = {
-			.si_signo = SIGBUS,
-			.si_code = BUS_ADRALN,
-			.si_addr = addr
-		};
-		trace_unhandled_signal("unaligned trap", regs,
-				       (unsigned long)addr, SIGBUS);
-		force_sig_info(info.si_signo, &info, current);
-		return (tile_bundle_bits) 0;
-	}
-
 	if (unaligned_printk || unaligned_fixup_count == 0) {
 		pr_info("Process %d/%s: PC %#lx: Fixup of"
 			" unaligned %s at %#lx.\n",
@@ -339,12 +346,10 @@
 		}
 
 		/* allocate a cache line of writable, executable memory */
-		down_write(&current->mm->mmap_sem);
-		buffer = (void __user *) do_mmap(NULL, 0, 64,
+		buffer = (void __user *) vm_mmap(NULL, 0, 64,
 					  PROT_EXEC | PROT_READ | PROT_WRITE,
 					  MAP_PRIVATE | MAP_ANONYMOUS,
 					  0);
-		up_write(&current->mm->mmap_sem);
 
 		if (IS_ERR((void __force *)buffer)) {
 			kfree(state);
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index a44e103..91da0f7 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -103,7 +103,7 @@
 	set_cpu_online(smp_processor_id(), 0);
 	arch_local_irq_disable_all();
 	for (;;)
-		asm("nap");
+		asm("nap; nop");
 }
 
 /* This function calls the 'stop' function on all other CPUs in the system. */
@@ -113,6 +113,12 @@
 	send_IPI_allbutself(MSG_TAG_STOP_CPU);
 }
 
+/* On panic, just wait; we may get an smp_send_stop() later on. */
+void panic_smp_self_stop(void)
+{
+	while (1)
+		asm("nap; nop");
+}
 
 /*
  * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index b949edc..172aef7 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -196,6 +196,8 @@
 	/* This must be done before setting cpu_online_mask */
 	wmb();
 
+	notify_cpu_starting(smp_processor_id());
+
 	/*
 	 * We need to hold call_lock, so there is no inconsistency
 	 * between the time smp_call_function() determines number of
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 37ee4d0..b2f44c2 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -21,10 +21,12 @@
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/mmzone.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
 #include <asm/ucontext.h>
+#include <asm/switch_to.h>
 #include <asm/sigframe.h>
 #include <asm/stack.h>
 #include <arch/abi.h>
@@ -44,72 +46,23 @@
 	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
 }
 
-/* Is address valid for reading? */
-static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
-{
-	HV_PTE *l1_pgtable = kbt->pgtable;
-	HV_PTE *l2_pgtable;
-	unsigned long pfn;
-	HV_PTE pte;
-	struct page *page;
-
-	if (l1_pgtable == NULL)
-		return 0;	/* can't read user space in other tasks */
-
-#ifdef CONFIG_64BIT
-	/* Find the real l1_pgtable by looking in the l0_pgtable. */
-	pte = l1_pgtable[HV_L0_INDEX(address)];
-	if (!hv_pte_get_present(pte))
-		return 0;
-	pfn = hv_pte_get_pfn(pte);
-	if (pte_huge(pte)) {
-		if (!pfn_valid(pfn)) {
-			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
-			return 0;
-		}
-		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-	}
-	page = pfn_to_page(pfn);
-	BUG_ON(PageHighMem(page));  /* No HIGHMEM on 64-bit. */
-	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
-#endif
-	pte = l1_pgtable[HV_L1_INDEX(address)];
-	if (!hv_pte_get_present(pte))
-		return 0;
-	pfn = hv_pte_get_pfn(pte);
-	if (pte_huge(pte)) {
-		if (!pfn_valid(pfn)) {
-			pr_err("huge page has bad pfn %#lx\n", pfn);
-			return 0;
-		}
-		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-	}
-
-	page = pfn_to_page(pfn);
-	if (PageHighMem(page)) {
-		pr_err("L2 page table not in LOWMEM (%#llx)\n",
-		       HV_PFN_TO_CPA(pfn));
-		return 0;
-	}
-	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
-	pte = l2_pgtable[HV_L2_INDEX(address)];
-	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-}
-
 /* Callback for backtracer; basically a glorified memcpy */
 static bool read_memory_func(void *result, unsigned long address,
 			     unsigned int size, void *vkbt)
 {
 	int retval;
 	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
+
+	if (address == 0)
+		return 0;
 	if (__kernel_text_address(address)) {
 		/* OK to read kernel code. */
 	} else if (address >= PAGE_OFFSET) {
 		/* We only tolerate kernel-space reads of this task's stack */
 		if (!in_kernel_stack(kbt, address))
 			return 0;
-	} else if (!valid_address(kbt, address)) {
-		return 0;	/* invalid user-space address */
+	} else if (!kbt->is_current) {
+		return 0;	/* can't read from other user address spaces */
 	}
 	pagefault_disable();
 	retval = __copy_from_user_inatomic(result,
@@ -127,6 +80,8 @@
 	unsigned long sp = kbt->it.sp;
 	struct pt_regs *p;
 
+	if (sp % sizeof(long) != 0)
+		return NULL;
 	if (!in_kernel_stack(kbt, sp))
 		return NULL;
 	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
@@ -169,27 +124,27 @@
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
-static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
+static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
+				      struct rt_sigframe* kframe)
 {
 	BacktraceIterator *b = &kbt->it;
 
-	if (b->pc == VDSO_BASE) {
-		struct rt_sigframe *frame;
-		unsigned long sigframe_top =
-			b->sp + sizeof(struct rt_sigframe) - 1;
-		if (!valid_address(kbt, b->sp) ||
-		    !valid_address(kbt, sigframe_top)) {
-			if (kbt->verbose)
-				pr_err("  (odd signal: sp %#lx?)\n",
-				       (unsigned long)(b->sp));
+	if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET &&
+	    b->sp % sizeof(long) == 0) {
+		int retval;
+		pagefault_disable();
+		retval = __copy_from_user_inatomic(
+			kframe, (void __user __force *)b->sp,
+			sizeof(*kframe));
+		pagefault_enable();
+		if (retval != 0 ||
+		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
 			return NULL;
-		}
-		frame = (struct rt_sigframe *)b->sp;
 		if (kbt->verbose) {
 			pr_err("  <received signal %d>\n",
-			       frame->info.si_signo);
+			       kframe->info.si_signo);
 		}
-		return (struct pt_regs *)&frame->uc.uc_mcontext;
+		return (struct pt_regs *)&kframe->uc.uc_mcontext;
 	}
 	return NULL;
 }
@@ -202,10 +157,11 @@
 static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
 {
 	struct pt_regs *p;
+	struct rt_sigframe kframe;
 
 	p = valid_fault_handler(kbt);
 	if (p == NULL)
-		p = valid_sigframe(kbt);
+		p = valid_sigframe(kbt, &kframe);
 	if (p == NULL)
 		return 0;
 	backtrace_init(&kbt->it, read_memory_func, kbt,
@@ -265,41 +221,19 @@
 
 	/*
 	 * Set up callback information.  We grab the kernel stack base
-	 * so we will allow reads of that address range, and if we're
-	 * asking about the current process we grab the page table
-	 * so we can check user accesses before trying to read them.
-	 * We flush the TLB to avoid any weird skew issues.
+	 * so we will allow reads of that address range.
 	 */
-	is_current = (t == NULL);
+	is_current = (t == NULL || t == current);
 	kbt->is_current = is_current;
 	if (is_current)
 		t = validate_current();
 	kbt->task = t;
-	kbt->pgtable = NULL;
 	kbt->verbose = 0;   /* override in caller if desired */
 	kbt->profile = 0;   /* override in caller if desired */
 	kbt->end = KBT_ONGOING;
-	kbt->new_context = 0;
-	if (is_current) {
-		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
-		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
-			/*
-			 * Not just an optimization: this also allows
-			 * this to work at all before va/pa mappings
-			 * are set up.
-			 */
-			kbt->pgtable = swapper_pg_dir;
-		} else {
-			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
-			if (!PageHighMem(page))
-				kbt->pgtable = __va(pgdir_pa);
-			else
-				pr_err("page table not in LOWMEM"
-				       " (%#llx)\n", pgdir_pa);
-		}
-		local_flush_tlb_all();
+	kbt->new_context = 1;
+	if (is_current)
 		validate_stack(regs);
-	}
 
 	if (regs == NULL) {
 		if (is_current || t->state == TASK_RUNNING) {
@@ -345,6 +279,78 @@
 }
 EXPORT_SYMBOL(KBacktraceIterator_next);
 
+static void describe_addr(struct KBacktraceIterator *kbt,
+			  unsigned long address,
+			  int have_mmap_sem, char *buf, size_t bufsize)
+{
+	struct vm_area_struct *vma;
+	size_t namelen, remaining;
+	unsigned long size, offset, adjust;
+	char *p, *modname;
+	const char *name;
+	int rc;
+
+	/*
+	 * Look one byte back for every caller frame (i.e. those that
+	 * aren't a new context) so we look up symbol data for the
+	 * call itself, not the following instruction, which may be on
+	 * a different line (or in a different function).
+	 */
+	adjust = !kbt->new_context;
+	address -= adjust;
+
+	if (address >= PAGE_OFFSET) {
+		/* Handle kernel symbols. */
+		BUG_ON(bufsize < KSYM_NAME_LEN);
+		name = kallsyms_lookup(address, &size, &offset,
+				       &modname, buf);
+		if (name == NULL) {
+			buf[0] = '\0';
+			return;
+		}
+		namelen = strlen(buf);
+		remaining = (bufsize - 1) - namelen;
+		p = buf + namelen;
+		rc = snprintf(p, remaining, "+%#lx/%#lx ",
+			      offset + adjust, size);
+		if (modname && rc < remaining)
+			snprintf(p + rc, remaining - rc, "[%s] ", modname);
+		buf[bufsize-1] = '\0';
+		return;
+	}
+
+	/* If we don't have the mmap_sem, we can't show any more info. */
+	buf[0] = '\0';
+	if (!have_mmap_sem)
+		return;
+
+	/* Find vma info. */
+	vma = find_vma(kbt->task->mm, address);
+	if (vma == NULL || address < vma->vm_start) {
+		snprintf(buf, bufsize, "[unmapped address] ");
+		return;
+	}
+
+	if (vma->vm_file) {
+		char *s;
+		p = d_path(&vma->vm_file->f_path, buf, bufsize);
+		if (IS_ERR(p))
+			p = "?";
+		s = strrchr(p, '/');
+		if (s)
+			p = s+1;
+	} else {
+		p = "anon";
+	}
+
+	/* Generate a string description of the vma info. */
+	namelen = strlen(p);
+	remaining = (bufsize - 1) - namelen;
+	memmove(buf, p, namelen);
+	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
+		 vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
@@ -353,6 +359,7 @@
 void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 {
 	int i;
+	int have_mmap_sem = 0;
 
 	if (headers) {
 		/*
@@ -369,31 +376,16 @@
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
-		char *modname;
-		const char *name;
-		unsigned long address = kbt->it.pc;
-		unsigned long offset, size;
 		char namebuf[KSYM_NAME_LEN+100];
+		unsigned long address = kbt->it.pc;
 
-		if (address >= PAGE_OFFSET)
-			name = kallsyms_lookup(address, &size, &offset,
-					       &modname, namebuf);
-		else
-			name = NULL;
+		/* Try to acquire the mmap_sem as we pass into userspace. */
+		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+			have_mmap_sem =
+				down_read_trylock(&kbt->task->mm->mmap_sem);
 
-		if (!name)
-			namebuf[0] = '\0';
-		else {
-			size_t namelen = strlen(namebuf);
-			size_t remaining = (sizeof(namebuf) - 1) - namelen;
-			char *p = namebuf + namelen;
-			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
-					  offset, size);
-			if (modname && rc < remaining)
-				snprintf(p + rc, remaining - rc,
-					 "[%s] ", modname);
-			namebuf[sizeof(namebuf)-1] = '\0';
-		}
+		describe_addr(kbt, address, have_mmap_sem,
+			      namebuf, sizeof(namebuf));
 
 		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
 		       i++, address, namebuf, (unsigned long)(kbt->it.sp));
@@ -408,6 +400,8 @@
 		pr_err("Stack dump stopped; next frame identical to this one\n");
 	if (headers)
 		pr_err("Stack dump complete\n");
+	if (have_mmap_sem)
+		up_read(&kbt->task->mm->mmap_sem);
 }
 EXPORT_SYMBOL(tile_show_stack);
 
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 2bb6602..73cff81 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -200,7 +200,7 @@
 {
 	siginfo_t info = { 0 };
 	int signo, code;
-	unsigned long address;
+	unsigned long address = 0;
 	bundle_bits instr;
 
 	/* Re-enable interrupts. */
@@ -223,6 +223,10 @@
 	}
 
 	switch (fault_num) {
+	case INT_MEM_ERROR:
+		signo = SIGBUS;
+		code = BUS_OBJERR;
+		break;
 	case INT_ILL:
 		if (copy_from_user(&instr, (void __user *)regs->pc,
 				   sizeof(instr))) {
@@ -289,7 +293,10 @@
 		address = regs->pc;
 		break;
 #ifdef __tilegx__
-	case INT_ILL_TRANS:
+	case INT_ILL_TRANS: {
+		/* Avoid a hardware erratum with the return address stack. */
+		fill_ra_stack();
+
 		signo = SIGSEGV;
 		code = SEGV_MAPERR;
 		if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
@@ -297,6 +304,7 @@
 		else
 			address = 0;  /* FIXME: GX: single-step for address */
 		break;
+	}
 #endif
 	default:
 		panic("Unexpected do_trap interrupt number %d", fault_num);
@@ -308,7 +316,8 @@
 	info.si_addr = (void __user *)address;
 	if (signo == SIGILL)
 		info.si_trapno = fault_num;
-	trace_unhandled_signal("trap", regs, address, signo);
+	if (signo != SIGTRAP)
+		trace_unhandled_signal("trap", regs, address, signo);
 	force_sig_info(signo, &info, current);
 }
 
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile
index 0c26086..985f598 100644
--- a/arch/tile/lib/Makefile
+++ b/arch/tile/lib/Makefile
@@ -7,6 +7,7 @@
 	strchr_$(BITS).o strlen_$(BITS).o
 
 ifeq ($(CONFIG_TILEGX),y)
+CFLAGS_REMOVE_memcpy_user_64.o = -fno-omit-frame-pointer
 lib-y += memcpy_user_64.o
 else
 lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index 8928aac..db4fb89 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -39,7 +39,21 @@
 {
 	char *p, *base;
 	size_t step_size, load_count;
+
+	/*
+	 * On TILEPro the striping granularity is a fixed 8KB; on
+	 * TILE-Gx it is configurable, and we rely on the fact that
+	 * the hypervisor always configures maximum striping; thus
+	 * bits 9 and 10 of the PA are part of the stripe function and
+	 * we hit a striping boundary every 512 bytes.
+	 *
+	 */
+#ifdef __tilegx__
+	const unsigned long STRIPE_WIDTH = 512;
+#else
 	const unsigned long STRIPE_WIDTH = 8192;
+#endif
+
 #ifdef __tilegx__
 	/*
 	 * On TILE-Gx, we must disable the dstream prefetcher before doing
@@ -74,7 +88,7 @@
 	 * memory, that one load would be sufficient, but since we may
 	 * be, we also need to back up to the last load issued to
 	 * another memory controller, which would be the point where
-	 * we crossed an 8KB boundary (the granularity of striping
+	 * we crossed a "striping" boundary (the granularity of striping
 	 * across memory controllers).  Keep backing up and doing this
 	 * until we are before the beginning of the buffer, or have
 	 * hit all the controllers.
@@ -88,12 +102,22 @@
 	 * every cache line on a full memory stripe on each
 	 * controller" that we simply do that, to simplify the logic.
 	 *
-	 * FIXME: See bug 9535 for some issues with this code.
+	 * On TILE-Gx the hash-for-home function is much more complex,
+	 * with the upshot being we can't readily guarantee we have
+	 * hit both entries in the 128-entry AMT that were hit by any
+	 * load in the entire range, so we just re-load them all.
+	 * With larger buffers, we may want to consider using a hypervisor
+	 * trap to issue loads directly to each hash-for-home tile for
+	 * each controller (doing it from Linux would trash the TLB).
 	 */
 	if (hfh) {
 		step_size = L2_CACHE_BYTES;
+#ifdef __tilegx__
+		load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
+#else
 		load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
 			      (1 << CHIP_LOG_NUM_MSHIMS());
+#endif
 	} else {
 		step_size = STRIPE_WIDTH;
 		load_count = (1 << CHIP_LOG_NUM_MSHIMS());
@@ -109,7 +133,7 @@
 
 	/* Figure out how far back we need to go. */
 	base = p - (step_size * (load_count - 2));
-	if ((long)base < (long)buffer)
+	if ((unsigned long)base < (unsigned long)buffer)
 		base = buffer;
 
 	/*
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 4763b3a..37440ca 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -14,7 +14,13 @@
  * Do memcpy(), but trap and return "n" when a load or store faults.
  *
  * Note: this idiom only works when memcpy() compiles to a leaf function.
- * If "sp" is updated during memcpy, the "jrp lr" will be incorrect.
+ * Here "leaf function" means not only that it makes no calls, but also
+ * that it performs no stack operations (sp, stack frame pointer) and
+ * uses no callee-saved registers; otherwise "jrp lr" would be incorrect,
+ * since stack-frame unwinding is bypassed. Because memcpy() is simple,
+ * these conditions are satisfied here, but we need to be careful when
+ * modifying this file. This is not a clean solution, but it is the best
+ * one so far.
  *
  * Also note that we are capturing "n" from the containing scope here.
  */
diff --git a/arch/tile/lib/spinlock_common.h b/arch/tile/lib/spinlock_common.h
index c101098..6ac3750 100644
--- a/arch/tile/lib/spinlock_common.h
+++ b/arch/tile/lib/spinlock_common.h
@@ -60,5 +60,5 @@
 	loops += __insn_crc32_32(stack_pointer, get_cycles_low()) &
 		(loops - 1);
 
-	relax(1 << exponent);
+	relax(loops);
 }
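The one-line spinlock_common.h fix above is easy to miss: the randomized component of the backoff was computed into "loops" but then discarded, so every contending core relaxed for exactly 1 << exponent cycles. A sketch of the intended behavior, with the clamp constant named hypothetically, is:

    /* Sketch: bounded exponential backoff with pseudo-random jitter, so cores
     * that collided once do not retry in lockstep.  BACKOFF_CAP is a
     * hypothetical name for whatever bound the real code applies.
     */
    static void backoff_sketch(int iterations)
    {
            int exponent = iterations + 1;
            int loops;

            if (exponent > BACKOFF_CAP)
                    exponent = BACKOFF_CAP;

            loops = 1 << exponent;
            loops += __insn_crc32_32(stack_pointer, get_cycles_low()) &
                     (loops - 1);       /* jitter in [0, 2^exponent) */

            relax(loops);               /* previously relax(1 << exponent) */
    }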
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index cba30e9..22e58f5 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -130,7 +130,7 @@
 }
 
 /*
- * Handle a fault on the vmalloc or module mapping area
+ * Handle a fault on the vmalloc area.
  */
 static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
 {
@@ -203,9 +203,14 @@
  * interrupt or a critical region, and must do as little as possible.
  * Similarly, we can't use atomic ops here, since we may be handling a
  * fault caused by an atomic op access.
+ *
+ * If we find a migrating PTE while we're in an NMI context, and we're
+ * at a PC that has a registered exception handler, we don't wait,
+ * since this thread may (e.g.) have been interrupted while migrating
+ * its own stack, which would then cause us to self-deadlock.
  */
 static int handle_migrating_pte(pgd_t *pgd, int fault_num,
-				unsigned long address,
+				unsigned long address, unsigned long pc,
 				int is_kernel_mode, int write)
 {
 	pud_t *pud;
@@ -227,6 +232,8 @@
 		pte_offset_kernel(pmd, address);
 	pteval = *pte;
 	if (pte_migrating(pteval)) {
+		if (in_nmi() && search_exception_tables(pc))
+			return 0;
 		wait_for_migration(pte);
 		return 1;
 	}
@@ -300,7 +307,7 @@
 	 * rather than trying to patch up the existing PTE.
 	 */
 	pgd = get_current_pgd();
-	if (handle_migrating_pte(pgd, fault_num, address,
+	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
 				 is_kernel_mode, write))
 		return 1;
 
@@ -335,9 +342,12 @@
 	/*
 	 * If we're trying to touch user-space addresses, we must
 	 * be either at PL0, or else with interrupts enabled in the
-	 * kernel, so either way we can re-enable interrupts here.
+	 * kernel, so either way we can re-enable interrupts here
+	 * unless we are doing atomic access to user space with
+	 * interrupts disabled.
 	 */
-	local_irq_enable();
+	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
+		local_irq_enable();
 
 	mm = tsk->mm;
 
@@ -665,7 +675,7 @@
 	 */
 	if (fault_num == INT_DTLB_ACCESS)
 		write = 1;
-	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
+	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
 		return state;
 
 	/* Return zero so that we continue on with normal fault handling. */
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 1cc6ae4..499f737 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -394,6 +394,7 @@
 		return pte_to_home(*virt_to_pte(NULL, kva));
 	}
 }
+EXPORT_SYMBOL(page_home);
 
 void homecache_change_page_home(struct page *page, int order, int home)
 {
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 830c490..6a9d20d 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -254,11 +254,6 @@
 		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
 	}
 
-	/* As a performance optimization, keep the boot init stack here. */
-	if (address >= (ulong)&init_thread_union &&
-	    address < (ulong)&init_thread_union + THREAD_SIZE)
-		return construct_pgprot(PAGE_KERNEL, smp_processor_id());
-
 #ifndef __tilegx__
 #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
 	/* Force the atomic_locks[] array page to be hash-for-home. */
@@ -557,6 +552,7 @@
 
 	address = MEM_SV_INTRPT;
 	pmd = get_pmd(pgtables, address);
+	pfn = 0;  /* code starts at PA 0 */
 	if (ktext_small) {
 		/* Allocate an L2 PTE for the kernel text */
 		int cpu = 0;
@@ -579,10 +575,15 @@
 		}
 
 		BUG_ON(address != (unsigned long)_stext);
-		pfn = 0;  /* code starts at PA 0 */
-		pte = alloc_pte();
-		for (pte_ofs = 0; address < (unsigned long)_einittext;
-		     pfn++, pte_ofs++, address += PAGE_SIZE) {
+		pte = NULL;
+		for (; address < (unsigned long)_einittext;
+		     pfn++, address += PAGE_SIZE) {
+			pte_ofs = pte_index(address);
+			if (pte_ofs == 0) {
+				if (pte)
+					assign_pte(pmd++, pte);
+				pte = alloc_pte();
+			}
 			if (!ktext_local) {
 				prot = set_remote_cache_cpu(prot, cpu);
 				cpu = cpumask_next(cpu, &ktext_mask);
@@ -591,7 +592,8 @@
 			}
 			pte[pte_ofs] = pfn_pte(pfn, prot);
 		}
-		assign_pte(pmd, pte);
+		if (pte)
+			assign_pte(pmd, pte);
 	} else {
 		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
 		pteval = pte_mkhuge(pteval);
@@ -614,7 +616,9 @@
 		else
 			pteval = hv_pte_set_mode(pteval,
 						 HV_PTE_MODE_CACHE_NO_L3);
-		*(pte_t *)pmd = pteval;
+		for (; address < (unsigned long)_einittext;
+		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
+			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
 	}
 
 	/* Set swapper_pgprot here so it is flushed to memory right away. */
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 8730369..2410aa8 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -177,14 +177,10 @@
 	if (!pmd_huge_page(*pmd))
 		return;
 
-	/*
-	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
-	 * and since we need some kind of lock here to avoid races.
-	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock_irqsave(&init_mm.page_table_lock, flags);
 	if (!pmd_huge_page(*pmd)) {
 		/* Lost the race to convert the huge page. */
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 		return;
 	}
 
@@ -194,6 +190,7 @@
 
 #ifdef __PAGETABLE_PMD_FOLDED
 	/* Walk every pgd on the system and update the pmd there. */
+	spin_lock(&pgd_lock);
 	list_for_each(pos, &pgd_list) {
 		pmd_t *copy_pmd;
 		pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -201,6 +198,7 @@
 		copy_pmd = pmd_offset(pud, addr);
 		__set_pmd(copy_pmd, *pmd);
 	}
+	spin_unlock(&pgd_lock);
 #endif
 
 	/* Tell every cpu to notice the change. */
@@ -208,7 +206,7 @@
 		     cpu_possible_mask, NULL, 0);
 
 	/* Hold the lock until the TLB flush is finished to avoid races. */
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 }
 
 /*
@@ -217,9 +215,13 @@
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
+ *
+ * The lock is always taken with interrupts disabled, unlike on x86
+ * and other platforms, because we need to take the lock in
+ * shatter_huge_page(), which may be called from an interrupt context.
+ * We are not at risk from the tlbflush IPI deadlock that was seen on
+ * x86, since we use the flush_remote() API to have the hypervisor do
+ * the TLB flushes regardless of irq disabling.
  */
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
@@ -469,10 +471,18 @@
 
 void set_pte(pte_t *ptep, pte_t pte)
 {
-	struct page *page = pfn_to_page(pte_pfn(pte));
-
-	/* Update the home of a PTE if necessary */
-	pte = pte_set_home(pte, page_home(page));
+	if (pte_present(pte) &&
+	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
+		/* The PTE actually references physical memory. */
+		unsigned long pfn = pte_pfn(pte);
+		if (pfn_valid(pfn)) {
+			/* Update the home of the PTE from the struct page. */
+			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
+		} else if (hv_pte_get_mode(pte) == 0) {
+			/* remap_pfn_range(), etc, must supply PTE mode. */
+			panic("set_pte(): out-of-range PFN and mode 0\n");
+		}
+	}
 
 	__set_pte(ptep, pte);
 }
diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h
index dc36b22..6673508 100644
--- a/arch/um/drivers/cow.h
+++ b/arch/um/drivers/cow.h
@@ -3,41 +3,6 @@
 
 #include <asm/types.h>
 
-#if defined(__KERNEL__)
-
-# include <asm/byteorder.h>
-
-# if defined(__BIG_ENDIAN)
-#	define ntohll(x) (x)
-#	define htonll(x) (x)
-# elif defined(__LITTLE_ENDIAN)
-#	define ntohll(x)  be64_to_cpu(x)
-#	define htonll(x)  cpu_to_be64(x)
-# else
-#	error "Could not determine byte order"
-# endif
-
-#else
-/* For the definition of ntohl, htonl and __BYTE_ORDER */
-#include <endian.h>
-#include <netinet/in.h>
-#if defined(__BYTE_ORDER)
-
-#  if __BYTE_ORDER == __BIG_ENDIAN
-#	define ntohll(x) (x)
-#	define htonll(x) (x)
-#  elif __BYTE_ORDER == __LITTLE_ENDIAN
-#	define ntohll(x)  bswap_64(x)
-#	define htonll(x)  bswap_64(x)
-#  else
-#	error "Could not determine byte order: __BYTE_ORDER uncorrectly defined"
-#  endif
-
-#else  /* ! defined(__BYTE_ORDER) */
-#	error "Could not determine byte order: __BYTE_ORDER not defined"
-#endif
-#endif /* ! defined(__KERNEL__) */
-
 extern int init_cow_file(int fd, char *cow_file, char *backing_file,
 			 int sectorsize, int alignment, int *bitmap_offset_out,
 			 unsigned long *bitmap_len_out, int *data_offset_out);
diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c
index 9cbb426..0ee9cc6 100644
--- a/arch/um/drivers/cow_user.c
+++ b/arch/um/drivers/cow_user.c
@@ -8,11 +8,10 @@
  * that.
  */
 #include <unistd.h>
-#include <byteswap.h>
 #include <errno.h>
 #include <string.h>
 #include <arpa/inet.h>
-#include <asm/types.h>
+#include <endian.h>
 #include "cow.h"
 #include "cow_sys.h"
 
@@ -214,8 +213,8 @@
 			   "header\n");
 		goto out;
 	}
-	header->magic = htonl(COW_MAGIC);
-	header->version = htonl(COW_VERSION);
+	header->magic = htobe32(COW_MAGIC);
+	header->version = htobe32(COW_VERSION);
 
 	err = -EINVAL;
 	if (strlen(backing_file) > sizeof(header->backing_file) - 1) {
@@ -246,10 +245,10 @@
 		goto out_free;
 	}
 
-	header->mtime = htonl(modtime);
-	header->size = htonll(*size);
-	header->sectorsize = htonl(sectorsize);
-	header->alignment = htonl(alignment);
+	header->mtime = htobe32(modtime);
+	header->size = htobe64(*size);
+	header->sectorsize = htobe32(sectorsize);
+	header->alignment = htobe32(alignment);
 	header->cow_format = COW_BITMAP;
 
 	err = cow_write_file(fd, header, sizeof(*header));
@@ -301,8 +300,8 @@
 	magic = header->v1.magic;
 	if (magic == COW_MAGIC)
 		version = header->v1.version;
-	else if (magic == ntohl(COW_MAGIC))
-		version = ntohl(header->v1.version);
+	else if (magic == be32toh(COW_MAGIC))
+		version = be32toh(header->v1.version);
 	/* No error printed because the non-COW case comes through here */
 	else goto out;
 
@@ -327,9 +326,9 @@
 				   "header\n");
 			goto out;
 		}
-		*mtime_out = ntohl(header->v2.mtime);
-		*size_out = ntohll(header->v2.size);
-		*sectorsize_out = ntohl(header->v2.sectorsize);
+		*mtime_out = be32toh(header->v2.mtime);
+		*size_out = be64toh(header->v2.size);
+		*sectorsize_out = be32toh(header->v2.sectorsize);
 		*bitmap_offset_out = sizeof(header->v2);
 		*align_out = *sectorsize_out;
 		file = header->v2.backing_file;
@@ -341,10 +340,10 @@
 				   "header\n");
 			goto out;
 		}
-		*mtime_out = ntohl(header->v3.mtime);
-		*size_out = ntohll(header->v3.size);
-		*sectorsize_out = ntohl(header->v3.sectorsize);
-		*align_out = ntohl(header->v3.alignment);
+		*mtime_out = be32toh(header->v3.mtime);
+		*size_out = be64toh(header->v3.size);
+		*sectorsize_out = be32toh(header->v3.sectorsize);
+		*align_out = be32toh(header->v3.alignment);
 		if (*align_out == 0) {
 			cow_printf("read_cow_header - invalid COW header, "
 				   "align == 0\n");
@@ -366,16 +365,16 @@
 		 * this was used until Dec2005 - 64bits are needed to represent
 		 * 2038+. I.e. we can safely do this truncating cast.
 		 *
-		 * Additionally, we must use ntohl() instead of ntohll(), since
+		 * Additionally, we must use be32toh() instead of be64toh(), since
 		 * the program used to use the former (tested - I got mtime
 		 * mismatch "0 vs whatever").
 		 *
 		 * Ever heard about bug-to-bug-compatibility ? ;-) */
-		*mtime_out = (time32_t) ntohl(header->v3_b.mtime);
+		*mtime_out = (time32_t) be32toh(header->v3_b.mtime);
 
-		*size_out = ntohll(header->v3_b.size);
-		*sectorsize_out = ntohl(header->v3_b.sectorsize);
-		*align_out = ntohl(header->v3_b.alignment);
+		*size_out = be64toh(header->v3_b.size);
+		*sectorsize_out = be32toh(header->v3_b.sectorsize);
+		*align_out = be32toh(header->v3_b.alignment);
 		if (*align_out == 0) {
 			cow_printf("read_cow_header - invalid COW header, "
 				   "align == 0\n");
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index e672bd6..43b39d6 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -22,6 +22,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
+#include <asm/switch_to.h>
 
 #include "init.h"
 #include "irq_kern.h"
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 8419f5c..fff2435 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,3 +1,4 @@
 generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
 generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h
+generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
+generic-y += switch_to.h
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 492bc4c..65a1c3d 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -3,9 +3,10 @@
 # Licensed under the GPL
 #
 
-CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \
-                        -DELF_ARCH=$(LDS_ELF_ARCH)        \
-                        -DELF_FORMAT=$(LDS_ELF_FORMAT)
+CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START)		\
+                        -DELF_ARCH=$(LDS_ELF_ARCH)	\
+                        -DELF_FORMAT=$(LDS_ELF_FORMAT)	\
+			$(LDS_EXTRA)
 extra-y := vmlinux.lds
 clean-files :=
 
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index f386d04..2b73ded 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -88,11 +88,8 @@
 
 extern void arch_switch_to(struct task_struct *to);
 
-void *_switch_to(void *prev, void *next, void *last)
+void *__switch_to(struct task_struct *from, struct task_struct *to)
 {
-	struct task_struct *from = prev;
-	struct task_struct *to = next;
-
 	to->thread.prev_sched = from;
 	set_current(to);
 
@@ -111,7 +108,6 @@
 	} while (current->thread.saved_task);
 
 	return current->thread.prev_sched;
-
 }
 
 void interrupt_end(void)
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 4947b31..0a49ef0 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -103,7 +103,6 @@
 
 void uml_setup_stubs(struct mm_struct *mm)
 {
-	struct page **pages;
 	int err, ret;
 
 	if (!skas_needs_stub)
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 4be406a..36b62bc 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -14,6 +14,9 @@
 
 export LDFLAGS
 
+LDS_EXTRA		:= -Ui386
+export LDS_EXTRA
+
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
 include $(srctree)/arch/x86/Makefile_32.cpu
 
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index d511d95..4824fb4 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -119,9 +119,7 @@
 	end = PAGE_ALIGN(end);
 	if (end <= start)
 		return;
-	down_write(&current->mm->mmap_sem);
-	do_brk(start, end - start);
-	up_write(&current->mm->mmap_sem);
+	vm_brk(start, end - start);
 }
 
 #ifdef CORE_DUMP
@@ -332,9 +330,7 @@
 		pos = 32;
 		map_size = ex.a_text+ex.a_data;
 
-		down_write(&current->mm->mmap_sem);
-		error = do_brk(text_addr & PAGE_MASK, map_size);
-		up_write(&current->mm->mmap_sem);
+		error = vm_brk(text_addr & PAGE_MASK, map_size);
 
 		if (error != (text_addr & PAGE_MASK)) {
 			send_sig(SIGKILL, current, 0);
@@ -373,9 +369,7 @@
 		if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
 			loff_t pos = fd_offset;
 
-			down_write(&current->mm->mmap_sem);
-			do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
-			up_write(&current->mm->mmap_sem);
+			vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
 			bprm->file->f_op->read(bprm->file,
 					(char __user *)N_TXTADDR(ex),
 					ex.a_text+ex.a_data, &pos);
@@ -385,26 +379,22 @@
 			goto beyond_if;
 		}
 
-		down_write(&current->mm->mmap_sem);
-		error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
+		error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
 				PROT_READ | PROT_EXEC,
 				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
 				MAP_EXECUTABLE | MAP_32BIT,
 				fd_offset);
-		up_write(&current->mm->mmap_sem);
 
 		if (error != N_TXTADDR(ex)) {
 			send_sig(SIGKILL, current, 0);
 			return error;
 		}
 
-		down_write(&current->mm->mmap_sem);
-		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+		error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
 				PROT_READ | PROT_WRITE | PROT_EXEC,
 				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
 				MAP_EXECUTABLE | MAP_32BIT,
 				fd_offset + ex.a_text);
-		up_write(&current->mm->mmap_sem);
 		if (error != N_DATADDR(ex)) {
 			send_sig(SIGKILL, current, 0);
 			return error;
@@ -476,9 +466,7 @@
 			error_time = jiffies;
 		}
 #endif
-		down_write(&current->mm->mmap_sem);
-		do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
-		up_write(&current->mm->mmap_sem);
+		vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
 
 		file->f_op->read(file, (char __user *)start_addr,
 			ex.a_text + ex.a_data, &pos);
@@ -490,12 +478,10 @@
 		goto out;
 	}
 	/* Now use mmap to map the library into memory. */
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
+	error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
 			PROT_READ | PROT_WRITE | PROT_EXEC,
 			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT,
 			N_TXTOFF(ex));
-	up_write(&current->mm->mmap_sem);
 	retval = error;
 	if (error != start_addr)
 		goto out;
@@ -503,9 +489,7 @@
 	len = PAGE_ALIGN(ex.a_text + ex.a_data);
 	bss = ex.a_text + ex.a_data + ex.a_bss;
 	if (bss > len) {
-		down_write(&current->mm->mmap_sem);
-		error = do_brk(start_addr + len, bss - len);
-		up_write(&current->mm->mmap_sem);
+		error = vm_brk(start_addr + len, bss - len);
 		retval = error;
 		if (error != start_addr + len)
 			goto out;
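The ia32_aout.c hunks all apply the same mechanical conversion: vm_mmap() and vm_brk() take mmap_sem internally, so the caller-side lock dance goes away. Shown schematically (fragment only, variable names hypothetical):

    /* before: caller manages mmap_sem around do_mmap()/do_brk() */
    down_write(&current->mm->mmap_sem);
    addr = do_mmap(file, want, len, prot, flags, off);
    up_write(&current->mm->mmap_sem);

    /* after: the helper takes the lock; the return convention is the same
     * (mapped address on success, an error value otherwise).
     */
    addr = vm_mmap(file, want, len, prot, flags, off);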
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index b3b7332..99480e5 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -43,7 +43,7 @@
 		switch (sizeof(*(ptr))) {				\
 		case __X86_CASE_B:					\
 			asm volatile (lock #op "b %b0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : "+q" (__ret), "+m" (*(ptr))	\
 				      : : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_W:					\
@@ -173,7 +173,7 @@
 		switch (sizeof(*(ptr))) {				\
 		case __X86_CASE_B:					\
 			asm volatile (lock "addb %b1, %0\n"		\
-				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "+m" (*(ptr)) : "qi" (inc)	\
 				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_W:					\
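The x86 cmpxchg.h change swaps the "r"/"ri" constraints for "q"/"qi" in the byte-sized cases. On 32-bit x86 only %eax..%edx have 8-bit sub-registers, so "r" can hand the %b0 template a register such as %esi that cannot be encoded as a byte operand. A hedged, self-contained illustration of the corrected constraint (xadd_byte() is an invented helper, not from the patch):

    static inline unsigned char xadd_byte(unsigned char *p, unsigned char v)
    {
            asm volatile("lock; xaddb %b0, %1"
                         : "+q" (v), "+m" (*p)   /* "q": reg with a byte form */
                         :
                         : "memory", "cc");
            return v;                            /* previous value of *p */
    }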
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8be5f54..e054459 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -557,6 +557,8 @@
 
 extern unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+extern __must_check long
+strncpy_from_user(char *dst, const char __user *src, long count);
 
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 566e803..8084bc7 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,11 +213,6 @@
 	return n;
 }
 
-long __must_check strncpy_from_user(char *dst, const char __user *src,
-				    long count);
-long __must_check __strncpy_from_user(char *dst,
-				      const char __user *src, long count);
-
 /**
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 1c66d30..fcd4b6f 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,10 +208,6 @@
 	}
 }
 
-__must_check long
-strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long
-__strncpy_from_user(char *dst, const char __user *src, long count);
 __must_check long strnlen_user(const char __user *str, long n);
 __must_check long __strnlen_user(const char __user *str, long n);
 __must_check long strlen_user(const char __user *str);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 694d801..b8ba6e4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -38,6 +38,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/tlbflush.h>
+#include <asm/idle.h>
 
 static int kvmapf = 1;
 
@@ -253,7 +254,10 @@
 		kvm_async_pf_task_wait((u32)read_cr2());
 		break;
 	case KVM_PV_REASON_PAGE_READY:
+		rcu_irq_enter();
+		exit_idle();
 		kvm_async_pf_task_wake((u32)read_cr2());
+		rcu_irq_exit();
 		break;
 	}
 }
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index f386dc4..7515cf0e 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -216,9 +216,9 @@
 	current_thread_info()->sig_on_uaccess_error = 1;
 
 	/*
-	 * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
+	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
 	 * 64-bit, so we don't need to special-case it here.  For all the
-	 * vsyscalls, 0 means "don't write anything" not "write it at
+	 * vsyscalls, NULL means "don't write anything" not "write it at
 	 * address 0".
 	 */
 	ret = -EFAULT;
@@ -247,7 +247,7 @@
 
 		ret = sys_getcpu((unsigned __user *)regs->di,
 				 (unsigned __user *)regs->si,
-				 0);
+				 NULL);
 		break;
 	}
 
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index a73f0c1..2e88438 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -369,7 +369,7 @@
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 		if (pmu->fixed_ctr_ctrl == data)
 			return 0;
-		if (!(data & 0xfffffffffffff444)) {
+		if (!(data & 0xfffffffffffff444ull)) {
 			reprogram_fixed_counters(pmu, data);
 			return 0;
 		}
@@ -459,17 +459,17 @@
 	pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);
 
 	if (pmu->version == 1) {
-		pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
-		return;
+		pmu->nr_arch_fixed_counters = 0;
+	} else {
+		pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
+				X86_PMC_MAX_FIXED);
+		pmu->counter_bitmask[KVM_PMC_FIXED] =
+			((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
 	}
 
-	pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
-			X86_PMC_MAX_FIXED);
-	pmu->counter_bitmask[KVM_PMC_FIXED] =
-		((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
-	pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
-			| (((1ull << pmu->nr_arch_fixed_counters) - 1)
-				<< X86_PMC_IDX_FIXED));
+	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+		(((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED);
+	pmu->global_ctrl_mask = ~pmu->global_ctrl;
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
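The pmu.c hunk above now builds global_ctrl from both counter classes and derives the reserved-bit mask from it (and makes the 0xfffffffffffff444 literal explicitly unsigned long long). With hypothetical counts of 4 general-purpose and 3 fixed counters, the derivation looks roughly like this:

    u64 gp_bits          = (1ull << 4) - 1;            /* 0x000000000000000f */
    u64 fixed_bits       = ((1ull << 3) - 1) << 32;    /* X86_PMC_IDX_FIXED == 32 */
    u64 global_ctrl      = gp_bits | fixed_bits;       /* writable enable bits */
    u64 global_ctrl_mask = ~global_ctrl;               /* reserved bits */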
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 280751c..4ff0ab9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2210,9 +2210,12 @@
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
 			msr->data = data;
-			if (msr - vmx->guest_msrs < vmx->save_nmsrs)
+			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+				preempt_disable();
 				kvm_set_shared_msr(msr->index, msr->data,
 						   msr->mask);
+				preempt_enable();
+			}
 			break;
 		}
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
@@ -3906,7 +3909,9 @@
 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
 	vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	vmx_set_cr4(&vmx->vcpu, 0);
 	vmx_set_efer(&vmx->vcpu, 0);
 	vmx_fpu_activate(&vmx->vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4044ce0..91a5e98 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6336,13 +6336,11 @@
 		if (npages && !old.rmap) {
 			unsigned long userspace_addr;
 
-			down_write(&current->mm->mmap_sem);
-			userspace_addr = do_mmap(NULL, 0,
+			userspace_addr = vm_mmap(NULL, 0,
 						 npages * PAGE_SIZE,
 						 PROT_READ | PROT_WRITE,
 						 map_flags,
 						 0);
-			up_write(&current->mm->mmap_sem);
 
 			if (IS_ERR((void *)userspace_addr))
 				return PTR_ERR((void *)userspace_addr);
@@ -6366,10 +6364,8 @@
 	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
 		int ret;
 
-		down_write(&current->mm->mmap_sem);
-		ret = do_munmap(current->mm, old.userspace_addr,
+		ret = vm_munmap(old.userspace_addr,
 				old.npages * PAGE_SIZE);
-		up_write(&current->mm->mmap_sem);
 		if (ret < 0)
 			printk(KERN_WARNING
 			       "kvm_vm_ioctl_set_memory_region: "
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 25feb1a..b1e6c4b 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -379,8 +379,8 @@
 	return;
 }
 
-/* Decode moffset16/32/64 */
-static void __get_moffset(struct insn *insn)
+/* Decode moffset16/32/64. Return 0 on failure */
+static int __get_moffset(struct insn *insn)
 {
 	switch (insn->addr_bytes) {
 	case 2:
@@ -397,15 +397,19 @@
 		insn->moffset2.value = get_next(int, insn);
 		insn->moffset2.nbytes = 4;
 		break;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
 	}
 	insn->moffset1.got = insn->moffset2.got = 1;
 
+	return 1;
+
 err_out:
-	return;
+	return 0;
 }
 
-/* Decode imm v32(Iz) */
-static void __get_immv32(struct insn *insn)
+/* Decode imm v32(Iz). Return 0 on failure */
+static int __get_immv32(struct insn *insn)
 {
 	switch (insn->opnd_bytes) {
 	case 2:
@@ -417,14 +421,18 @@
 		insn->immediate.value = get_next(int, insn);
 		insn->immediate.nbytes = 4;
 		break;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
 	}
 
+	return 1;
+
 err_out:
-	return;
+	return 0;
 }
 
-/* Decode imm v64(Iv/Ov) */
-static void __get_immv(struct insn *insn)
+/* Decode imm v64(Iv/Ov). Return 0 on failure */
+static int __get_immv(struct insn *insn)
 {
 	switch (insn->opnd_bytes) {
 	case 2:
@@ -441,15 +449,18 @@
 		insn->immediate2.value = get_next(int, insn);
 		insn->immediate2.nbytes = 4;
 		break;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
 	}
 	insn->immediate1.got = insn->immediate2.got = 1;
 
+	return 1;
 err_out:
-	return;
+	return 0;
 }
 
 /* Decode ptr16:16/32(Ap) */
-static void __get_immptr(struct insn *insn)
+static int __get_immptr(struct insn *insn)
 {
 	switch (insn->opnd_bytes) {
 	case 2:
@@ -462,14 +473,17 @@
 		break;
 	case 8:
 		/* ptr16:64 is not exist (no segment) */
-		return;
+		return 0;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
 	}
 	insn->immediate2.value = get_next(unsigned short, insn);
 	insn->immediate2.nbytes = 2;
 	insn->immediate1.got = insn->immediate2.got = 1;
 
+	return 1;
 err_out:
-	return;
+	return 0;
 }
 
 /**
@@ -489,7 +503,8 @@
 		insn_get_displacement(insn);
 
 	if (inat_has_moffset(insn->attr)) {
-		__get_moffset(insn);
+		if (!__get_moffset(insn))
+			goto err_out;
 		goto done;
 	}
 
@@ -517,16 +532,20 @@
 		insn->immediate2.nbytes = 4;
 		break;
 	case INAT_IMM_PTR:
-		__get_immptr(insn);
+		if (!__get_immptr(insn))
+			goto err_out;
 		break;
 	case INAT_IMM_VWORD32:
-		__get_immv32(insn);
+		if (!__get_immv32(insn))
+			goto err_out;
 		break;
 	case INAT_IMM_VWORD:
-		__get_immv(insn);
+		if (!__get_immv(insn))
+			goto err_out;
 		break;
 	default:
-		break;
+		/* Here, insn must have an immediate, but failed */
+		goto err_out;
 	}
 	if (inat_has_second_immediate(insn->attr)) {
 		insn->immediate2.value = get_next(char, insn);
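
The insn.c hunks above turn the immediate/offset decoders from void functions into ones that return 0 when insn->opnd_bytes holds an unsupported width, so insn_get_immediate() can take its goto err_out instead of silently leaving fields unset. A minimal standalone sketch of that pattern (toy structure and names, not the kernel's struct insn API):

#include <stdio.h>

struct toy_insn { int opnd_bytes; unsigned int imm; int got; };

/* Return 1 on success, 0 if the operand width is unsupported. */
static int get_imm(struct toy_insn *insn, const unsigned char *p)
{
	switch (insn->opnd_bytes) {
	case 2:
		insn->imm = p[0] | (p[1] << 8);
		break;
	case 4:
		insn->imm = p[0] | (p[1] << 8) | (p[2] << 16) |
			    ((unsigned int)p[3] << 24);
		break;
	default:		/* unsupported width: report failure */
		return 0;
	}
	insn->got = 1;
	return 1;
}

int main(void)
{
	struct toy_insn insn = { .opnd_bytes = 4 };
	const unsigned char bytes[] = { 0x78, 0x56, 0x34, 0x12 };

	/* The caller bails out on failure, like the goto err_out above. */
	if (!get_imm(&insn, bytes)) {
		fprintf(stderr, "decode failed\n");
		return 1;
	}
	printf("imm = 0x%x\n", insn.imm);
	return 0;
}
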
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 97be9cb..d6ae30b 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -7,6 +7,8 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 
+#include <asm/word-at-a-time.h>
+
 /*
  * best effort, GUP based copy_from_user() that is NMI-safe
  */
@@ -41,3 +43,104 @@
 	return len;
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
+
+static inline unsigned long count_bytes(unsigned long mask)
+{
+	mask = (mask - 1) & ~mask;
+	mask >>= 7;
+	return count_masked_bytes(mask);
+}
+
+/*
+ * Do a strncpy, return length of string without final '\0'.
+ * 'count' is the user-supplied count (return 'count' if we
+ * hit it), 'max' is the address space maximum (and we return
+ * -EFAULT if we hit it).
+ */
+static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+{
+	long res = 0;
+
+	/*
+	 * Truncate 'max' to the user-specified limit, so that
+	 * we only have one limit we need to check in the loop
+	 */
+	if (max > count)
+		max = count;
+
+	while (max >= sizeof(unsigned long)) {
+		unsigned long c;
+
+		/* Fall back to byte-at-a-time if we get a page fault */
+		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+			break;
+		/* This can write a few bytes past the NUL character, but that's ok */
+		*(unsigned long *)(dst+res) = c;
+		c = has_zero(c);
+		if (c)
+			return res + count_bytes(c);
+		res += sizeof(unsigned long);
+		max -= sizeof(unsigned long);
+	}
+
+	while (max) {
+		char c;
+
+		if (unlikely(__get_user(c,src+res)))
+			return -EFAULT;
+		dst[res] = c;
+		if (!c)
+			return res;
+		res++;
+		max--;
+	}
+
+	/*
+	 * Uhhuh. We hit 'max'. But was that the user-specified maximum
+	 * too? If so, that's ok - we got as much as the user asked for.
+	 */
+	if (res >= count)
+		return res;
+
+	/*
+	 * Nope: we hit the address space limit, and we still had more
+	 * characters the caller would have wanted. That's an EFAULT.
+	 */
+	return -EFAULT;
+}
+
+/**
+ * strncpy_from_user: - Copy a NUL terminated string from userspace.
+ * @dst:   Destination address, in kernel space.  This buffer must be at
+ *         least @count bytes long.
+ * @src:   Source address, in user space.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from userspace to kernel space.
+ *
+ * On success, returns the length of the string (not including the trailing
+ * NUL).
+ *
+ * If access to userspace fails, returns -EFAULT (some data may have been
+ * copied).
+ *
+ * If @count is smaller than the length of the string, copies @count bytes
+ * and returns @count.
+ */
+long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	unsigned long max_addr, src_addr;
+
+	if (unlikely(count <= 0))
+		return 0;
+
+	max_addr = current_thread_info()->addr_limit.seg;
+	src_addr = (unsigned long)src;
+	if (likely(src_addr < max_addr)) {
+		unsigned long max = max_addr - src_addr;
+		return do_strncpy_from_user(dst, src, count, max);
+	}
+	return -EFAULT;
+}
+EXPORT_SYMBOL(strncpy_from_user);
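
The new do_strncpy_from_user() above copies a word at a time and stops when has_zero() reports a NUL byte inside the word. A self-contained userspace sketch of that zero-byte-detection trick (simplified little-endian-only helpers of my own, not the kernel's <asm/word-at-a-time.h> API):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ONES	0x0101010101010101ULL
#define HIGHS	0x8080808080808080ULL

/* Non-zero iff v contains a zero byte; the first zero byte gets its high bit set. */
static uint64_t has_zero_byte(uint64_t v)
{
	return (v - ONES) & ~v & HIGHS;
}

/* Index (0..7) of the first zero byte; only meaningful if has_zero_byte(v) != 0. */
static unsigned int first_zero_byte(uint64_t v)
{
	return __builtin_ctzll(has_zero_byte(v)) >> 3;	/* bit position / 8 */
}

int main(void)
{
	uint64_t w;

	memcpy(&w, "abc\0defg", 8);	/* NUL sits at byte index 3 */
	if (has_zero_byte(w))
		printf("string length within this word: %u\n", first_zero_byte(w));
	return 0;
}

On a little-endian machine this prints 3, which corresponds to the value count_bytes() feeds back into the copy loop above to compute the final string length.
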
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index d9b094c..ef2a6a5 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -33,93 +33,6 @@
 	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
 
 /*
- * Copy a null terminated string from userspace.
- */
-
-#define __do_strncpy_from_user(dst, src, count, res)			   \
-do {									   \
-	int __d0, __d1, __d2;						   \
-	might_fault();							   \
-	__asm__ __volatile__(						   \
-		"	testl %1,%1\n"					   \
-		"	jz 2f\n"					   \
-		"0:	lodsb\n"					   \
-		"	stosb\n"					   \
-		"	testb %%al,%%al\n"				   \
-		"	jz 1f\n"					   \
-		"	decl %1\n"					   \
-		"	jnz 0b\n"					   \
-		"1:	subl %1,%0\n"					   \
-		"2:\n"							   \
-		".section .fixup,\"ax\"\n"				   \
-		"3:	movl %5,%0\n"					   \
-		"	jmp 2b\n"					   \
-		".previous\n"						   \
-		_ASM_EXTABLE(0b,3b)					   \
-		: "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),	   \
-		  "=&D" (__d2)						   \
-		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
-		: "memory");						   \
-} while (0)
-
-/**
- * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- * Caller must check the specified block with access_ok() before calling
- * this function.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
- * @src:   Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
  * Zero Userspace
  */
 
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index b7c2849..0d0326f 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -9,55 +9,6 @@
 #include <asm/uaccess.h>
 
 /*
- * Copy a null terminated string from userspace.
- */
-
-#define __do_strncpy_from_user(dst,src,count,res)			   \
-do {									   \
-	long __d0, __d1, __d2;						   \
-	might_fault();							   \
-	__asm__ __volatile__(						   \
-		"	testq %1,%1\n"					   \
-		"	jz 2f\n"					   \
-		"0:	lodsb\n"					   \
-		"	stosb\n"					   \
-		"	testb %%al,%%al\n"				   \
-		"	jz 1f\n"					   \
-		"	decq %1\n"					   \
-		"	jnz 0b\n"					   \
-		"1:	subq %1,%0\n"					   \
-		"2:\n"							   \
-		".section .fixup,\"ax\"\n"				   \
-		"3:	movq %5,%0\n"					   \
-		"	jmp 2b\n"					   \
-		".previous\n"						   \
-		_ASM_EXTABLE(0b,3b)					   \
-		: "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),	   \
-		  "=&D" (__d2)						   \
-		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
-		: "memory");						   \
-} while (0)
-
-long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		return __strncpy_from_user(dst, src, count);
-	return res;
-}
-EXPORT_SYMBOL(strncpy_from_user);
-
-/*
  * Zero Userspace
  */
 
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
new file mode 100644
index 0000000..7d01b8c
--- /dev/null
+++ b/arch/x86/um/asm/barrier.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+
+#define mb()	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb()	alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb()	alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+
+#else /* CONFIG_X86_32 */
+
+#define mb()	asm volatile("mfence" : : : "memory")
+#define rmb()	asm volatile("lfence" : : : "memory")
+#define wmb()	asm volatile("sfence" : : : "memory")
+
+#endif /* CONFIG_X86_32 */
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+#define smp_rmb()	rmb()
+#else /* CONFIG_X86_PPRO_FENCE */
+#define smp_rmb()	barrier()
+#endif /* CONFIG_X86_PPRO_FENCE */
+
+#ifdef CONFIG_X86_OOSTORE
+#define smp_wmb()	wmb()
+#else /* CONFIG_X86_OOSTORE */
+#define smp_wmb()	barrier()
+#endif /* CONFIG_X86_OOSTORE */
+
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
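
This new header keeps only the barrier definitions UML still needs once asm/system.h goes away (see the deletion below). As a rough userspace analog of the publish/consume pattern the smp_wmb()/smp_rmb() pair exists for (GCC atomic builtins stand in for the kernel primitives; this is an illustration, not kernel code):

#include <pthread.h>
#include <stdio.h>

static int data;
static int ready;

static void *producer(void *arg)
{
	(void)arg;
	data = 42;
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* plays the role of smp_wmb() */
	__atomic_store_n(&ready, 1, __ATOMIC_RELAXED);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!__atomic_load_n(&ready, __ATOMIC_RELAXED))
		;					/* spin until the flag is published */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* plays the role of smp_rmb() */
	printf("data = %d\n", data);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

Build with -pthread. The write barrier keeps the data store from being reordered after the flag store; the read barrier keeps the data load from being hoisted above the flag check.
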
diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h
deleted file mode 100644
index a459fd9..0000000
--- a/arch/x86/um/asm/system.h
+++ /dev/null
@@ -1,135 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_H_
-#define _ASM_X86_SYSTEM_H_
-
-#include <asm/asm.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-#include <asm/nops.h>
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-/* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
-# define AT_VECTOR_SIZE_ARCH 2
-#else
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-void default_idle(void);
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define mb() 	asm volatile("mfence":::"memory")
-#define rmb()	asm volatile("lfence":::"memory")
-#define wmb()	asm volatile("sfence" ::: "memory")
-#endif
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb()	rmb()
-#else
-# define smp_rmb()	barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() 	wmb()
-#else
-# define smp_wmb()	barrier()
-#endif
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static inline void rdtsc_barrier(void)
-{
-	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
-	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
-extern void *_switch_to(void *prev, void *next, void *last);
-#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
-
-#endif
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 988828b..b8e2794 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1859,6 +1859,7 @@
 #endif	/* CONFIG_X86_64 */
 
 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
+static unsigned char fake_ioapic_mapping[PAGE_SIZE] __page_aligned_bss;
 
 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 {
@@ -1899,7 +1900,7 @@
 		 * We just don't map the IO APIC - all access is via
 		 * hypercalls.  Keep the address in the pte for reference.
 		 */
-		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
+		pte = pfn_pte(PFN_DOWN(__pa(fake_ioapic_mapping)), PAGE_KERNEL);
 		break;
 #endif
 
@@ -2064,6 +2065,7 @@
 	pv_mmu_ops = xen_mmu_ops;
 
 	memset(dummy_mapping, 0xff, PAGE_SIZE);
+	memset(fake_ioapic_mapping, 0xfd, PAGE_SIZE);
 }
 
 /* Protected by xen_reservation_lock. */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 02900e8..5fac691 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -59,7 +59,7 @@
 
 static void __cpuinit cpu_bringup(void)
 {
-	int cpu = smp_processor_id();
+	int cpu;
 
 	cpu_init();
 	touch_softlockup_watchdog();
diff --git a/block/blk-core.c b/block/blk-core.c
index 3a78b00..1f61b74 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -483,7 +483,7 @@
 	if (!q)
 		return NULL;
 
-	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 	if (q->id < 0)
 		goto fail_q;
 
@@ -1277,7 +1277,8 @@
 	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
 		int el_ret;
 
-		(*request_count)++;
+		if (rq->q == q)
+			(*request_count)++;
 
 		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 			continue;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a7..f2ddb94 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1218,7 +1218,7 @@
 	struct bio_list bl;
 	struct bio *bio;
 
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	bio_list_init(&bl);
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4572952..3c38536 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -295,6 +295,7 @@
 	unsigned int cfq_slice_idle;
 	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
+	unsigned int cfq_target_latency;
 
 	/*
 	 * Fallback dummy cfqq for extreme OOM conditions
@@ -604,7 +605,7 @@
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-	return cfq_target_latency * cfqg->weight / st->total_weight;
+	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
 static inline unsigned
@@ -2271,7 +2272,8 @@
 		 * to have higher weight. A more accurate thing would be to
 		 * calculate system wide asnc/sync ratio.
 		 */
-		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+		tmp = cfqd->cfq_target_latency *
+			cfqg_busy_async_queues(cfqd, cfqg);
 		tmp = tmp/cfqd->busy_queues;
 		slice = min_t(unsigned, slice, tmp);
 
@@ -3737,6 +3739,7 @@
 	cfqd->cfq_back_penalty = cfq_back_penalty;
 	cfqd->cfq_slice[0] = cfq_slice_async;
 	cfqd->cfq_slice[1] = cfq_slice_sync;
+	cfqd->cfq_target_latency = cfq_target_latency;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 	cfqd->cfq_group_idle = cfq_group_idle;
@@ -3788,6 +3791,7 @@
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -3821,6 +3825,7 @@
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
 		UINT_MAX, 0);
 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -3838,6 +3843,7 @@
 	CFQ_ATTR(slice_idle),
 	CFQ_ATTR(group_idle),
 	CFQ_ATTR(low_latency),
+	CFQ_ATTR(target_latency),
 	__ATTR_NULL
 };
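
The cfq hunks above wire the previously global cfq_target_latency constant up as a per-queue tunable exposed through the iosched sysfs directory. A hypothetical userspace helper for poking it (the path below assumes a disk named sda and the usual queue/iosched location; the file only exists while cfq is the active scheduler for that queue, writing it needs root, and the value is shown and stored in milliseconds):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/iosched/target_latency";
	FILE *f = fopen(path, "r+");
	unsigned int val;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &val) == 1)
		printf("current target_latency: %u ms\n", val);

	rewind(f);			/* reposition before switching to writing */
	fprintf(f, "%u\n", 300);	/* example value only */
	fclose(f);
	return 0;
}
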
 
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 21ff9d0..8e84225 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -627,7 +627,7 @@
 
 config CRYPTO_BLOWFISH_X86_64
 	tristate "Blowfish cipher algorithm (x86_64)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLOWFISH_COMMON
 	help
@@ -657,7 +657,7 @@
 
 config CRYPTO_CAMELLIA_X86_64
 	tristate "Camellia cipher algorithm (x86_64)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	depends on CRYPTO
 	select CRYPTO_ALGAPI
 	select CRYPTO_LRW
@@ -893,7 +893,7 @@
 
 config CRYPTO_TWOFISH_X86_64_3WAY
 	tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_TWOFISH_COMMON
 	select CRYPTO_TWOFISH_X86_64
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 8d3a056..533de95 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -397,9 +397,9 @@
 	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
 	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
-		sizeof(struct crypto_report_blkcipher), &rblkcipher);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -478,9 +478,9 @@
 	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
 	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
-		sizeof(struct crypto_report_blkcipher), &rblkcipher);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
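
These crypto hunks (and the similar ones in the files that follow) replace the NLA_PUT() macro, which hid a goto to the nla_put_failure label inside itself, with an explicit check of nla_put()'s return value. A toy, compilable illustration of the before/after control flow (fake_put() and FAKE_PUT() are stand-ins, not the netlink API):

#include <stdio.h>

static int fake_put(int fail)
{
	return fail ? -1 : 0;		/* stand-in for nla_put() */
}

/* Old style: the error jump is hidden inside the macro. */
#define FAKE_PUT(fail)					\
	do {						\
		if (fake_put(fail) < 0)			\
			goto put_failure;		\
	} while (0)

static int report_old(int fail)
{
	FAKE_PUT(fail);
	return 0;
put_failure:
	return -1;
}

/* New style, as in the hunks above: the check and the jump are spelled out. */
static int report_new(int fail)
{
	if (fake_put(fail))
		goto put_failure;
	return 0;
put_failure:
	return -1;
}

int main(void)
{
	printf("old: ok=%d fail=%d\n", report_old(0), report_old(1));
	printf("new: ok=%d fail=%d\n", report_new(0), report_new(1));
	return 0;
}
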
diff --git a/crypto/aead.c b/crypto/aead.c
index e4cb351..0b8121e 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -125,9 +125,9 @@
 	raead.maxauthsize = aead->maxauthsize;
 	raead.ivsize = aead->ivsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
-		sizeof(struct crypto_report_aead), &raead);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
+		    sizeof(struct crypto_report_aead), &raead))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -210,9 +210,9 @@
 	raead.maxauthsize = aead->maxauthsize;
 	raead.ivsize = aead->ivsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
-		sizeof(struct crypto_report_aead), &raead);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
+		    sizeof(struct crypto_report_aead), &raead))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 33bc9b6..3887856 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -409,9 +409,9 @@
 	rhash.blocksize = alg->cra_blocksize;
 	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
-		sizeof(struct crypto_report_hash), &rhash);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
+		    sizeof(struct crypto_report_hash), &rhash))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 4dd80c7..a8d85a1 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -508,9 +508,9 @@
 	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
 	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
-		sizeof(struct crypto_report_blkcipher), &rblkcipher);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index f1ea0a0..5a37ead 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -81,9 +81,9 @@
 	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
 	rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_CIPHER,
-		sizeof(struct crypto_report_cipher), &rcipher);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
+		    sizeof(struct crypto_report_cipher), &rcipher))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -96,9 +96,9 @@
 
 	snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
-		sizeof(struct crypto_report_comp), &rcomp);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+		    sizeof(struct crypto_report_comp), &rcomp))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -117,16 +117,16 @@
 	ualg->cru_flags = alg->cra_flags;
 	ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
 
-	NLA_PUT_U32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority);
-
+	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
+		goto nla_put_failure;
 	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
 		struct crypto_report_larval rl;
 
 		snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
 
-		NLA_PUT(skb, CRYPTOCFGA_REPORT_LARVAL,
-			sizeof(struct crypto_report_larval), &rl);
-
+		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
+			    sizeof(struct crypto_report_larval), &rl))
+			goto nla_put_failure;
 		goto out;
 	}
 
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index 2e458e5..04e083f 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -55,9 +55,9 @@
 
 	snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
-		sizeof(struct crypto_report_comp), &rpcomp);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+		    sizeof(struct crypto_report_comp), &rpcomp))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/rng.c b/crypto/rng.c
index 64f864f..f3b7894 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -69,9 +69,9 @@
 
 	rrng.seedsize = alg->cra_rng.seedsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_RNG,
-		sizeof(struct crypto_report_rng), &rrng);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_RNG,
+		    sizeof(struct crypto_report_rng), &rrng))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 107f6f7..dd30f40 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -174,7 +174,7 @@
 	index = sctx->count[0] & 0x7f;
 
 	/* Update number of bytes */
-	if (!(sctx->count[0] += len))
+	if ((sctx->count[0] += len) < len)
 		sctx->count[1]++;
 
         part_len = 128 - index;
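
The one-liner above fixes the carry test on SHA-512's 128-bit byte counter: after the addition, the low 64-bit word has wrapped whenever the result is smaller than the length just added, whereas the old "!(count[0] += len)" test only caught a wrap that landed exactly on zero. A standalone demonstration of the difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t count = 0xfffffffffffffff0ULL;	/* low word of the byte counter */
	uint64_t len = 0x20;			/* bytes just hashed */
	uint64_t sum = count + len;		/* wraps around to 0x10 */
	unsigned int old_carry = 0, new_carry = 0;

	if (!sum)			/* old test: misses this wrap */
		old_carry++;
	if (sum < len)			/* new test: catches any wrap */
		new_carry++;

	printf("sum=0x%llx old_carry=%u new_carry=%u\n",
	       (unsigned long long)sum, old_carry, new_carry);
	return 0;
}
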
diff --git a/crypto/shash.c b/crypto/shash.c
index 21fc12e..32067f4 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -534,9 +534,9 @@
 	rhash.blocksize = alg->cra_blocksize;
 	rhash.digestsize = salg->digestsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
-		sizeof(struct crypto_report_hash), &rhash);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
+		    sizeof(struct crypto_report_hash), &rhash))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index ab513a9..a716fed 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -74,7 +74,8 @@
 
 	/* Check if the reset register is supported */
 
-	if (!reset_reg->address) {
+	if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
+	    !reset_reg->address) {
 		return_ACPI_STATUS(AE_NOT_EXIST);
 	}
 
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index ba14fb9..c3881b2 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -607,8 +607,7 @@
 
 	acpi_irq_handler = handler;
 	acpi_irq_context = context;
-	if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED, "acpi",
-				 acpi_irq)) {
+	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
 		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
 		acpi_irq_handler = NULL;
 		return AE_NOT_ACQUIRED;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b3447f6..f3decb3 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -786,7 +786,7 @@
 	while (1) {
 
 		if (cx->entry_method == ACPI_CSTATE_HALT)
-			halt();
+			safe_halt();
 		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
 			inb(cx->address);
 			/* See comment in acpi_idle_do_entry() */
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index c1d6124..a6c77e8b 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -23,7 +23,8 @@
 	/* Is the reset register supported? The spec says we should be
 	 * checking the bit width and bit offset, but Windows ignores
 	 * these fields */
-	/* Ignore also acpi_gbl_FADT.flags.ACPI_FADT_RESET_REGISTER */
+	if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER))
+		return;
 
 	reset_value = acpi_gbl_FADT.reset_value;
 
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 01c2cf4..cc27322 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -247,8 +247,7 @@
 /*
  * Hooks to provide runtime PM of the pclk (bus clock).  It is safe to
  * enable/disable the bus clock at runtime PM suspend/resume as this
- * does not result in loss of context.  However, disabling vcore power
- * would do, so we leave that to the driver.
+ * does not result in loss of context.
  */
 static int amba_pm_runtime_suspend(struct device *dev)
 {
@@ -354,39 +353,6 @@
 	clk_put(pclk);
 }
 
-static int amba_get_enable_vcore(struct amba_device *pcdev)
-{
-	struct regulator *vcore = regulator_get(&pcdev->dev, "vcore");
-	int ret;
-
-	pcdev->vcore = vcore;
-
-	if (IS_ERR(vcore)) {
-		/* It is OK not to supply a vcore regulator */
-		if (PTR_ERR(vcore) == -ENODEV)
-			return 0;
-		return PTR_ERR(vcore);
-	}
-
-	ret = regulator_enable(vcore);
-	if (ret) {
-		regulator_put(vcore);
-		pcdev->vcore = ERR_PTR(-ENODEV);
-	}
-
-	return ret;
-}
-
-static void amba_put_disable_vcore(struct amba_device *pcdev)
-{
-	struct regulator *vcore = pcdev->vcore;
-
-	if (!IS_ERR(vcore)) {
-		regulator_disable(vcore);
-		regulator_put(vcore);
-	}
-}
-
 /*
  * These are the device model conversion veneers; they convert the
  * device model structures to our more specific structures.
@@ -399,10 +365,6 @@
 	int ret;
 
 	do {
-		ret = amba_get_enable_vcore(pcdev);
-		if (ret)
-			break;
-
 		ret = amba_get_enable_pclk(pcdev);
 		if (ret)
 			break;
@@ -420,7 +382,6 @@
 		pm_runtime_put_noidle(dev);
 
 		amba_put_disable_pclk(pcdev);
-		amba_put_disable_vcore(pcdev);
 	} while (0);
 
 	return ret;
@@ -442,7 +403,6 @@
 	pm_runtime_put_noidle(dev);
 
 	amba_put_disable_pclk(pcdev);
-	amba_put_disable_vcore(pcdev);
 
 	return ret;
 }
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 68013f9..7857e8f 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -329,6 +329,8 @@
 	{ 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (Lynx Point) */
 	{ 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+	/* SATA Controller IDE (DH89xxCC) */
+	{ 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	{ }	/* terminate list */
 };
 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e0bda9f..28db50b 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -95,7 +95,7 @@
 static void ata_dev_xfermask(struct ata_device *dev);
 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
 
-unsigned int ata_print_id = 1;
+atomic_t ata_print_id = ATOMIC_INIT(1);
 
 struct ata_force_param {
 	const char	*name;
@@ -6029,7 +6029,7 @@
 
 	/* give ports names and add SCSI hosts */
 	for (i = 0; i < host->n_ports; i++)
-		host->ports[i]->print_id = ata_print_id++;
+		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
 
 
 	/* Create associated sysfs transport objects  */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1ee00c8..93dabdc 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3843,7 +3843,7 @@
 	int rc = ap->ops->port_start(ap);
 
 	if (!rc) {
-		ap->print_id = ata_print_id++;
+		ap->print_id = atomic_inc_return(&ata_print_id);
 		__ata_port_probe(ap);
 	}
 
@@ -3867,7 +3867,7 @@
 	int rc = ap->ops->port_start(ap);
 
 	if (!rc) {
-		ap->print_id = ata_print_id++;
+		ap->print_id = atomic_inc_return(&ata_print_id);
 		rc = ata_port_probe(ap);
 	}
 
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 74aaee3..c341904 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -294,6 +294,7 @@
 	device_enable_async_suspend(dev);
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
+	pm_runtime_forbid(dev);
 
 	transport_add_device(dev);
 	transport_configure_device(dev);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 2e26fca..9d0fd0b 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -53,7 +53,7 @@
 	ATA_DNXFER_QUIET	= (1 << 31),
 };
 
-extern unsigned int ata_print_id;
+extern atomic_t ata_print_id;
 extern int atapi_passthru16;
 extern int libata_fua;
 extern int libata_noacpi;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 38950ea..7336d4a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4025,7 +4025,8 @@
 	struct ata_host *host;
 	struct mv_host_priv *hpriv;
 	struct resource *res;
-	int n_ports, rc;
+	int n_ports = 0;
+	int rc;
 
 	ata_print_version_once(&pdev->dev, DRV_VERSION);
 
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 75fd691..7d01c2a 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2182,7 +2182,6 @@
     default:
       PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
       return -EINVAL;
-      break;
   }
   
   // TX traffic parameters
@@ -2357,7 +2356,6 @@
       default: {
 	PRINTD (DBG_QOS, "unsupported TX traffic class");
 	return -EINVAL;
-	break;
       }
     }
   }
@@ -2433,7 +2431,6 @@
       default: {
 	PRINTD (DBG_QOS, "unsupported RX traffic class");
 	return -EINVAL;
-	break;
       }
     }
   }
@@ -2581,7 +2578,6 @@
 //	  break;
 	default:
 	  return -ENOPROTOOPT;
-	  break;
       };
       break;
   }
@@ -2601,7 +2597,6 @@
 //	  break;
 	default:
 	  return -ENOPROTOOPT;
-	  break;
       };
       break;
   }
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index fb14a63..92b779e 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -138,6 +138,7 @@
 	unsigned int base, top;
 	int nodes = 0;
 	int registers = 0;
+	int average;
 
 	mutex_lock(&map->lock);
 
@@ -152,8 +153,13 @@
 		registers += top - base + 1;
 	}
 
+	if (nodes)
+		average = registers / nodes;
+	else
+		average = 0;
+
 	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
-		   nodes, registers, registers / nodes);
+		   nodes, registers, average);
 
 	mutex_unlock(&map->lock);
 
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 87f54db..74b6909 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -346,6 +346,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(regcache_sync_region);
 
 /**
  * regcache_cache_only: Put a register map into cache only mode
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 05f1503..ba29b2e 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -15,7 +15,7 @@
 #include <linux/sys_soc.h>
 #include <linux/err.h>
 
-static DEFINE_IDR(soc_ida);
+static DEFINE_IDA(soc_ida);
 static DEFINE_SPINLOCK(soc_lock);
 
 static ssize_t soc_info_get(struct device *dev,
@@ -168,8 +168,6 @@
 
 static int __init soc_bus_register(void)
 {
-	spin_lock_init(&soc_lock);
-
 	return bus_register(&soc_bus_type);
 }
 core_initcall(soc_bus_register);
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index cdcf75c..3e2a600 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -404,16 +404,19 @@
 		return -EOPNOTSUPP;
 
 	if (!bcma_sprom_ext_available(bus)) {
+		bool sprom_onchip;
+
 		/*
 		 * External SPROM takes precedence so check
 		 * on-chip OTP only when no external SPROM
 		 * is present.
 		 */
-		if (bcma_sprom_onchip_available(bus)) {
+		sprom_onchip = bcma_sprom_onchip_available(bus);
+		if (sprom_onchip) {
 			/* determine offset */
 			offset = bcma_sprom_onchip_offset(bus);
 		}
-		if (!offset) {
+		if (!offset || !sprom_onchip) {
 			/*
 			 * Maybe there is no SPROM on the device?
 			 * Now we ask the arch code if there is some sprom
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index e820b68..acda773 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -866,6 +866,7 @@
 	sh->can_queue = cciss_tape_cmds;
 	sh->sg_tablesize = h->maxsgentries;
 	sh->max_cmd_len = MAX_COMMAND_SIZE;
+	sh->max_sectors = h->cciss_max_sectors;
 
 	((struct cciss_scsi_adapter_data_t *) 
 		h->scsi_ctlr)->scsi_host = sh;
@@ -1410,7 +1411,7 @@
 	/* track how many SG entries we are using */
 	if (request_nsgs > h->maxSG)
 		h->maxSG = request_nsgs;
-	c->Header.SGTotal = (__u8) request_nsgs + chained;
+	c->Header.SGTotal = (u16) request_nsgs + chained;
 	if (request_nsgs > h->max_cmd_sgentries)
 		c->Header.SGList = h->max_cmd_sgentries;
 	else
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 43beaca..436f519 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -664,7 +664,7 @@
 	timeo = mdev->net_conf->try_connect_int * HZ;
 	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
 
-	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
+	s_listen->sk->sk_reuse    = SK_CAN_REUSE; /* SO_REUSEADDR */
 	s_listen->sk->sk_rcvtimeo = timeo;
 	s_listen->sk->sk_sndtimeo = timeo;
 	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
@@ -841,8 +841,8 @@
 		}
 	} while (1);
 
-	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
-	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
+	msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+	sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
 
 	sock->sk->sk_allocation = GFP_NOIO;
 	msock->sk->sk_allocation = GFP_NOIO;
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
index b5dd14e..0ba837f 100644
--- a/drivers/block/mtip32xx/Kconfig
+++ b/drivers/block/mtip32xx/Kconfig
@@ -4,6 +4,6 @@
 
 config BLK_DEV_PCIESSD_MTIP32XX
 	tristate "Block Device Driver for Micron PCIe SSDs"
-	depends on HOTPLUG_PCI_PCIE
+	depends on PCI
 	help
           This enables the block driver for Micron PCIe SSDs.
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 8eb81c9..00f9fc9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -36,6 +36,7 @@
 #include <linux/idr.h>
 #include <linux/kthread.h>
 #include <../drivers/ata/ahci.h>
+#include <linux/export.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ		(MTIP_MAX_COMMAND_SLOTS * 32)
@@ -44,6 +45,7 @@
 #define HW_PORT_PRIV_DMA_SZ \
 		(HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
 
+#define HOST_CAP_NZDMA		(1 << 19)
 #define HOST_HSORG		0xFC
 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
@@ -139,6 +141,12 @@
 	int group = 0, commandslot = 0, commandindex = 0;
 	struct mtip_cmd *command;
 	struct mtip_port *port = dd->port;
+	static int in_progress;
+
+	if (in_progress)
+		return;
+
+	in_progress = 1;
 
 	for (group = 0; group < 4; group++) {
 		for (commandslot = 0; commandslot < 32; commandslot++) {
@@ -165,7 +173,8 @@
 
 	up(&port->cmd_slot);
 
-	atomic_set(&dd->drv_cleanup_done, true);
+	set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
+	in_progress = 0;
 }
 
 /*
@@ -262,6 +271,9 @@
 		 && time_before(jiffies, timeout))
 		mdelay(1);
 
+	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
+		return -1;
+
 	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
 		return -1;
 
@@ -294,6 +306,10 @@
 			port->cmd_issue[MTIP_TAG_INDEX(tag)]);
 
 	spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+
+	/* Set the command's timeout value.*/
+	port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
+					MTIP_NCQ_COMMAND_TIMEOUT_MS);
 }
 
 /*
@@ -420,7 +436,12 @@
 		writel(0xFFFFFFFF, port->completed[i]);
 
 	/* Clear any pending interrupts for this port */
-	writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
+	writel(readl(port->dd->mmio + PORT_IRQ_STAT),
+					port->dd->mmio + PORT_IRQ_STAT);
+
+	/* Clear any pending interrupts on the HBA. */
+	writel(readl(port->dd->mmio + HOST_IRQ_STAT),
+					port->dd->mmio + HOST_IRQ_STAT);
 
 	/* Enable port interrupts */
 	writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
@@ -447,6 +468,9 @@
 		 && time_before(jiffies, timeout))
 		;
 
+	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+		return;
+
 	/*
 	 * Chip quirk: escalate to hba reset if
 	 * PxCMD.CR not clear after 500 ms
@@ -475,6 +499,9 @@
 	while (time_before(jiffies, timeout))
 		;
 
+	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+		return;
+
 	/* Clear PxSCTL.DET */
 	writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
 			 port->mmio + PORT_SCR_CTL);
@@ -486,15 +513,35 @@
 			 && time_before(jiffies, timeout))
 		;
 
+	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+		return;
+
 	if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
 		dev_warn(&port->dd->pdev->dev,
 			"COM reset failed\n");
 
-	/* Clear SError, the PxSERR.DIAG.x should be set so clear it */
-	writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
+	mtip_init_port(port);
+	mtip_start_port(port);
 
-	/* Enable the DMA engine */
-	mtip_enable_engine(port, 1);
+}
+
+/*
+ * Helper function for tag logging
+ */
+static void print_tags(struct driver_data *dd,
+			char *msg,
+			unsigned long *tagbits,
+			int cnt)
+{
+	unsigned char tagmap[128];
+	int group, tagmap_len = 0;
+
+	memset(tagmap, 0, sizeof(tagmap));
+	for (group = SLOTBITS_IN_LONGS; group > 0; group--)
+		tagmap_len = sprintf(tagmap + tagmap_len, "%016lX ",
+						tagbits[group-1]);
+	dev_warn(&dd->pdev->dev,
+			"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
 }
 
 /*
@@ -514,15 +561,18 @@
 	int tag, cmdto_cnt = 0;
 	unsigned int bit, group;
 	unsigned int num_command_slots = port->dd->slot_groups * 32;
+	unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
 
 	if (unlikely(!port))
 		return;
 
-	if (atomic_read(&port->dd->resumeflag) == true) {
+	if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
 		mod_timer(&port->cmd_timer,
 			jiffies + msecs_to_jiffies(30000));
 		return;
 	}
+	/* clear the tag accumulator */
+	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
 
 	for (tag = 0; tag < num_command_slots; tag++) {
 		/*
@@ -540,12 +590,10 @@
 			command = &port->commands[tag];
 			fis = (struct host_to_dev_fis *) command->command;
 
-			dev_warn(&port->dd->pdev->dev,
-				"Timeout for command tag %d\n", tag);
-
+			set_bit(tag, tagaccum);
 			cmdto_cnt++;
 			if (cmdto_cnt == 1)
-				set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+				set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
 
 			/*
 			 * Clear the completed bit. This should prevent
@@ -578,15 +626,29 @@
 		}
 	}
 
-	if (cmdto_cnt) {
-		dev_warn(&port->dd->pdev->dev,
-			"%d commands timed out: restarting port",
-			cmdto_cnt);
+	if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+		print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
+
 		mtip_restart_port(port);
-		clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+		clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
 		wake_up_interruptible(&port->svc_wait);
 	}
 
+	if (port->ic_pause_timer) {
+		to  = port->ic_pause_timer + msecs_to_jiffies(1000);
+		if (time_after(jiffies, to)) {
+			if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+				port->ic_pause_timer = 0;
+				clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+				clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
+				clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+				wake_up_interruptible(&port->svc_wait);
+			}
+
+
+		}
+	}
+
 	/* Restart the timer */
 	mod_timer(&port->cmd_timer,
 		jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
@@ -681,23 +743,18 @@
 	complete(waiting);
 }
 
-/*
- * Helper function for tag logging
- */
-static void print_tags(struct driver_data *dd,
-			char *msg,
-			unsigned long *tagbits)
+static void mtip_null_completion(struct mtip_port *port,
+			    int tag,
+			    void *data,
+			    int status)
 {
-	unsigned int tag, count = 0;
-
-	for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
-		if (test_bit(tag, tagbits))
-			count++;
-	}
-	if (count)
-		dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
+	return;
 }
 
+static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
+				dma_addr_t buffer_dma, unsigned int sectors);
+static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
+						struct smart_attr *attrib);
 /*
  * Handle an error.
  *
@@ -708,12 +765,16 @@
  */
 static void mtip_handle_tfe(struct driver_data *dd)
 {
-	int group, tag, bit, reissue;
+	int group, tag, bit, reissue, rv;
 	struct mtip_port *port;
-	struct mtip_cmd  *command;
+	struct mtip_cmd  *cmd;
 	u32 completed;
 	struct host_to_dev_fis *fis;
 	unsigned long tagaccum[SLOTBITS_IN_LONGS];
+	unsigned int cmd_cnt = 0;
+	unsigned char *buf;
+	char *fail_reason = NULL;
+	int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;
 
 	dev_warn(&dd->pdev->dev, "Taskfile error\n");
 
@@ -722,8 +783,11 @@
 	/* Stop the timer to prevent command timeouts. */
 	del_timer(&port->cmd_timer);
 
+	/* clear the tag accumulator */
+	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+
 	/* Set eh_active */
-	set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+	set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
 
 	/* Loop through all the groups */
 	for (group = 0; group < dd->slot_groups; group++) {
@@ -732,9 +796,6 @@
 		/* clear completed status register in the hardware.*/
 		writel(completed, port->completed[group]);
 
-		/* clear the tag accumulator */
-		memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
-
 		/* Process successfully completed commands */
 		for (bit = 0; bit < 32 && completed; bit++) {
 			if (!(completed & (1<<bit)))
@@ -745,13 +806,14 @@
 			if (tag == MTIP_TAG_INTERNAL)
 				continue;
 
-			command = &port->commands[tag];
-			if (likely(command->comp_func)) {
+			cmd = &port->commands[tag];
+			if (likely(cmd->comp_func)) {
 				set_bit(tag, tagaccum);
-				atomic_set(&port->commands[tag].active, 0);
-				command->comp_func(port,
+				cmd_cnt++;
+				atomic_set(&cmd->active, 0);
+				cmd->comp_func(port,
 					 tag,
-					 command->comp_data,
+					 cmd->comp_data,
 					 0);
 			} else {
 				dev_err(&port->dd->pdev->dev,
@@ -765,12 +827,45 @@
 			}
 		}
 	}
-	print_tags(dd, "TFE tags completed:", tagaccum);
+
+	print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);
 
 	/* Restart the port */
 	mdelay(20);
 	mtip_restart_port(port);
 
+	/* Trying to determine the cause of the error */
+	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
+				dd->port->log_buf,
+				dd->port->log_buf_dma, 1);
+	if (rv) {
+		dev_warn(&dd->pdev->dev,
+			"Error in READ LOG EXT (10h) command\n");
+		/* non-critical error, don't fail the load */
+	} else {
+		buf = (unsigned char *)dd->port->log_buf;
+		if (buf[259] & 0x1) {
+			dev_info(&dd->pdev->dev,
+				"Write protect bit is set.\n");
+			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
+			fail_all_ncq_write = 1;
+			fail_reason = "write protect";
+		}
+		if (buf[288] == 0xF7) {
+			dev_info(&dd->pdev->dev,
+				"Exceeded Tmax, drive in thermal shutdown.\n");
+			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
+			fail_all_ncq_cmds = 1;
+			fail_reason = "thermal shutdown";
+		}
+		if (buf[288] == 0xBF) {
+			dev_info(&dd->pdev->dev,
+				"Drive indicates rebuild has failed.\n");
+			fail_all_ncq_cmds = 1;
+			fail_reason = "rebuild failed";
+		}
+	}
+
 	/* clear the tag accumulator */
 	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
 
@@ -779,32 +874,47 @@
 		for (bit = 0; bit < 32; bit++) {
 			reissue = 1;
 			tag = (group << 5) + bit;
+			cmd = &port->commands[tag];
 
 			/* If the active bit is set re-issue the command */
-			if (atomic_read(&port->commands[tag].active) == 0)
+			if (atomic_read(&cmd->active) == 0)
 				continue;
 
-			fis = (struct host_to_dev_fis *)
-				port->commands[tag].command;
+			fis = (struct host_to_dev_fis *)cmd->command;
 
 			/* Should re-issue? */
 			if (tag == MTIP_TAG_INTERNAL ||
 			    fis->command == ATA_CMD_SET_FEATURES)
 				reissue = 0;
+			else {
+				if (fail_all_ncq_cmds ||
+					(fail_all_ncq_write &&
+					fis->command == ATA_CMD_FPDMA_WRITE)) {
+					dev_warn(&dd->pdev->dev,
+					"  Fail: %s w/tag %d [%s].\n",
+					fis->command == ATA_CMD_FPDMA_WRITE ?
+						"write" : "read",
+					tag,
+					fail_reason != NULL ?
+						fail_reason : "unknown");
+					atomic_set(&cmd->active, 0);
+					if (cmd->comp_func) {
+						cmd->comp_func(port, tag,
+							cmd->comp_data,
+							-ENODATA);
+					}
+					continue;
+				}
+			}
 
 			/*
 			 * First check if this command has
 			 *  exceeded its retries.
 			 */
-			if (reissue &&
-			    (port->commands[tag].retries-- > 0)) {
+			if (reissue && (cmd->retries-- > 0)) {
 
 				set_bit(tag, tagaccum);
 
-				/* Update the timeout value. */
-				port->commands[tag].comp_time =
-					jiffies + msecs_to_jiffies(
-					MTIP_NCQ_COMMAND_TIMEOUT_MS);
 				/* Re-issue the command. */
 				mtip_issue_ncq_command(port, tag);
 
@@ -814,13 +924,13 @@
 			/* Retire a command that will not be reissued */
 			dev_warn(&port->dd->pdev->dev,
 				"retiring tag %d\n", tag);
-			atomic_set(&port->commands[tag].active, 0);
+			atomic_set(&cmd->active, 0);
 
-			if (port->commands[tag].comp_func)
-				port->commands[tag].comp_func(
+			if (cmd->comp_func)
+				cmd->comp_func(
 					port,
 					tag,
-					port->commands[tag].comp_data,
+					cmd->comp_data,
 					PORT_IRQ_TF_ERR);
 			else
 				dev_warn(&port->dd->pdev->dev,
@@ -828,10 +938,10 @@
 					tag);
 		}
 	}
-	print_tags(dd, "TFE tags reissued:", tagaccum);
+	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
 
 	/* clear eh_active */
-	clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+	clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
 	wake_up_interruptible(&port->svc_wait);
 
 	mod_timer(&port->cmd_timer,
@@ -899,7 +1009,7 @@
 	struct mtip_port *port = dd->port;
 	struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
 
-	if (test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
+	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
 	    (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
 		& (1 << MTIP_TAG_INTERNAL))) {
 		if (cmd->comp_func) {
@@ -911,8 +1021,6 @@
 		}
 	}
 
-	dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
-
 	return;
 }
 
@@ -968,6 +1076,9 @@
 				/* don't proceed further */
 				return IRQ_HANDLED;
 			}
+			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+							&dd->dd_flag))
+				return rv;
 
 			mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
 		}
@@ -1015,6 +1126,39 @@
 		port->cmd_issue[MTIP_TAG_INDEX(tag)]);
 }
 
+static bool mtip_pause_ncq(struct mtip_port *port,
+				struct host_to_dev_fis *fis)
+{
+	struct host_to_dev_fis *reply;
+	unsigned long task_file_data;
+
+	reply = port->rxfis + RX_FIS_D2H_REG;
+	task_file_data = readl(port->mmio+PORT_TFDATA);
+
+	if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT))
+		return false;
+
+	if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
+		set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+		port->ic_pause_timer = jiffies;
+		return true;
+	} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
+					(fis->features == 0x03)) {
+		set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
+		port->ic_pause_timer = jiffies;
+		return true;
+	} else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
+		((fis->command == 0xFC) &&
+			(fis->features == 0x27 || fis->features == 0x72 ||
+			 fis->features == 0x62 || fis->features == 0x26))) {
+		/* Com reset after secure erase or lowlevel format */
+		mtip_restart_port(port);
+		return false;
+	}
+
+	return false;
+}
+
 /*
  * Wait for port to quiesce
  *
@@ -1033,11 +1177,13 @@
 
 	to = jiffies + msecs_to_jiffies(timeout);
 	do {
-		if (test_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags) &&
-			test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
+		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
+			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
 			msleep(20);
 			continue; /* svc thd is actively issuing commands */
 		}
+		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+			return -EFAULT;
 		/*
 		 * Ignore s_active bit 0 of array element 0.
 		 * This bit will always be set
@@ -1074,7 +1220,7 @@
  *	-EAGAIN  Time out waiting for command to complete.
  */
 static int mtip_exec_internal_command(struct mtip_port *port,
-					void *fis,
+					struct host_to_dev_fis *fis,
 					int fis_len,
 					dma_addr_t buffer,
 					int buf_len,
@@ -1084,8 +1230,9 @@
 {
 	struct mtip_cmd_sg *command_sg;
 	DECLARE_COMPLETION_ONSTACK(wait);
-	int rv = 0;
+	int rv = 0, ready2go = 1;
 	struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
+	unsigned long to;
 
 	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
 	if (buffer & 0x00000007) {
@@ -1094,23 +1241,38 @@
 		return -EFAULT;
 	}
 
-	/* Only one internal command should be running at a time */
-	if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+	to = jiffies + msecs_to_jiffies(timeout);
+	do {
+		ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL,
+						port->allocated);
+		if (ready2go)
+			break;
+		mdelay(100);
+	} while (time_before(jiffies, to));
+	if (!ready2go) {
 		dev_warn(&port->dd->pdev->dev,
-			"Internal command already active\n");
+			"Internal cmd active. new cmd [%02X]\n", fis->command);
 		return -EBUSY;
 	}
-	set_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+	port->ic_pause_timer = 0;
+
+	if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
+		clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+	else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
+		clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
 
 	if (atomic == GFP_KERNEL) {
-		/* wait for io to complete if non atomic */
-		if (mtip_quiesce_io(port, 5000) < 0) {
-			dev_warn(&port->dd->pdev->dev,
-				"Failed to quiesce IO\n");
-			release_slot(port, MTIP_TAG_INTERNAL);
-			clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
-			wake_up_interruptible(&port->svc_wait);
-			return -EBUSY;
+		if (fis->command != ATA_CMD_STANDBYNOW1) {
+			/* wait for io to complete if non atomic */
+			if (mtip_quiesce_io(port, 5000) < 0) {
+				dev_warn(&port->dd->pdev->dev,
+					"Failed to quiesce IO\n");
+				release_slot(port, MTIP_TAG_INTERNAL);
+				clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+				wake_up_interruptible(&port->svc_wait);
+				return -EBUSY;
+			}
 		}
 
 		/* Set the completion function and data for the command. */
@@ -1120,7 +1282,7 @@
 	} else {
 		/* Clear completion - we're going to poll */
 		int_cmd->comp_data = NULL;
-		int_cmd->comp_func = NULL;
+		int_cmd->comp_func = mtip_null_completion;
 	}
 
 	/* Copy the command to the command table */
@@ -1159,6 +1321,12 @@
 				"Internal command did not complete [%d] "
 				"within timeout of  %lu ms\n",
 				atomic, timeout);
+			if (mtip_check_surprise_removal(port->dd->pdev) ||
+				test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+						&port->dd->dd_flag)) {
+				rv = -ENXIO;
+				goto exec_ic_exit;
+			}
 			rv = -EAGAIN;
 		}
 
@@ -1166,31 +1334,59 @@
 			& (1 << MTIP_TAG_INTERNAL)) {
 			dev_warn(&port->dd->pdev->dev,
 				"Retiring internal command but CI is 1.\n");
+			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+						&port->dd->dd_flag)) {
+				hba_reset_nosleep(port->dd);
+				rv = -ENXIO;
+			} else {
+				mtip_restart_port(port);
+				rv = -EAGAIN;
+			}
+			goto exec_ic_exit;
 		}
 
 	} else {
 		/* Spin for <timeout> checking if command still outstanding */
 		timeout = jiffies + msecs_to_jiffies(timeout);
-
-		while ((readl(
-			port->cmd_issue[MTIP_TAG_INTERNAL])
-			& (1 << MTIP_TAG_INTERNAL))
-			&& time_before(jiffies, timeout))
-			;
+		while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+				& (1 << MTIP_TAG_INTERNAL))
+				&& time_before(jiffies, timeout)) {
+			if (mtip_check_surprise_removal(port->dd->pdev)) {
+				rv = -ENXIO;
+				goto exec_ic_exit;
+			}
+			if ((fis->command != ATA_CMD_STANDBYNOW1) &&
+				test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+						&port->dd->dd_flag)) {
+				rv = -ENXIO;
+				goto exec_ic_exit;
+			}
+		}
 
 		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
 			& (1 << MTIP_TAG_INTERNAL)) {
 			dev_err(&port->dd->pdev->dev,
-				"Internal command did not complete [%d]\n",
-				atomic);
+				"Internal command did not complete [atomic]\n");
 			rv = -EAGAIN;
+			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+						&port->dd->dd_flag)) {
+				hba_reset_nosleep(port->dd);
+				rv = -ENXIO;
+			} else {
+				mtip_restart_port(port);
+				rv = -EAGAIN;
+			}
 		}
 	}
-
+exec_ic_exit:
 	/* Clear the allocated and active bits for the internal command. */
 	atomic_set(&int_cmd->active, 0);
 	release_slot(port, MTIP_TAG_INTERNAL);
-	clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+	if (rv >= 0 && mtip_pause_ncq(port, fis)) {
+		/* NCQ paused */
+		return rv;
+	}
+	clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
 	wake_up_interruptible(&port->svc_wait);
 
 	return rv;
@@ -1240,6 +1436,9 @@
 	int rv = 0;
 	struct host_to_dev_fis fis;
 
+	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+		return -EFAULT;
+
 	/* Build the FIS. */
 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
 	fis.type	= 0x27;
@@ -1313,6 +1512,7 @@
 {
 	int rv;
 	struct host_to_dev_fis	fis;
+	unsigned long start;
 
 	/* Build the FIS. */
 	memset(&fis, 0, sizeof(struct host_to_dev_fis));
@@ -1320,15 +1520,150 @@
 	fis.opts	= 1 << 7;
 	fis.command	= ATA_CMD_STANDBYNOW1;
 
-	/* Execute the command.  Use a 15-second timeout for large drives. */
+	start = jiffies;
 	rv = mtip_exec_internal_command(port,
 					&fis,
 					5,
 					0,
 					0,
 					0,
-					GFP_KERNEL,
+					GFP_ATOMIC,
 					15000);
+	dbg_printk(MTIP_DRV_NAME " Time taken to complete standby cmd: %d ms\n",
+			jiffies_to_msecs(jiffies - start));
+	if (rv)
+		dev_warn(&port->dd->pdev->dev,
+			"STANDBY IMMEDIATE command failed.\n");
+
+	return rv;
+}
+
+/*
+ * Issue a READ LOG EXT command to the device.
+ *
+ * @port	pointer to the port structure.
+ * @page	page number to fetch
+ * @buffer	pointer to buffer
+ * @buffer_dma	dma address corresponding to @buffer
+ * @sectors	page length to fetch, in sectors
+ *
+ * return value
+ *	@rv	return value from mtip_exec_internal_command()
+ */
+static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
+				dma_addr_t buffer_dma, unsigned int sectors)
+{
+	struct host_to_dev_fis fis;
+
+	memset(&fis, 0, sizeof(struct host_to_dev_fis));
+	fis.type	= 0x27;
+	fis.opts	= 1 << 7;
+	fis.command	= ATA_CMD_READ_LOG_EXT;
+	fis.sect_count	= sectors & 0xFF;
+	fis.sect_cnt_ex	= (sectors >> 8) & 0xFF;
+	fis.lba_low	= page;
+	fis.lba_mid	= 0;
+	fis.device	= ATA_DEVICE_OBS;
+
+	memset(buffer, 0, sectors * ATA_SECT_SIZE);
+
+	return mtip_exec_internal_command(port,
+					&fis,
+					5,
+					buffer_dma,
+					sectors * ATA_SECT_SIZE,
+					0,
+					GFP_ATOMIC,
+					MTIP_INTERNAL_COMMAND_TIMEOUT_MS);
+}
+
+/*
+ * Issue a SMART READ DATA command to the device.
+ *
+ * @port	pointer to the port structure.
+ * @buffer	pointer to buffer
+ * @buffer_dma	dma address corresponding to @buffer
+ *
+ * return value
+ *	@rv	return value from mtip_exec_internal_command()
+ */
+static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
+					dma_addr_t buffer_dma)
+{
+	struct host_to_dev_fis fis;
+
+	memset(&fis, 0, sizeof(struct host_to_dev_fis));
+	fis.type	= 0x27;
+	fis.opts	= 1 << 7;
+	fis.command	= ATA_CMD_SMART;
+	fis.features	= 0xD0;
+	fis.sect_count	= 1;
+	fis.lba_mid	= 0x4F;
+	fis.lba_hi	= 0xC2;
+	fis.device	= ATA_DEVICE_OBS;
+
+	return mtip_exec_internal_command(port,
+					&fis,
+					5,
+					buffer_dma,
+					ATA_SECT_SIZE,
+					0,
+					GFP_ATOMIC,
+					15000);
+}
+
+/*
+ * Get the value of a smart attribute
+ *
+ * @port	pointer to the port structure
+ * @id		attribute number
+ * @attrib	pointer to return attrib information corresponding to @id
+ *
+ * return value
+ *	-EINVAL	NULL buffer passed or unsupported attribute @id.
+ *	-EPERM	Identify data not valid, SMART not supported or not enabled
+ */
+static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
+						struct smart_attr *attrib)
+{
+	int rv, i;
+	struct smart_attr *pattr;
+
+	if (!attrib)
+		return -EINVAL;
+
+	if (!port->identify_valid) {
+		dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
+		return -EPERM;
+	}
+	if (!(port->identify[82] & 0x1)) {
+		dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
+		return -EPERM;
+	}
+	if (!(port->identify[85] & 0x1)) {
+		dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
+		return -EPERM;
+	}
+
+	memset(port->smart_buf, 0, ATA_SECT_SIZE);
+	rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
+	if (rv) {
+		dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
+		return rv;
+	}
+
+	pattr = (struct smart_attr *)(port->smart_buf + 2);
+	for (i = 0; i < 29; i++, pattr++)
+		if (pattr->attr_id == id) {
+			memcpy(attrib, pattr, sizeof(struct smart_attr));
+			break;
+		}
+
+	if (i == 29) {
+		dev_warn(&port->dd->pdev->dev,
+			"Query for invalid SMART attribute ID\n");
+		rv = -EINVAL;
+	}
 
 	return rv;
 }
@@ -1504,10 +1839,7 @@
 	fis.cyl_hi	= command[5];
 	fis.device	= command[6] & ~0x10; /* Clear the dev bit*/
 
-
-	dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
-		"nsect %x, sect %x, lcyl %x, "
-		"hcyl %x, sel %x\n",
+	dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
 		__func__,
 		command[0],
 		command[1],
@@ -1534,8 +1866,7 @@
 	command[4] = reply->cyl_low;
 	command[5] = reply->cyl_hi;
 
-	dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, "
-		"err %x , cyl_lo %x cyl_hi %x\n",
+	dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
 		__func__,
 		command[0],
 		command[1],
@@ -1578,7 +1909,7 @@
 	}
 
 	dbg_printk(MTIP_DRV_NAME
-		"%s: User Command: cmd %x, sect %x, "
+		" %s: User Command: cmd %x, sect %x, "
 		"feat %x, sectcnt %x\n",
 		__func__,
 		command[0],
@@ -1607,7 +1938,7 @@
 	command[2] = command[3];
 
 	dbg_printk(MTIP_DRV_NAME
-		"%s: Completion Status: stat %x, "
+		" %s: Completion Status: stat %x, "
 		"err %x, cmd %x\n",
 		__func__,
 		command[0],
@@ -1810,9 +2141,10 @@
 	}
 
 	dbg_printk(MTIP_DRV_NAME
-		"taskfile: cmd %x, feat %x, nsect %x,"
+		" %s: cmd %x, feat %x, nsect %x,"
 		" sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
 		" head/dev %x\n",
+		__func__,
 		fis.command,
 		fis.features,
 		fis.sect_count,
@@ -1823,8 +2155,8 @@
 
 	switch (fis.command) {
 	case ATA_CMD_DOWNLOAD_MICRO:
-		/* Change timeout for Download Microcode to 60 seconds.*/
-		timeout = 60000;
+		/* Change timeout for Download Microcode to 2 minutes */
+		timeout = 120000;
 		break;
 	case ATA_CMD_SEC_ERASE_UNIT:
 		/* Change timeout for Security Erase Unit to 4 minutes.*/
@@ -1840,8 +2172,8 @@
 		timeout = 10000;
 		break;
 	case ATA_CMD_SMART:
-		/* Change timeout for vendor unique command to 10 secs */
-		timeout = 10000;
+		/* Change timeout for vendor unique command to 15 secs */
+		timeout = 15000;
 		break;
 	default:
 		timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
@@ -1903,18 +2235,8 @@
 		req_task->hob_ports[1] = reply->features_ex;
 		req_task->hob_ports[2] = reply->sect_cnt_ex;
 	}
-
-	/* Com rest after secure erase or lowlevel format */
-	if (((fis.command == ATA_CMD_SEC_ERASE_UNIT) ||
-		((fis.command == 0xFC) &&
-			(fis.features == 0x27 || fis.features == 0x72 ||
-			 fis.features == 0x62 || fis.features == 0x26))) &&
-			 !(reply->command & 1)) {
-		mtip_restart_port(dd->port);
-	}
-
 	dbg_printk(MTIP_DRV_NAME
-		"%s: Completion: stat %x,"
+		" %s: Completion: stat %x,"
 		"err %x, sect_cnt %x, lbalo %x,"
 		"lbamid %x, lbahi %x, dev %x\n",
 		__func__,
@@ -2080,14 +2402,10 @@
 	struct host_to_dev_fis	*fis;
 	struct mtip_port *port = dd->port;
 	struct mtip_cmd *command = &port->commands[tag];
+	int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 
 	/* Map the scatter list for DMA access */
-	if (dir == READ)
-		nents = dma_map_sg(&dd->pdev->dev, command->sg,
-					nents, DMA_FROM_DEVICE);
-	else
-		nents = dma_map_sg(&dd->pdev->dev, command->sg,
-					nents, DMA_TO_DEVICE);
+	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
 
 	command->scatter_ents = nents;
 
@@ -2127,7 +2445,7 @@
 	 */
 	command->comp_data = dd;
 	command->comp_func = mtip_async_complete;
-	command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+	command->direction = dma_dir;
 
 	/*
 	 * Set the completion function and data for the command passed
@@ -2140,19 +2458,16 @@
 	 * To prevent this command from being issued
 	 * if an internal command is in progress or error handling is active.
 	 */
-	if (unlikely(test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) ||
-			test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags))) {
+	if (port->flags & MTIP_PF_PAUSE_IO) {
 		set_bit(tag, port->cmds_to_issue);
-		set_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
+		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
 		return;
 	}
 
 	/* Issue the command to the hardware */
 	mtip_issue_ncq_command(port, tag);
 
-	/* Set the command's timeout value.*/
-	port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
-					MTIP_NCQ_COMMAND_TIMEOUT_MS);
+	return;
 }
 
 /*
@@ -2191,6 +2506,10 @@
 	down(&dd->port->cmd_slot);
 	*tag = get_slot(dd->port);
 
+	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
+		up(&dd->port->cmd_slot);
+		return NULL;
+	}
 	if (unlikely(*tag < 0))
 		return NULL;
 
@@ -2207,7 +2526,7 @@
  * return value
  *	The size, in bytes, of the data copied into buf.
  */
-static ssize_t hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_registers(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
@@ -2216,7 +2535,7 @@
 	int size = 0;
 	int n;
 
-	size += sprintf(&buf[size], "%s:\ns_active:\n", __func__);
+	size += sprintf(&buf[size], "S ACTive:\n");
 
 	for (n = 0; n < dd->slot_groups; n++)
 		size += sprintf(&buf[size], "0x%08x\n",
@@ -2240,20 +2559,39 @@
 				 group_allocated);
 	}
 
-	size += sprintf(&buf[size], "completed:\n");
+	size += sprintf(&buf[size], "Completed:\n");
 
 	for (n = 0; n < dd->slot_groups; n++)
 		size += sprintf(&buf[size], "0x%08x\n",
 				readl(dd->port->completed[n]));
 
-	size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n",
+	size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
 				readl(dd->port->mmio + PORT_IRQ_STAT));
-	size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n",
+	size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
 				readl(dd->mmio + HOST_IRQ_STAT));
 
 	return size;
 }
-static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL);
+
+static ssize_t mtip_hw_show_status(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct driver_data *dd = dev_to_disk(dev)->private_data;
+	int size = 0;
+
+	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+		size += sprintf(buf, "%s", "thermal_shutdown\n");
+	else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+		size += sprintf(buf, "%s", "write_protect\n");
+	else
+		size += sprintf(buf, "%s", "online\n");
+
+	return size;
+}
+
+static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
 
 /*
  * Create the sysfs related attributes.
@@ -2272,7 +2610,10 @@
 
 	if (sysfs_create_file(kobj, &dev_attr_registers.attr))
 		dev_warn(&dd->pdev->dev,
-			"Error creating registers sysfs entry\n");
+			"Error creating 'registers' sysfs entry\n");
+	if (sysfs_create_file(kobj, &dev_attr_status.attr))
+		dev_warn(&dd->pdev->dev,
+			"Error creating 'status' sysfs entry\n");
 	return 0;
 }
 
@@ -2292,6 +2633,7 @@
 		return -EINVAL;
 
 	sysfs_remove_file(kobj, &dev_attr_registers.attr);
+	sysfs_remove_file(kobj, &dev_attr_status.attr);
 
 	return 0;
 }
@@ -2384,10 +2726,12 @@
 		"FTL rebuild in progress. Polling for completion.\n");
 
 	start = jiffies;
-	dd->ftlrebuildflag = 1;
 	timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
 
 	do {
+		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+				&dd->dd_flag)))
+			return -EFAULT;
 		if (mtip_check_surprise_removal(dd->pdev))
 			return -EFAULT;
 
@@ -2408,22 +2752,17 @@
 			dev_warn(&dd->pdev->dev,
 				"FTL rebuild complete (%d secs).\n",
 			jiffies_to_msecs(jiffies - start) / 1000);
-			dd->ftlrebuildflag = 0;
 			mtip_block_initialize(dd);
-			break;
+			return 0;
 		}
 		ssleep(10);
 	} while (time_before(jiffies, timeout));
 
 	/* Check for timeout */
-	if (dd->ftlrebuildflag) {
-		dev_err(&dd->pdev->dev,
+	dev_err(&dd->pdev->dev,
 		"Timed out waiting for FTL rebuild to complete (%d secs).\n",
 		jiffies_to_msecs(jiffies - start) / 1000);
-		return -EFAULT;
-	}
-
-	return 0;
+	return -EFAULT;
 }
 
 /*
@@ -2448,14 +2787,17 @@
 		 * is in progress nor error handling is active
 		 */
 		wait_event_interruptible(port->svc_wait, (port->flags) &&
-			!test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
-			!test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags));
+			!(port->flags & MTIP_PF_PAUSE_IO));
 
 		if (kthread_should_stop())
 			break;
 
-		set_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
-		if (test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
+		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+				&dd->dd_flag)))
+			break;
+
+		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+		if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
 			slot = 1;
 			/* used to restrict the loop to one iteration */
 			slot_start = num_cmd_slots;
@@ -2480,21 +2822,19 @@
 				/* Issue the command to the hardware */
 				mtip_issue_ncq_command(port, slot);
 
-				/* Set the command's timeout value.*/
-				port->commands[slot].comp_time = jiffies +
-				msecs_to_jiffies(MTIP_NCQ_COMMAND_TIMEOUT_MS);
-
 				clear_bit(slot, port->cmds_to_issue);
 			}
 
-			clear_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
-		} else if (test_bit(MTIP_FLAG_REBUILD_BIT, &port->flags)) {
-			mtip_ftl_rebuild_poll(dd);
-			clear_bit(MTIP_FLAG_REBUILD_BIT, &port->flags);
+			clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
+		} else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
+			if (!mtip_ftl_rebuild_poll(dd))
+				set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
+							&dd->dd_flag);
+			clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
 		}
-		clear_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
+		clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
 
-		if (test_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &port->flags))
+		if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
 			break;
 	}
 	return 0;
@@ -2513,6 +2853,9 @@
 	int i;
 	int rv;
 	unsigned int num_command_slots;
+	unsigned long timeout, timetaken;
+	unsigned char *buf;
+	struct smart_attr attr242;
 
 	dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
 
@@ -2547,7 +2890,7 @@
 	/* Allocate memory for the command list. */
 	dd->port->command_list =
 		dmam_alloc_coherent(&dd->pdev->dev,
-			HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+			HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
 			&dd->port->command_list_dma,
 			GFP_KERNEL);
 	if (!dd->port->command_list) {
@@ -2560,7 +2903,7 @@
 	/* Clear the memory we have allocated. */
 	memset(dd->port->command_list,
 		0,
-		HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2));
+		HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
 
 	/* Setup the addresse of the RX FIS. */
 	dd->port->rxfis	    = dd->port->command_list + HW_CMD_SLOT_SZ;
@@ -2576,10 +2919,19 @@
 	dd->port->identify_dma = dd->port->command_tbl_dma +
 					HW_CMD_TBL_AR_SZ;
 
-	/* Setup the address of the sector buffer. */
+	/* Setup the address of the sector buffer - for some non-ncq cmds */
 	dd->port->sector_buffer	= (void *) dd->port->identify + ATA_SECT_SIZE;
 	dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
 
+	/* Setup the address of the log buf - for read log command */
+	dd->port->log_buf = (void *)dd->port->sector_buffer + ATA_SECT_SIZE;
+	dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
+
+	/* Setup the address of the smart buf - for smart read data command */
+	dd->port->smart_buf = (void *)dd->port->log_buf + ATA_SECT_SIZE;
+	dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
+
+
 	/* Point the command headers at the command tables. */
 	for (i = 0; i < num_command_slots; i++) {
 		dd->port->commands[i].command_header =
@@ -2623,14 +2975,43 @@
 			dd->port->mmio + i*0x80 + PORT_SDBV;
 	}
 
-	/* Reset the HBA. */
-	if (mtip_hba_reset(dd) < 0) {
-		dev_err(&dd->pdev->dev,
-			"Card did not reset within timeout\n");
-		rv = -EIO;
+	timetaken = jiffies;
+	timeout = jiffies + msecs_to_jiffies(30000);
+	while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
+		 time_before(jiffies, timeout)) {
+		mdelay(100);
+	}
+	if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
+		timetaken = jiffies - timetaken;
+		dev_warn(&dd->pdev->dev,
+			"Surprise removal detected at %u ms\n",
+			jiffies_to_msecs(timetaken));
+		rv = -ENODEV;
+		goto out2;
+	}
+	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
+		timetaken = jiffies - timetaken;
+		dev_warn(&dd->pdev->dev,
+			"Removal detected at %u ms\n",
+			jiffies_to_msecs(timetaken));
+		rv = -EFAULT;
 		goto out2;
 	}
 
+	/* Conditionally reset the HBA. */
+	if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
+		if (mtip_hba_reset(dd) < 0) {
+			dev_err(&dd->pdev->dev,
+				"Card did not reset within timeout\n");
+			rv = -EIO;
+			goto out2;
+		}
+	} else {
+		/* Clear any pending interrupts on the HBA */
+		writel(readl(dd->mmio + HOST_IRQ_STAT),
+			dd->mmio + HOST_IRQ_STAT);
+	}
+
 	mtip_init_port(dd->port);
 	mtip_start_port(dd->port);
 
@@ -2660,6 +3041,12 @@
 	mod_timer(&dd->port->cmd_timer,
 		jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
 
+
+	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
+		rv = -EFAULT;
+		goto out3;
+	}
+
 	if (mtip_get_identify(dd->port, NULL) < 0) {
 		rv = -EFAULT;
 		goto out3;
@@ -2667,10 +3054,47 @@
 
 	if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
 		MTIP_FTL_REBUILD_MAGIC) {
-		set_bit(MTIP_FLAG_REBUILD_BIT, &dd->port->flags);
+		set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
 		return MTIP_FTL_REBUILD_MAGIC;
 	}
 	mtip_dump_identify(dd->port);
+
+	/* check write protect, over temp and rebuild statuses */
+	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
+				dd->port->log_buf,
+				dd->port->log_buf_dma, 1);
+	if (rv) {
+		dev_warn(&dd->pdev->dev,
+			"Error in READ LOG EXT (10h) command\n");
+		/* non-critical error, don't fail the load */
+	} else {
+		buf = (unsigned char *)dd->port->log_buf;
+		if (buf[259] & 0x1) {
+			dev_info(&dd->pdev->dev,
+				"Write protect bit is set.\n");
+			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
+		}
+		if (buf[288] == 0xF7) {
+			dev_info(&dd->pdev->dev,
+				"Exceeded Tmax, drive in thermal shutdown.\n");
+			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
+		}
+		if (buf[288] == 0xBF) {
+			dev_info(&dd->pdev->dev,
+				"Drive indicates rebuild has failed.\n");
+			/* TODO */
+		}
+	}
+
+	/* get write protect progress */
+	memset(&attr242, 0, sizeof(struct smart_attr));
+	if (mtip_get_smart_attr(dd->port, 242, &attr242))
+		dev_warn(&dd->pdev->dev,
+				"Unable to check write protect progress\n");
+	else
+		dev_info(&dd->pdev->dev,
+				"Write protect progress: %d%% (%d blocks)\n",
+				attr242.cur, attr242.data);
 	return rv;
 
 out3:
@@ -2688,7 +3112,7 @@
 
 	/* Free the command/command header memory. */
 	dmam_free_coherent(&dd->pdev->dev,
-				HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+				HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
 				dd->port->command_list,
 				dd->port->command_list_dma);
 out1:
@@ -2712,9 +3136,12 @@
 	 * Send standby immediate (E0h) to the drive so that it
 	 * saves its state.
 	 */
-	if (atomic_read(&dd->drv_cleanup_done) != true) {
+	if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
 
-		mtip_standby_immediate(dd->port);
+		if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags))
+			if (mtip_standby_immediate(dd->port))
+				dev_warn(&dd->pdev->dev,
+					"STANDBY IMMEDIATE failed\n");
 
 		/* de-initialize the port. */
 		mtip_deinit_port(dd->port);
@@ -2734,7 +3161,7 @@
 
 	/* Free the command/command header memory. */
 	dmam_free_coherent(&dd->pdev->dev,
-			HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+			HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
 			dd->port->command_list,
 			dd->port->command_list_dma);
 	/* Free the memory allocated for the for structure. */
@@ -2892,6 +3319,9 @@
 	if (!dd)
 		return -ENOTTY;
 
+	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
+		return -ENOTTY;
+
 	switch (cmd) {
 	case BLKFLSBUF:
 		return -ENOTTY;
@@ -2927,6 +3357,9 @@
 	if (!dd)
 		return -ENOTTY;
 
+	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
+		return -ENOTTY;
+
 	switch (cmd) {
 	case BLKFLSBUF:
 		return -ENOTTY;
@@ -3049,6 +3482,24 @@
 	int nents = 0;
 	int tag = 0;
 
+	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
+		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+							&dd->dd_flag))) {
+			bio_endio(bio, -ENXIO);
+			return;
+		}
+		if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
+			bio_endio(bio, -ENODATA);
+			return;
+		}
+		if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
+							&dd->dd_flag) &&
+				bio_data_dir(bio))) {
+			bio_endio(bio, -ENODATA);
+			return;
+		}
+	}
+
 	if (unlikely(!bio_has_data(bio))) {
 		blk_queue_flush(queue, 0);
 		bio_endio(bio, 0);
@@ -3061,7 +3512,7 @@
 
 		if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
 			dev_warn(&dd->pdev->dev,
-				"Maximum number of SGL entries exceeded");
+				"Maximum number of SGL entries exceeded\n");
 			bio_io_error(bio);
 			mtip_hw_release_scatterlist(dd, tag);
 			return;
@@ -3210,8 +3661,10 @@
 		kobject_put(kobj);
 	}
 
-	if (dd->mtip_svc_handler)
+	if (dd->mtip_svc_handler) {
+		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
 		return rv; /* service thread created for handling rebuild */
+	}
 
 start_service_thread:
 	sprintf(thd_name, "mtip_svc_thd_%02d", index);
@@ -3220,12 +3673,15 @@
 						dd, thd_name);
 
 	if (IS_ERR(dd->mtip_svc_handler)) {
-		printk(KERN_ERR "mtip32xx: service thread failed to start\n");
+		dev_err(&dd->pdev->dev, "service thread failed to start\n");
 		dd->mtip_svc_handler = NULL;
 		rv = -EFAULT;
 		goto kthread_run_error;
 	}
 
+	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
+		rv = wait_for_rebuild;
+
 	return rv;
 
 kthread_run_error:
@@ -3266,16 +3722,18 @@
 	struct kobject *kobj;
 
 	if (dd->mtip_svc_handler) {
-		set_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &dd->port->flags);
+		set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
 		wake_up_interruptible(&dd->port->svc_wait);
 		kthread_stop(dd->mtip_svc_handler);
 	}
 
-	/* Clean up the sysfs attributes managed by the protocol layer. */
-	kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
-	if (kobj) {
-		mtip_hw_sysfs_exit(dd, kobj);
-		kobject_put(kobj);
+	/* Clean up the sysfs attributes, if created */
+	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
+		kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+		if (kobj) {
+			mtip_hw_sysfs_exit(dd, kobj);
+			kobject_put(kobj);
+		}
 	}
 
 	/*
@@ -3283,6 +3741,11 @@
 	 * from /dev
 	 */
 	del_gendisk(dd->disk);
+
+	spin_lock(&rssd_index_lock);
+	ida_remove(&rssd_index_ida, dd->index);
+	spin_unlock(&rssd_index_lock);
+
 	blk_cleanup_queue(dd->queue);
 	dd->disk  = NULL;
 	dd->queue = NULL;
@@ -3312,6 +3775,11 @@
 
 	/* Delete our gendisk structure, and cleanup the blk queue. */
 	del_gendisk(dd->disk);
+
+	spin_lock(&rssd_index_lock);
+	ida_remove(&rssd_index_ida, dd->index);
+	spin_unlock(&rssd_index_lock);
+
 	blk_cleanup_queue(dd->queue);
 	dd->disk  = NULL;
 	dd->queue = NULL;
@@ -3359,11 +3827,6 @@
 		return -ENOMEM;
 	}
 
-	/* Set the atomic variable as 1 in case of SRSI */
-	atomic_set(&dd->drv_cleanup_done, true);
-
-	atomic_set(&dd->resumeflag, false);
-
 	/* Attach the private data to this PCI device.  */
 	pci_set_drvdata(pdev, dd);
 
@@ -3420,7 +3883,8 @@
 	 * instance number.
 	 */
 	instance++;
-
+	if (rv != MTIP_FTL_REBUILD_MAGIC)
+		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
 	goto done;
 
 block_initialize_err:
@@ -3434,9 +3898,6 @@
 	pci_set_drvdata(pdev, NULL);
 	return rv;
 done:
-	/* Set the atomic variable as 0 in case of SRSI */
-	atomic_set(&dd->drv_cleanup_done, true);
-
 	return rv;
 }
 
@@ -3452,8 +3913,10 @@
 	struct driver_data *dd = pci_get_drvdata(pdev);
 	int counter = 0;
 
+	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+
 	if (mtip_check_surprise_removal(pdev)) {
-		while (atomic_read(&dd->drv_cleanup_done) == false) {
+		while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
 			counter++;
 			msleep(20);
 			if (counter == 10) {
@@ -3463,8 +3926,6 @@
 			}
 		}
 	}
-	/* Set the atomic variable as 1 in case of SRSI */
-	atomic_set(&dd->drv_cleanup_done, true);
 
 	/* Clean up the block layer. */
 	mtip_block_remove(dd);
@@ -3493,7 +3954,7 @@
 		return -EFAULT;
 	}
 
-	atomic_set(&dd->resumeflag, true);
+	set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
 
 	/* Disable ports & interrupts then send standby immediate */
 	rv = mtip_block_suspend(dd);
@@ -3559,7 +4020,7 @@
 		dev_err(&pdev->dev, "Unable to resume\n");
 
 err:
-	atomic_set(&dd->resumeflag, false);
+	clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
 
 	return rv;
 }
@@ -3608,18 +4069,25 @@
  */
 static int __init mtip_init(void)
 {
+	int error;
+
 	printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
 
 	/* Allocate a major block device number to use with this driver. */
-	mtip_major = register_blkdev(0, MTIP_DRV_NAME);
-	if (mtip_major < 0) {
+	error = register_blkdev(0, MTIP_DRV_NAME);
+	if (error <= 0) {
 		printk(KERN_ERR "Unable to register block device (%d)\n",
-		mtip_major);
+		error);
 		return -EBUSY;
 	}
+	mtip_major = error;
 
 	/* Register our PCI operations. */
-	return pci_register_driver(&mtip_pci_driver);
+	error = pci_register_driver(&mtip_pci_driver);
+	if (error)
+		unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+
+	return error;
 }
 
 /*
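
Not part of the patch: a minimal userspace sketch of how the new 'status' attribute created by mtip_hw_show_status() can be consumed. The "rssd0" path is an assumption; substitute the gendisk name of the device under test. The strings checked are exactly the ones the attribute emits above.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[32] = "";
	FILE *f = fopen("/sys/block/rssd0/status", "r");	/* assumed path */

	if (!f) {
		perror("status attribute");
		return 1;
	}
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* Values written by mtip_hw_show_status() in the patch above. */
	if (!strncmp(buf, "write_protect", 13))
		printf("drive is write protected\n");
	else if (!strncmp(buf, "thermal_shutdown", 16))
		printf("drive exceeded Tmax, in thermal shutdown\n");
	else
		printf("drive online\n");
	return 0;
}
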
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index e0554a8..4ef5833 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -34,8 +34,8 @@
 /* offset of Device Control register in PCIe extended capabilites space */
 #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET	0x48
 
-/* # of times to retry timed out IOs */
-#define MTIP_MAX_RETRIES	5
+/* # of times to retry timed out/failed IOs */
+#define MTIP_MAX_RETRIES	2
 
 /* Various timeout values in ms */
 #define MTIP_NCQ_COMMAND_TIMEOUT_MS       5000
@@ -114,12 +114,41 @@
 #define __force_bit2int (unsigned int __force)
 
 /* below are bit numbers in 'flags' defined in mtip_port */
-#define MTIP_FLAG_IC_ACTIVE_BIT			0
-#define MTIP_FLAG_EH_ACTIVE_BIT			1
-#define MTIP_FLAG_SVC_THD_ACTIVE_BIT		2
-#define MTIP_FLAG_ISSUE_CMDS_BIT		4
-#define MTIP_FLAG_REBUILD_BIT			5
-#define MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT	8
+#define MTIP_PF_IC_ACTIVE_BIT		0 /* pio/ioctl */
+#define MTIP_PF_EH_ACTIVE_BIT		1 /* error handling */
+#define MTIP_PF_SE_ACTIVE_BIT		2 /* secure erase */
+#define MTIP_PF_DM_ACTIVE_BIT		3 /* download microcode */
+#define MTIP_PF_PAUSE_IO	((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+				(1 << MTIP_PF_EH_ACTIVE_BIT) | \
+				(1 << MTIP_PF_SE_ACTIVE_BIT) | \
+				(1 << MTIP_PF_DM_ACTIVE_BIT))
+
+#define MTIP_PF_SVC_THD_ACTIVE_BIT	4
+#define MTIP_PF_ISSUE_CMDS_BIT		5
+#define MTIP_PF_REBUILD_BIT		6
+#define MTIP_PF_SVC_THD_STOP_BIT	8
+
+/* below are bit numbers in 'dd_flag' defined in driver_data */
+#define MTIP_DDF_REMOVE_PENDING_BIT	1
+#define MTIP_DDF_OVER_TEMP_BIT		2
+#define MTIP_DDF_WRITE_PROTECT_BIT	3
+#define MTIP_DDF_STOP_IO	((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+				(1 << MTIP_DDF_OVER_TEMP_BIT) | \
+				(1 << MTIP_DDF_WRITE_PROTECT_BIT))
+
+#define MTIP_DDF_CLEANUP_BIT		5
+#define MTIP_DDF_RESUME_BIT		6
+#define MTIP_DDF_INIT_DONE_BIT		7
+#define MTIP_DDF_REBUILD_FAILED_BIT	8
+
+__packed struct smart_attr {
+	u8 attr_id;
+	u16 flags;
+	u8 cur;
+	u8 worst;
+	u32 data;
+	u8 res[3];
+};
 
 /* Register Frame Information Structure (FIS), host to device. */
 struct host_to_dev_fis {
@@ -345,6 +374,12 @@
 	 * when the command slot and all associated data structures
 	 * are no longer needed.
 	 */
+	u16 *log_buf;
+	dma_addr_t log_buf_dma;
+
+	u8 *smart_buf;
+	dma_addr_t smart_buf_dma;
+
 	unsigned long allocated[SLOTBITS_IN_LONGS];
 	/*
 	 * used to queue commands when an internal command is in progress
@@ -368,6 +403,7 @@
 	 * Timer used to complete commands that have been active for too long.
 	 */
 	struct timer_list cmd_timer;
+	unsigned long ic_pause_timer;
 	/*
 	 * Semaphore used to block threads if there are no
 	 * command slots available.
@@ -404,13 +440,9 @@
 
 	unsigned slot_groups; /* number of slot groups the product supports */
 
-	atomic_t drv_cleanup_done; /* Atomic variable for SRSI */
-
 	unsigned long index; /* Index to determine the disk name */
 
-	unsigned int ftlrebuildflag; /* FTL rebuild flag */
-
-	atomic_t resumeflag; /* Atomic variable to track suspend/resume */
+	unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
 
 	struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
 };
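
mtip_get_smart_attr() above walks 29 consecutive 12-byte entries starting at byte 2 of the SMART READ DATA sector, so struct smart_attr has to stay exactly 12 bytes; without __packed the compiler would typically pad it to 16. A standalone sketch (userspace C, hypothetical type name) of that layout constraint:

#include <stdint.h>
#include <stdio.h>

struct smart_attr_packed {		/* mirrors struct smart_attr above */
	uint8_t  attr_id;
	uint16_t flags;
	uint8_t  cur;
	uint8_t  worst;
	uint32_t data;
	uint8_t  res[3];
} __attribute__((packed));

_Static_assert(sizeof(struct smart_attr_packed) == 12,
	       "on-wire SMART attribute entry must be 12 bytes");

int main(void)
{
	printf("packed SMART entry: %zu bytes\n",
	       sizeof(struct smart_attr_packed));
	return 0;
}
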
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c4a60ba..0d39f2f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -351,6 +351,7 @@
 		  cap_str_10, cap_str_2);
 
 	set_capacity(vblk->disk, capacity);
+	revalidate_disk(vblk->disk);
 done:
 	mutex_unlock(&vblk->config_lock);
 }
@@ -374,6 +375,34 @@
 	return err;
 }
 
+/*
+ * Legacy naming scheme used for virtio devices.  We are stuck with it for
+ * virtio blk but don't ever use it for any new driver.
+ */
+static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
+{
+	const int base = 'z' - 'a' + 1;
+	char *begin = buf + strlen(prefix);
+	char *end = buf + buflen;
+	char *p;
+	int unit;
+
+	p = end - 1;
+	*p = '\0';
+	unit = base;
+	do {
+		if (p == begin)
+			return -EINVAL;
+		*--p = 'a' + (index % unit);
+		index = (index / unit) - 1;
+	} while (index >= 0);
+
+	memmove(begin, p, end - p);
+	memcpy(buf, prefix, strlen(prefix));
+
+	return 0;
+}
+
 static int __devinit virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
@@ -442,18 +471,7 @@
 
 	q->queuedata = vblk;
 
-	if (index < 26) {
-		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
-	} else if (index < (26 + 1) * 26) {
-		sprintf(vblk->disk->disk_name, "vd%c%c",
-			'a' + index / 26 - 1, 'a' + index % 26);
-	} else {
-		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
-		const unsigned int m2 = (index / 26 - 1) % 26;
-		const unsigned int m3 =  index % 26;
-		sprintf(vblk->disk->disk_name, "vd%c%c%c",
-			'a' + m1, 'a' + m2, 'a' + m3);
-	}
+	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
 
 	vblk->disk->major = major;
 	vblk->disk->first_minor = index_to_minor(index);
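
virtblk_name_format() above generalizes the legacy bijective base-26 scheme that the removed open-coded branches only handled up to three letters. A standalone re-implementation (userspace C, for illustration only) showing the index-to-name mapping it produces:

#include <stdio.h>
#include <string.h>

static int name_format(const char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p = end - 1;

	*p = '\0';
	do {
		if (p == begin)
			return -1;
		*--p = 'a' + (index % base);
		index = (index / base) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));
	return 0;
}

int main(void)
{
	int samples[] = { 0, 25, 26, 701, 702 };
	char name[32];
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		name_format("vd", samples[i], name, sizeof(name));
		printf("%d -> %s\n", samples[i], name);
	}
	return 0;
}

Expected output: 0 -> vda, 25 -> vdz, 26 -> vdaa, 701 -> vdzz, 702 -> vdaaa.
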
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 0088bf6..73f196c 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -321,6 +321,7 @@
 static void xen_blkbk_unmap(struct pending_req *req)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int i, invcount = 0;
 	grant_handle_t handle;
 	int ret;
@@ -332,25 +333,12 @@
 		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
 				    GNTMAP_host_map, handle);
 		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
+		pages[invcount] = virt_to_page(vaddr(req, i));
 		invcount++;
 	}
 
-	ret = HYPERVISOR_grant_table_op(
-		GNTTABOP_unmap_grant_ref, unmap, invcount);
+	ret = gnttab_unmap_refs(unmap, pages, invcount, false);
 	BUG_ON(ret);
-	/*
-	 * Note, we use invcount, so nr->pages, so we can't index
-	 * using vaddr(req, i).
-	 */
-	for (i = 0; i < invcount; i++) {
-		ret = m2p_remove_override(
-			virt_to_page(unmap[i].host_addr), false);
-		if (ret) {
-			pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
-				 (unsigned long)unmap[i].host_addr);
-			continue;
-		}
-	}
 }
 
 static int xen_blkbk_map(struct blkif_request *req,
@@ -378,7 +366,7 @@
 				  pending_req->blkif->domid);
 	}
 
-	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
+	ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
 	BUG_ON(ret);
 
 	/*
@@ -398,15 +386,6 @@
 		if (ret)
 			continue;
 
-		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
-			blkbk->pending_page(pending_req, i), NULL);
-		if (ret) {
-			pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
-				 (unsigned long)map[i].dev_bus_addr, ret);
-			/* We could switch over to GNTTABOP_copy */
-			continue;
-		}
-
 		seg[i].buf  = map[i].dev_bus_addr |
 			(req->u.rw.seg[i].first_sect << 9);
 	}
@@ -419,21 +398,18 @@
 	int err = 0;
 	int status = BLKIF_RSP_OKAY;
 	struct block_device *bdev = blkif->vbd.bdev;
+	unsigned long secure;
 
 	blkif->st_ds_req++;
 
 	xen_blkif_get(blkif);
-	if (blkif->blk_backend_type == BLKIF_BACKEND_PHY ||
-	    blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
-		unsigned long secure = (blkif->vbd.discard_secure &&
-			(req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
-			BLKDEV_DISCARD_SECURE : 0;
-		err = blkdev_issue_discard(bdev,
-				req->u.discard.sector_number,
-				req->u.discard.nr_sectors,
-				GFP_KERNEL, secure);
-	} else
-		err = -EOPNOTSUPP;
+	secure = (blkif->vbd.discard_secure &&
+		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
+		 BLKDEV_DISCARD_SECURE : 0;
+
+	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
+				   req->u.discard.nr_sectors,
+				   GFP_KERNEL, secure);
 
 	if (err == -EOPNOTSUPP) {
 		pr_debug(DRV_PFX "discard op failed, not supported\n");
@@ -830,7 +806,7 @@
 	int i, mmap_pages;
 	int rc = 0;
 
-	if (!xen_pv_domain())
+	if (!xen_domain())
 		return -ENODEV;
 
 	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index d0ee7ed..773cf27 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -146,11 +146,6 @@
 	BLKIF_PROTOCOL_X86_64 = 3,
 };
 
-enum blkif_backend_type {
-	BLKIF_BACKEND_PHY  = 1,
-	BLKIF_BACKEND_FILE = 2,
-};
-
 struct xen_vbd {
 	/* What the domain refers to this vbd as. */
 	blkif_vdev_t		handle;
@@ -177,7 +172,6 @@
 	unsigned int		irq;
 	/* Comms information. */
 	enum blkif_protocol	blk_protocol;
-	enum blkif_backend_type blk_backend_type;
 	union blkif_back_rings	blk_rings;
 	void			*blk_ring;
 	/* The VBD attached to this interface. */
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 24a2fb5..4f66171 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -381,72 +381,49 @@
 	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
 			    "%d", state);
 	if (err)
-		xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
+		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);
 
 	return err;
 }
 
-int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
+static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
 {
 	struct xenbus_device *dev = be->dev;
 	struct xen_blkif *blkif = be->blkif;
-	char *type;
 	int err;
 	int state = 0;
+	struct block_device *bdev = be->blkif->vbd.bdev;
+	struct request_queue *q = bdev_get_queue(bdev);
 
-	type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
-	if (!IS_ERR(type)) {
-		if (strncmp(type, "file", 4) == 0) {
-			state = 1;
-			blkif->blk_backend_type = BLKIF_BACKEND_FILE;
+	if (blk_queue_discard(q)) {
+		err = xenbus_printf(xbt, dev->nodename,
+			"discard-granularity", "%u",
+			q->limits.discard_granularity);
+		if (err) {
+			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
+			return;
 		}
-		if (strncmp(type, "phy", 3) == 0) {
-			struct block_device *bdev = be->blkif->vbd.bdev;
-			struct request_queue *q = bdev_get_queue(bdev);
-			if (blk_queue_discard(q)) {
-				err = xenbus_printf(xbt, dev->nodename,
-					"discard-granularity", "%u",
-					q->limits.discard_granularity);
-				if (err) {
-					xenbus_dev_fatal(dev, err,
-						"writing discard-granularity");
-					goto kfree;
-				}
-				err = xenbus_printf(xbt, dev->nodename,
-					"discard-alignment", "%u",
-					q->limits.discard_alignment);
-				if (err) {
-					xenbus_dev_fatal(dev, err,
-						"writing discard-alignment");
-					goto kfree;
-				}
-				state = 1;
-				blkif->blk_backend_type = BLKIF_BACKEND_PHY;
-			}
-			/* Optional. */
-			err = xenbus_printf(xbt, dev->nodename,
-				"discard-secure", "%d",
-				blkif->vbd.discard_secure);
-			if (err) {
-				xenbus_dev_fatal(dev, err,
-					"writting discard-secure");
-				goto kfree;
-			}
+		err = xenbus_printf(xbt, dev->nodename,
+			"discard-alignment", "%u",
+			q->limits.discard_alignment);
+		if (err) {
+			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
+			return;
 		}
-	} else {
-		err = PTR_ERR(type);
-		xenbus_dev_fatal(dev, err, "reading type");
-		goto out;
+		state = 1;
+		/* Optional. */
+		err = xenbus_printf(xbt, dev->nodename,
+				    "discard-secure", "%d",
+				    blkif->vbd.discard_secure);
+		if (err) {
+			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
+			return;
+		}
 	}
-
 	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
 			    "%d", state);
 	if (err)
-		xenbus_dev_fatal(dev, err, "writing feature-discard");
-kfree:
-	kfree(type);
-out:
-	return err;
+		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
 }
 int xen_blkbk_barrier(struct xenbus_transaction xbt,
 		      struct backend_info *be, int state)
@@ -457,7 +434,7 @@
 	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
 			    "%d", state);
 	if (err)
-		xenbus_dev_fatal(dev, err, "writing feature-barrier");
+		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);
 
 	return err;
 }
@@ -689,14 +666,12 @@
 		return;
 	}
 
-	err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
-	if (err)
-		goto abort;
-
-	err = xen_blkbk_discard(xbt, be);
-
 	/* If we can't advertise it is OK. */
-	err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
+	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
+
+	xen_blkbk_discard(xbt, be);
+
+	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
 
 	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
 			    (unsigned long long)vbd_sz(&be->blkif->vbd));
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d5e1ab9..4e86393 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
+#include <linux/bitmap.h>
 
 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -81,6 +82,7 @@
  */
 struct blkfront_info
 {
+	spinlock_t io_lock;
 	struct mutex mutex;
 	struct xenbus_device *xbdev;
 	struct gendisk *gd;
@@ -105,8 +107,6 @@
 	int is_ready;
 };
 
-static DEFINE_SPINLOCK(blkif_io_lock);
-
 static unsigned int nr_minors;
 static unsigned long *minors;
 static DEFINE_SPINLOCK(minor_lock);
@@ -177,8 +177,7 @@
 
 	spin_lock(&minor_lock);
 	if (find_next_bit(minors, end, minor) >= end) {
-		for (; minor < end; ++minor)
-			__set_bit(minor, minors);
+		bitmap_set(minors, minor, nr);
 		rc = 0;
 	} else
 		rc = -EBUSY;
@@ -193,8 +192,7 @@
 
 	BUG_ON(end > nr_minors);
 	spin_lock(&minor_lock);
-	for (; minor < end; ++minor)
-		__clear_bit(minor, minors);
+	bitmap_clear(minors, minor, nr);
 	spin_unlock(&minor_lock);
 }
 
@@ -419,7 +417,7 @@
 	struct request_queue *rq;
 	struct blkfront_info *info = gd->private_data;
 
-	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+	rq = blk_init_queue(do_blkif_request, &info->io_lock);
 	if (rq == NULL)
 		return -1;
 
@@ -636,14 +634,14 @@
 	if (info->rq == NULL)
 		return;
 
-	spin_lock_irqsave(&blkif_io_lock, flags);
+	spin_lock_irqsave(&info->io_lock, flags);
 
 	/* No more blkif_request(). */
 	blk_stop_queue(info->rq);
 
 	/* No more gnttab callback work. */
 	gnttab_cancel_free_callback(&info->callback);
-	spin_unlock_irqrestore(&blkif_io_lock, flags);
+	spin_unlock_irqrestore(&info->io_lock, flags);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
 	flush_work_sync(&info->work);
@@ -675,16 +673,16 @@
 {
 	struct blkfront_info *info = container_of(work, struct blkfront_info, work);
 
-	spin_lock_irq(&blkif_io_lock);
+	spin_lock_irq(&info->io_lock);
 	if (info->connected == BLKIF_STATE_CONNECTED)
 		kick_pending_request_queues(info);
-	spin_unlock_irq(&blkif_io_lock);
+	spin_unlock_irq(&info->io_lock);
 }
 
 static void blkif_free(struct blkfront_info *info, int suspend)
 {
 	/* Prevent new requests being issued until we fix things up. */
-	spin_lock_irq(&blkif_io_lock);
+	spin_lock_irq(&info->io_lock);
 	info->connected = suspend ?
 		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
 	/* No more blkif_request(). */
@@ -692,7 +690,7 @@
 		blk_stop_queue(info->rq);
 	/* No more gnttab callback work. */
 	gnttab_cancel_free_callback(&info->callback);
-	spin_unlock_irq(&blkif_io_lock);
+	spin_unlock_irq(&info->io_lock);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
 	flush_work_sync(&info->work);
@@ -728,10 +726,10 @@
 	struct blkfront_info *info = (struct blkfront_info *)dev_id;
 	int error;
 
-	spin_lock_irqsave(&blkif_io_lock, flags);
+	spin_lock_irqsave(&info->io_lock, flags);
 
 	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
-		spin_unlock_irqrestore(&blkif_io_lock, flags);
+		spin_unlock_irqrestore(&info->io_lock, flags);
 		return IRQ_HANDLED;
 	}
 
@@ -816,7 +814,7 @@
 
 	kick_pending_request_queues(info);
 
-	spin_unlock_irqrestore(&blkif_io_lock, flags);
+	spin_unlock_irqrestore(&info->io_lock, flags);
 
 	return IRQ_HANDLED;
 }
@@ -991,6 +989,7 @@
 	}
 
 	mutex_init(&info->mutex);
+	spin_lock_init(&info->io_lock);
 	info->xbdev = dev;
 	info->vdevice = vdevice;
 	info->connected = BLKIF_STATE_DISCONNECTED;
@@ -1068,7 +1067,7 @@
 
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
-	spin_lock_irq(&blkif_io_lock);
+	spin_lock_irq(&info->io_lock);
 
 	/* Now safe for us to use the shared ring */
 	info->connected = BLKIF_STATE_CONNECTED;
@@ -1079,7 +1078,7 @@
 	/* Kick any other new requests queued since we resumed */
 	kick_pending_request_queues(info);
 
-	spin_unlock_irq(&blkif_io_lock);
+	spin_unlock_irq(&info->io_lock);
 
 	return 0;
 }
@@ -1277,10 +1276,10 @@
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
 	/* Kick pending requests. */
-	spin_lock_irq(&blkif_io_lock);
+	spin_lock_irq(&info->io_lock);
 	info->connected = BLKIF_STATE_CONNECTED;
 	kick_pending_request_queues(info);
-	spin_unlock_irq(&blkif_io_lock);
+	spin_unlock_irq(&info->io_lock);
 
 	add_disk(info->gd);
 
@@ -1410,7 +1409,6 @@
 	mutex_lock(&blkfront_mutex);
 
 	bdev = bdget_disk(disk, 0);
-	bdput(bdev);
 
 	if (bdev->bd_openers)
 		goto out;
@@ -1441,6 +1439,7 @@
 	}
 
 out:
+	bdput(bdev);
 	mutex_unlock(&blkfront_mutex);
 	return 0;
 }
@@ -1475,7 +1474,7 @@
 	if (!xen_domain())
 		return -ENODEV;
 
-	if (!xen_platform_pci_unplug)
+	if (xen_hvm_domain() && !xen_platform_pci_unplug)
 		return -ENODEV;
 
 	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
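
Blkfront now keeps the I/O lock in blkfront_info instead of the single static blkif_io_lock, so unrelated devices no longer contend on one lock. A userspace analogue (illustrative only; pthread mutexes stand in for the spinlock and all names are hypothetical):

#include <pthread.h>
#include <stdio.h>

struct fake_blkfront_info {
	pthread_mutex_t io_lock;	/* was: one global lock shared by all devices */
	int pending;
};

static void fake_probe(struct fake_blkfront_info *info)
{
	pthread_mutex_init(&info->io_lock, NULL);	/* spin_lock_init() in blkfront_probe() */
	info->pending = 0;
}

static void fake_kick_queue(struct fake_blkfront_info *info)
{
	pthread_mutex_lock(&info->io_lock);	/* spin_lock_irq(&info->io_lock) */
	info->pending++;
	pthread_mutex_unlock(&info->io_lock);
}

int main(void)
{
	struct fake_blkfront_info a, b;

	fake_probe(&a);
	fake_probe(&b);
	fake_kick_queue(&a);	/* no longer serializes against device b */
	fake_kick_queue(&b);
	printf("a=%d b=%d\n", a.pending, b.pending);
	return 0;
}
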
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 3845ab4..dfd7876 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -906,8 +906,8 @@
 		hpetp->hp_which, hdp->hd_phys_address,
 		hpetp->hp_ntimer > 1 ? "s" : "");
 	for (i = 0; i < hpetp->hp_ntimer; i++)
-		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
-	printk("\n");
+		printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+	printk(KERN_CONT "\n");
 
 	temp = hpetp->hp_tick_freq;
 	remainder = do_div(temp, 1000000);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 54ca8b2..4ec04a7 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1260,10 +1260,15 @@
 	uuid = table->data;
 	if (!uuid) {
 		uuid = tmp_uuid;
-		uuid[8] = 0;
-	}
-	if (uuid[8] == 0)
 		generate_random_uuid(uuid);
+	} else {
+		static DEFINE_SPINLOCK(bootid_spinlock);
+
+		spin_lock(&bootid_spinlock);
+		if (!uuid[8])
+			generate_random_uuid(uuid);
+		spin_unlock(&bootid_spinlock);
+	}
 
 	sprintf(buf, "%pU", uuid);
 
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 4dc0194..3b22a60 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -194,17 +194,17 @@
 
 		hv_retval = _srom_read(srom->hv_devhdl, kernbuf,
 				       *f_pos, bytes_this_pass);
-		if (hv_retval > 0) {
-			if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
-				retval = -EFAULT;
-				break;
-			}
-		} else if (hv_retval <= 0) {
+		if (hv_retval <= 0) {
 			if (retval == 0)
 				retval = hv_retval;
 			break;
 		}
 
+		if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
+			retval = -EFAULT;
+			break;
+		}
+
 		retval += hv_retval;
 		*f_pos += hv_retval;
 		buf += hv_retval;
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 82e8820..6b5cf02 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
-#include <linux/async.h>
 #include <asm/io.h>
 
 /*
@@ -180,15 +179,17 @@
 /* Number of reads we try to get two different values */
 #define ACPI_PM_READ_CHECKS 10000
 
-static void __init acpi_pm_clocksource_async(void *unused, async_cookie_t cookie)
+static int __init init_acpi_pm_clocksource(void)
 {
 	cycle_t value1, value2;
 	unsigned int i, j = 0;
 
+	if (!pmtmr_ioport)
+		return -ENODEV;
 
 	/* "verify" this timing source: */
 	for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
-		usleep_range(100 * j, 100 * j + 100);
+		udelay(100 * j);
 		value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
 		for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
 			value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
@@ -202,34 +203,25 @@
 			       " 0x%#llx, 0x%#llx - aborting.\n",
 			       value1, value2);
 			pmtmr_ioport = 0;
-			return;
+			return -EINVAL;
 		}
 		if (i == ACPI_PM_READ_CHECKS) {
 			printk(KERN_INFO "PM-Timer failed consistency check "
 			       " (0x%#llx) - aborting.\n", value1);
 			pmtmr_ioport = 0;
-			return;
+			return -ENODEV;
 		}
 	}
 
 	if (verify_pmtmr_rate() != 0){
 		pmtmr_ioport = 0;
-		return;
+		return -ENODEV;
 	}
 
-	clocksource_register_hz(&clocksource_acpi_pm,
+	return clocksource_register_hz(&clocksource_acpi_pm,
 						PMTMR_TICKS_PER_SEC);
 }
 
-static int __init init_acpi_pm_clocksource(void)
-{
-	if (!pmtmr_ioport)
-		return -ENODEV;
-
-	async_schedule(acpi_pm_clocksource_async, NULL);
-	return 0;
-}
-
 /* We use fs_initcall because we want the PCI fixups to have run
  * but we still need to load before device_initcall
  */
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ffbb446..5961e64 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -4,6 +4,7 @@
 
 config ARM_OMAP2PLUS_CPUFREQ
 	bool "TI OMAP2+"
+	depends on ARCH_OMAP2PLUS
 	default ARCH_OMAP2PLUS
 	select CPU_FREQ_TABLE
 
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 87411ce..2f0083a 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -74,7 +74,7 @@
 /**
  * cpuidle_play_dead - cpu off-lining
  *
- * Only returns in case of an error
+ * Only returns in case of an error or when no cpuidle driver is registered
  */
 int cpuidle_play_dead(void)
 {
@@ -83,6 +83,9 @@
 	int i, dead_state = -1;
 	int power_usage = -1;
 
+	if (!drv)
+		return -ENODEV;
+
 	/* Find lowest-power state that supports long-term idle */
 	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
 		struct cpuidle_state *s = &drv->states[i];
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 0053d7e..8f3f74c 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -18,6 +18,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/gfp.h>
+#include <linux/module.h>
 
 #include <crypto/ctr.h>
 #include <crypto/des.h>
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dc641c7..921039e 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -124,6 +124,9 @@
 	void __iomem *reg;
 	int irq[2];
 
+	/* SEC global registers lock */
+	spinlock_t reg_lock ____cacheline_aligned;
+
 	/* SEC version geometry (from device tree node) */
 	unsigned int num_channels;
 	unsigned int chfifo_len;
@@ -412,6 +415,7 @@
 {									\
 	struct device *dev = (struct device *)data;			\
 	struct talitos_private *priv = dev_get_drvdata(dev);		\
+	unsigned long flags;						\
 									\
 	if (ch_done_mask & 1)						\
 		flush_channel(dev, 0, 0, 0);				\
@@ -427,8 +431,10 @@
 out:									\
 	/* At this point, all completed channels have been processed */	\
 	/* Unmask done interrupts for channels completed later on. */	\
+	spin_lock_irqsave(&priv->reg_lock, flags);			\
 	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
+	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
 }
 DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
 DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
@@ -619,22 +625,28 @@
 	struct device *dev = data;					       \
 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
 	u32 isr, isr_lo;						       \
+	unsigned long flags;						       \
 									       \
+	spin_lock_irqsave(&priv->reg_lock, flags);			       \
 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
 	/* Acknowledge interrupt */					       \
 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
 									       \
-	if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo))    \
-		talitos_error(dev, isr, isr_lo);			       \
-	else								       \
+	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
+		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
+		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
+	}								       \
+	else {								       \
 		if (likely(isr & ch_done_mask)) {			       \
 			/* mask further done interrupts. */		       \
 			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
 			/* done_task will unmask done interrupts at exit */    \
 			tasklet_schedule(&priv->done_task[tlet]);	       \
 		}							       \
+		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
+	}								       \
 									       \
 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
 								IRQ_NONE;      \
@@ -2719,6 +2731,8 @@
 
 	priv->ofdev = ofdev;
 
+	spin_lock_init(&priv->reg_lock);
+
 	err = talitos_probe_irq(ofdev);
 	if (err)
 		goto err_out;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cf9da36..ef378b5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -91,11 +91,10 @@
 
 config AT_HDMAC
 	tristate "Atmel AHB DMA support"
-	depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
+	depends on ARCH_AT91
 	select DMA_ENGINE
 	help
-	  Support the Atmel AHB DMA controller.  This can be integrated in
-	  chips such as the Atmel AT91SAM9RL.
+	  Support the Atmel AHB DMA controller.
 
 config FSL_DMA
 	tristate "Freescale Elo and Elo Plus DMA support"
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 767bcc3..2397f6f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+		return NULL;
+
+	return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */
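
A hypothetical caller sketch, not from this patch, of how a net_dma user might pair the new helper with a CPU fallback; real callers track the returned cookie and wait on it before the destination is consumed.

#include <linux/dmaengine.h>
#include <linux/string.h>

static dma_cookie_t copy_with_optional_offload(struct dma_chan **chan_out,
					       void *dst, void *src, size_t len)
{
	struct dma_chan *chan = net_dma_find_channel();

	*chan_out = chan;
	if (!chan) {
		memcpy(dst, src, len);	/* synchronous fallback */
		return 0;
	}
	/* Asynchronous copy: wait for the returned cookie (e.g. with
	 * dma_sync_wait()) before touching the destination. */
	return dma_async_memcpy_buf_to_buf(chan, dst, src, len);
}
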
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 31493d8..73b2b65 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -546,9 +546,9 @@
 			   PCI_DMA_TODEVICE, flags, 0);
 }
 
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
 {
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 	u64 completion;
 
 	completion = *chan->completion;
@@ -569,7 +569,7 @@
 }
 
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete)
+			   dma_addr_t *phys_complete)
 {
 	*phys_complete = ioat_get_current_completion(chan);
 	if (*phys_complete == chan->last_completion)
@@ -580,14 +580,14 @@
 	return true;
 }
 
-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct list_head *_desc, *n;
 	struct dma_async_tx_descriptor *tx;
 
-	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
-		 __func__, phys_complete);
+	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
+		 __func__, (unsigned long long) phys_complete);
 	list_for_each_safe(_desc, n, &ioat->used_desc) {
 		struct ioat_desc_sw *desc;
 
@@ -652,7 +652,7 @@
 static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	prefetch(chan->completion);
 
@@ -698,7 +698,7 @@
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 		spin_unlock_bh(&ioat->desc_lock);
 	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 
 		spin_lock_bh(&ioat->desc_lock);
 		/* if we haven't made progress and we have already
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c7888bc..5e8fe01 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@
 struct ioat_chan_common {
 	struct dma_chan common;
 	void __iomem *reg_base;
-	unsigned long last_completion;
+	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
 	#define IOAT_COMPLETION_PENDING 0
@@ -310,7 +310,7 @@
 void __devexit ioat_dma_remove(struct ioatdma_device *device);
 struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 					      void __iomem *iobase);
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -318,7 +318,7 @@
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete);
+			   dma_addr_t *phys_complete);
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *device);
 extern const struct sysfs_ops ioat_sysfs_ops;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index e8e110f..8689576 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -128,7 +128,7 @@
 	spin_unlock_bh(&ioat->prep_lock);
 }
 
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_async_tx_descriptor *tx;
@@ -179,7 +179,7 @@
 static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -260,7 +260,7 @@
 static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -275,7 +275,7 @@
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -572,9 +572,9 @@
 	 */
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_chan *c = &chan->common;
-	const u16 curr_size = ioat2_ring_size(ioat);
+	const u32 curr_size = ioat2_ring_size(ioat);
 	const u16 active = ioat2_ring_active(ioat);
-	const u16 new_size = 1 << order;
+	const u32 new_size = 1 << order;
 	struct ioat_ring_ent **ring;
 	u16 i;
 
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index a2c413b..be2a55b 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -74,7 +74,7 @@
 	return container_of(chan, struct ioat2_dma_chan, base);
 }
 
-static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
 {
 	return 1 << ioat->alloc_order;
 }
@@ -91,7 +91,7 @@
 	return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
 }
 
-static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
 {
 	return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
 }
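
Widening the ring-size helpers to u32 closes an overflow: if alloc_order ever
reaches 16, "1 << order" is 65536 and no longer fits in a u16, so
ioat2_ring_size() would report an empty ring. A quick stand-alone demonstration
of the wrap (assuming an order of 16 is reachable, as the reshape path above
suggests):

#include <stdint.h>
#include <stdio.h>

/* Stand-alone demonstration of the wrap the u16 -> u32 change avoids. */
int main(void)
{
	unsigned int order = 16;			/* a 65536-entry ring */
	uint16_t old_size = (uint16_t)(1U << order);	/* wraps to 0 */
	uint32_t new_size = 1U << order;		/* 65536 */

	printf("u16 size: %u  u32 size: %u\n", old_size, new_size);
	return 0;
}
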
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 2c4476c..f7f1dc6 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -257,7 +257,7 @@
  * The difference from the dma_v2.c __cleanup() is that this routine
  * handles extended descriptors and dma-unmapping raid operations.
  */
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@
 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -1149,6 +1149,44 @@
 	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
 }
 
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1169,6 +1207,9 @@
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
+	if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+		dma->copy_align = 6;
+
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index da6c4c2..79e3eba 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1252,8 +1252,8 @@
 	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
 	/* address conversion buffers (dma_map / page_address) */
 	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
-	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
-	dma_addr_t pq_dest[2];
+	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
+	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
 
 	int i;
 	struct dma_async_tx_descriptor *tx;
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 1d5cf06..e99d009 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -145,7 +145,11 @@
 	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
 
 	mci->mod_name = DRV_NAME;
+#ifdef __tilegx__
+	mci->ctl_name = "TILEGx_Memory_Controller";
+#else
 	mci->ctl_name = "TILEPro_Memory_Controller";
+#endif
 	mci->dev_name = dev_name(&pdev->dev);
 	mci->edac_check = tile_edac_check;
 
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index edadbda..e03653d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -430,7 +430,7 @@
 
 config GPIO_SODAVILLE
 	bool "Intel Sodaville GPIO support"
-	depends on X86 && PCI && OF && BROKEN
+	depends on X86 && PCI && OF
 	select GPIO_GENERIC
 	select GENERIC_IRQ_CHIP
 	help
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 9ad1703..ae5d7f1 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -252,7 +252,7 @@
 		if (ret < 0)
 			memset(dev->irq_stat, 0, ARRAY_SIZE(dev->irq_stat));
 
-		for (bank = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
+		for (bank = 0, bit = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
 			bank++, bit = 0) {
 			pending = dev->irq_stat[bank] & dev->irq_mask[bank];
 
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 4627787..19d6fc0 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2382,8 +2382,8 @@
 #endif
 };
 
-static struct samsung_gpio_chip exynos5_gpios_1[] = {
 #ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_1[] = {
 	{
 		.chip	= {
 			.base	= EXYNOS5_GPA0(0),
@@ -2541,11 +2541,11 @@
 			.to_irq	= samsung_gpiolib_to_irq,
 		},
 	},
-#endif
 };
+#endif
 
-static struct samsung_gpio_chip exynos5_gpios_2[] = {
 #ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_2[] = {
 	{
 		.chip	= {
 			.base	= EXYNOS5_GPE0(0),
@@ -2602,11 +2602,11 @@
 
 		},
 	},
-#endif
 };
+#endif
 
-static struct samsung_gpio_chip exynos5_gpios_3[] = {
 #ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_3[] = {
 	{
 		.chip	= {
 			.base	= EXYNOS5_GPV0(0),
@@ -2638,11 +2638,11 @@
 			.label	= "GPV4",
 		},
 	},
-#endif
 };
+#endif
 
-static struct samsung_gpio_chip exynos5_gpios_4[] = {
 #ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_4[] = {
 	{
 		.chip	= {
 			.base	= EXYNOS5_GPZ(0),
@@ -2650,8 +2650,8 @@
 			.label	= "GPZ",
 		},
 	},
-#endif
 };
+#endif
 
 
 #if defined(CONFIG_ARCH_EXYNOS) && defined(CONFIG_OF)
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 9ba15d3..031e5d2 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -41,7 +41,7 @@
 struct sdv_gpio_chip_data {
 	int irq_base;
 	void __iomem *gpio_pub_base;
-	struct irq_domain id;
+	struct irq_domain *id;
 	struct irq_chip_generic *gc;
 	struct bgpio_chip bgpio;
 };
@@ -51,10 +51,9 @@
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct sdv_gpio_chip_data *sd = gc->private;
 	void __iomem *type_reg;
-	u32 irq_offs = d->irq - sd->irq_base;
 	u32 reg;
 
-	if (irq_offs < 8)
+	if (d->hwirq < 8)
 		type_reg = sd->gpio_pub_base + GPIT1R0;
 	else
 		type_reg = sd->gpio_pub_base + GPIT1R1;
@@ -63,11 +62,11 @@
 
 	switch (type) {
 	case IRQ_TYPE_LEVEL_HIGH:
-		reg &= ~BIT(4 * (irq_offs % 8));
+		reg &= ~BIT(4 * (d->hwirq % 8));
 		break;
 
 	case IRQ_TYPE_LEVEL_LOW:
-		reg |= BIT(4 * (irq_offs % 8));
+		reg |= BIT(4 * (d->hwirq % 8));
 		break;
 
 	default:
@@ -91,7 +90,7 @@
 		u32 irq_bit = __fls(irq_stat);
 
 		irq_stat &= ~BIT(irq_bit);
-		generic_handle_irq(sd->irq_base + irq_bit);
+		generic_handle_irq(irq_find_mapping(sd->id, irq_bit));
 	}
 
 	return IRQ_HANDLED;
@@ -127,7 +126,7 @@
 }
 
 static struct irq_domain_ops irq_domain_sdv_ops = {
-	.dt_translate	= sdv_xlate,
+	.xlate = sdv_xlate,
 };
 
 static __devinit int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
@@ -149,10 +148,6 @@
 	if (ret)
 		goto out_free_desc;
 
-	sd->id.irq_base = sd->irq_base;
-	sd->id.of_node = of_node_get(pdev->dev.of_node);
-	sd->id.ops = &irq_domain_sdv_ops;
-
 	/*
 	 * This gpio irq controller latches level irqs. Testing shows that if
 	 * we unmask & ACK the IRQ before the source of the interrupt is gone
@@ -179,7 +174,10 @@
 			IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST,
 			IRQ_LEVEL | IRQ_NOPROBE);
 
-	irq_domain_add(&sd->id);
+	sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS,
+				sd->irq_base, 0, &irq_domain_sdv_ops, sd);
+	if (!sd->id)
+		goto out_free_irq;
 	return 0;
 out_free_irq:
 	free_irq(pdev->irq, sd);
@@ -260,7 +258,6 @@
 {
 	struct sdv_gpio_chip_data *sd = pci_get_drvdata(pdev);
 
-	irq_domain_del(&sd->id);
 	free_irq(pdev->irq, sd);
 	irq_free_descs(sd->irq_base, SDV_NUM_PUB_GPIOS);
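
With this conversion the driver keeps an irq_domain pointer created by
irq_domain_add_legacy() and resolves hardware IRQ numbers through
irq_find_mapping(); d->hwirq in the set_type handler already carries the
hardware number, so the old irq_base arithmetic disappears. A condensed sketch
of that pattern for a hypothetical driver (all demo_* names are invented; the
calls are the 3.4-era irq_domain API used in the hunk):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* Hypothetical driver fragment: pre-allocated Linux IRQ descriptors are
 * wrapped in a legacy domain, and handlers translate hardware IRQ numbers
 * back with irq_find_mapping() instead of irq_base math. */

static const struct irq_domain_ops demo_irq_ops = {
	.xlate = irq_domain_xlate_onecell,
};

static int demo_setup_irq_domain(struct device_node *np, unsigned int nr_irqs,
				 int irq_base, void *priv,
				 struct irq_domain **domain)
{
	*domain = irq_domain_add_legacy(np, nr_irqs, irq_base, 0,
					&demo_irq_ops, priv);
	return *domain ? 0 : -ENODEV;
}

static void demo_dispatch(struct irq_domain *domain, unsigned long pending)
{
	while (pending) {
		unsigned long hwirq = __ffs(pending);

		pending &= ~BIT(hwirq);
		generic_handle_irq(irq_find_mapping(domain, hwirq));
	}
}
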
 
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 30372f7..348b367 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1510,8 +1510,8 @@
  * \param arg pointer to a drm_buf_map structure.
  * \return zero on success or a negative number on failure.
  *
- * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
- * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
  * drm_mmap_dma().
  */
@@ -1553,18 +1553,14 @@
 				retcode = -EINVAL;
 				goto done;
 			}
-			down_write(&current->mm->mmap_sem);
-			virtual = do_mmap(file_priv->filp, 0, map->size,
+			virtual = vm_mmap(file_priv->filp, 0, map->size,
 					  PROT_READ | PROT_WRITE,
 					  MAP_SHARED,
 					  token);
-			up_write(&current->mm->mmap_sem);
 		} else {
-			down_write(&current->mm->mmap_sem);
-			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
+			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
 					  PROT_READ | PROT_WRITE,
 					  MAP_SHARED, 0);
-			up_write(&current->mm->mmap_sem);
 		}
 		if (virtual > -1024UL) {
 			/* Real error */
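
This hunk is part of the tree-wide switch away from open-coded do_mmap() under
mmap_sem (i810 and i915 receive the same treatment further down). vm_mmap()
takes the semaphore itself; roughly speaking it behaves like the helper below,
a sketch of the idea rather than the exact mm/util.c implementation, which is
why the explicit down_write()/up_write() pairs can go:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch only: vm_mmap() behaves like do_mmap() with the mmap_sem handling
 * folded in, so callers such as drm_mapbufs() above no longer touch
 * current->mm->mmap_sem directly. */
static unsigned long vm_mmap_like(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long prot,
				  unsigned long flag, unsigned long offset)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mmap(file, addr, len, prot, flag, offset);
	up_write(&current->mm->mmap_sem);

	return ret;
}
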
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d3aaeb6..c79870a 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -3335,10 +3335,12 @@
 
 	ret = crtc->funcs->page_flip(crtc, fb, e);
 	if (ret) {
-		spin_lock_irqsave(&dev->event_lock, flags);
-		file_priv->event_space += sizeof e->event;
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-		kfree(e);
+		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+			spin_lock_irqsave(&dev->event_lock, flags);
+			file_priv->event_space += sizeof e->event;
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			kfree(e);
+		}
 	}
 
 out:
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index cdfbf27..123de28 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -507,12 +507,12 @@
 
 	drm_events_release(file_priv);
 
-	if (dev->driver->driver_features & DRIVER_GEM)
-		drm_gem_release(dev, file_priv);
-
 	if (dev->driver->driver_features & DRIVER_MODESET)
 		drm_fb_release(file_priv);
 
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_release(dev, file_priv);
+
 	mutex_lock(&dev->ctxlist_mutex);
 	if (!list_empty(&dev->ctxlist)) {
 		struct drm_ctx_list *pos, *n;
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index c8c83da..37c9a52 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -1,6 +1,6 @@
 #include "drmP.h"
 #include <linux/usb.h>
-#include <linux/export.h>
+#include <linux/module.h>
 
 int drm_get_usb_dev(struct usb_interface *interface,
 		    const struct usb_device_id *id,
@@ -114,3 +114,7 @@
 	usb_deregister(udriver);
 }
 EXPORT_SYMBOL(drm_usb_exit);
+
+MODULE_AUTHOR("David Airlie");
+MODULE_DESCRIPTION("USB DRM support");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 4a3a5f7..de8d209 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -34,14 +34,14 @@
 static int lowlevel_buffer_allocate(struct drm_device *dev,
 		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-	dma_addr_t start_addr, end_addr;
+	dma_addr_t start_addr;
 	unsigned int npages, page_size, i = 0;
 	struct scatterlist *sgl;
 	int ret = 0;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (flags & EXYNOS_BO_NONCONTIG) {
+	if (IS_NONCONTIG_BUFFER(flags)) {
 		DRM_DEBUG_KMS("not support allocation type.\n");
 		return -EINVAL;
 	}
@@ -52,13 +52,13 @@
 	}
 
 	if (buf->size >= SZ_1M) {
-		npages = (buf->size >> SECTION_SHIFT) + 1;
+		npages = buf->size >> SECTION_SHIFT;
 		page_size = SECTION_SIZE;
 	} else if (buf->size >= SZ_64K) {
-		npages = (buf->size >> 16) + 1;
+		npages = buf->size >> 16;
 		page_size = SZ_64K;
 	} else {
-		npages = (buf->size >> PAGE_SHIFT) + 1;
+		npages = buf->size >> PAGE_SHIFT;
 		page_size = PAGE_SIZE;
 	}
 
@@ -76,26 +76,13 @@
 		return -ENOMEM;
 	}
 
-		buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
-				&buf->dma_addr, GFP_KERNEL);
-		if (!buf->kvaddr) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			ret = -ENOMEM;
-			goto err1;
-		}
-
-		start_addr = buf->dma_addr;
-		end_addr = buf->dma_addr + buf->size;
-
-		buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
-		if (!buf->pages) {
-			DRM_ERROR("failed to allocate pages.\n");
-			ret = -ENOMEM;
-			goto err2;
-		}
-
-	start_addr = buf->dma_addr;
-	end_addr = buf->dma_addr + buf->size;
+	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
+			&buf->dma_addr, GFP_KERNEL);
+	if (!buf->kvaddr) {
+		DRM_ERROR("failed to allocate buffer.\n");
+		ret = -ENOMEM;
+		goto err1;
+	}
 
 	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
 	if (!buf->pages) {
@@ -105,23 +92,17 @@
 	}
 
 	sgl = buf->sgt->sgl;
+	start_addr = buf->dma_addr;
 
 	while (i < npages) {
 		buf->pages[i] = phys_to_page(start_addr);
 		sg_set_page(sgl, buf->pages[i], page_size, 0);
 		sg_dma_address(sgl) = start_addr;
 		start_addr += page_size;
-		if (end_addr - start_addr < page_size)
-			break;
 		sgl = sg_next(sgl);
 		i++;
 	}
 
-	buf->pages[i] = phys_to_page(start_addr);
-
-	sgl = sg_next(sgl);
-	sg_set_page(sgl, buf->pages[i+1], end_addr - start_addr, 0);
-
 	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)buf->kvaddr,
 			(unsigned long)buf->dma_addr,
@@ -150,7 +131,7 @@
 	 * non-continuous memory would be released by exynos
 	 * gem framework.
 	 */
-	if (flags & EXYNOS_BO_NONCONTIG) {
+	if (IS_NONCONTIG_BUFFER(flags)) {
 		DRM_DEBUG_KMS("not support allocation type.\n");
 		return;
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 411832e..eaf630d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -54,16 +54,18 @@
 		 *
 		 * P.S. note that this driver is considered for modularization.
 		 */
-		ret = subdrv->probe(dev, subdrv->manager.dev);
+		ret = subdrv->probe(dev, subdrv->dev);
 		if (ret)
 			return ret;
 	}
 
-	if (subdrv->is_local)
+	if (!subdrv->manager)
 		return 0;
 
+	subdrv->manager->dev = subdrv->dev;
+
 	/* create and initialize a encoder for this sub driver. */
-	encoder = exynos_drm_encoder_create(dev, &subdrv->manager,
+	encoder = exynos_drm_encoder_create(dev, subdrv->manager,
 			(1 << MAX_CRTC) - 1);
 	if (!encoder) {
 		DRM_ERROR("failed to create encoder\n");
@@ -186,7 +188,7 @@
 
 	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
 		if (subdrv->open) {
-			ret = subdrv->open(dev, subdrv->manager.dev, file);
+			ret = subdrv->open(dev, subdrv->dev, file);
 			if (ret)
 				goto err;
 		}
@@ -197,7 +199,7 @@
 err:
 	list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
 		if (subdrv->close)
-			subdrv->close(dev, subdrv->manager.dev, file);
+			subdrv->close(dev, subdrv->dev, file);
 	}
 	return ret;
 }
@@ -209,7 +211,7 @@
 
 	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
 		if (subdrv->close)
-			subdrv->close(dev, subdrv->manager.dev, file);
+			subdrv->close(dev, subdrv->dev, file);
 	}
 }
 EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index fbd0a23..1d81417 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -225,24 +225,25 @@
  * Exynos drm sub driver structure.
  *
  * @list: sub driver has its own list object to register to exynos drm driver.
+ * @dev: pointer to device object for subdrv device driver.
  * @drm_dev: pointer to drm_device and this pointer would be set
  *	when sub driver calls exynos_drm_subdrv_register().
- * @is_local: appear encoder and connector disrelated device.
+ * @manager: subdrv has its own manager to control a hardware appropriately
+ *	and we can access a hardware drawing on this manager.
  * @probe: this callback would be called by exynos drm driver after
  *	subdrv is registered to it.
  * @remove: this callback is used to release resources created
  *	by probe callback.
  * @open: this would be called with drm device file open.
  * @close: this would be called with drm device file close.
- * @manager: subdrv has its own manager to control a hardware appropriately
- *	and we can access a hardware drawing on this manager.
  * @encoder: encoder object owned by this sub driver.
  * @connector: connector object owned by this sub driver.
  */
 struct exynos_drm_subdrv {
 	struct list_head list;
+	struct device *dev;
 	struct drm_device *drm_dev;
-	bool is_local;
+	struct exynos_drm_manager *manager;
 
 	int (*probe)(struct drm_device *drm_dev, struct device *dev);
 	void (*remove)(struct drm_device *dev);
@@ -251,7 +252,6 @@
 	void (*close)(struct drm_device *drm_dev, struct device *dev,
 			struct drm_file *file);
 
-	struct exynos_drm_manager manager;
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
 };
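
This structure change drives the mechanical edits in the fimd, hdmi and vidi
drivers below: each sub-driver now defines a file-scope exynos_drm_manager and
wires both its struct device and that manager into the subdrv at probe time.
The common shape, sketched with placeholder foo_* names (the real instances are
fimd_manager, hdmi_manager and vidi_manager in the following hunks):

/* Placeholder foo_* names; illustrative fragment, not an actual driver. */
static struct exynos_drm_manager foo_manager = {
	.pipe		= -1,
	.ops		= &foo_manager_ops,
	.overlay_ops	= &foo_overlay_ops,
	.display_ops	= &foo_display_ops,
};

static void foo_register_subdrv(struct exynos_drm_subdrv *subdrv,
				struct device *dev)
{
	subdrv->dev = dev;		/* who owns the hardware */
	subdrv->manager = &foo_manager;	/* how to drive it */
	subdrv->probe = foo_subdrv_probe;
	subdrv->remove = foo_subdrv_remove;

	exynos_drm_subdrv_register(subdrv);
}
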
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index ecb6db2..29fdbfe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -172,7 +172,7 @@
 static void fimd_apply(struct device *subdrv_dev)
 {
 	struct fimd_context *ctx = get_fimd_context(subdrv_dev);
-	struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+	struct exynos_drm_manager *mgr = ctx->subdrv.manager;
 	struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
 	struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
 	struct fimd_win_data *win_data;
@@ -577,6 +577,13 @@
 	.disable = fimd_win_disable,
 };
 
+static struct exynos_drm_manager fimd_manager = {
+	.pipe		= -1,
+	.ops		= &fimd_manager_ops,
+	.overlay_ops	= &fimd_overlay_ops,
+	.display_ops	= &fimd_display_ops,
+};
+
 static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 {
 	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
@@ -628,7 +635,7 @@
 	struct fimd_context *ctx = (struct fimd_context *)dev_id;
 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
 	struct drm_device *drm_dev = subdrv->drm_dev;
-	struct exynos_drm_manager *manager = &subdrv->manager;
+	struct exynos_drm_manager *manager = subdrv->manager;
 	u32 val;
 
 	val = readl(ctx->regs + VIDINTCON1);
@@ -744,7 +751,7 @@
 static int fimd_power_on(struct fimd_context *ctx, bool enable)
 {
 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-	struct device *dev = subdrv->manager.dev;
+	struct device *dev = subdrv->dev;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -867,13 +874,10 @@
 
 	subdrv = &ctx->subdrv;
 
+	subdrv->dev = dev;
+	subdrv->manager = &fimd_manager;
 	subdrv->probe = fimd_subdrv_probe;
 	subdrv->remove = fimd_subdrv_remove;
-	subdrv->manager.pipe = -1;
-	subdrv->manager.ops = &fimd_manager_ops;
-	subdrv->manager.overlay_ops = &fimd_overlay_ops;
-	subdrv->manager.display_ops = &fimd_display_ops;
-	subdrv->manager.dev = dev;
 
 	mutex_init(&ctx->lock);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index fa1aa94..392ce71 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -56,9 +56,28 @@
 	return out_msg;
 }
 
-static unsigned int mask_gem_flags(unsigned int flags)
+static int check_gem_flags(unsigned int flags)
 {
-	return flags &= EXYNOS_BO_NONCONTIG;
+	if (flags & ~(EXYNOS_BO_MASK)) {
+		DRM_ERROR("invalid flags.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
+{
+	if (!IS_NONCONTIG_BUFFER(flags)) {
+		if (size >= SZ_1M)
+			return roundup(size, SECTION_SIZE);
+		else if (size >= SZ_64K)
+			return roundup(size, SZ_64K);
+		else
+			goto out;
+	}
+out:
+	return roundup(size, PAGE_SIZE);
 }
 
 static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
@@ -319,10 +338,17 @@
 	struct exynos_drm_gem_buf *buf;
 	int ret;
 
-	size = roundup(size, PAGE_SIZE);
-	DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
+	if (!size) {
+		DRM_ERROR("invalid size.\n");
+		return ERR_PTR(-EINVAL);
+	}
 
-	flags = mask_gem_flags(flags);
+	size = roundup_gem_size(size, flags);
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	ret = check_gem_flags(flags);
+	if (ret)
+		return ERR_PTR(ret);
 
 	buf = exynos_drm_init_buf(dev, size);
 	if (!buf)
@@ -331,7 +357,7 @@
 	exynos_gem_obj = exynos_drm_gem_init(dev, size);
 	if (!exynos_gem_obj) {
 		ret = -ENOMEM;
-		goto err;
+		goto err_fini_buf;
 	}
 
 	exynos_gem_obj->buffer = buf;
@@ -347,18 +373,19 @@
 		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
 		if (ret < 0) {
 			drm_gem_object_release(&exynos_gem_obj->base);
-			goto err;
+			goto err_fini_buf;
 		}
 	} else {
 		ret = exynos_drm_alloc_buf(dev, buf, flags);
 		if (ret < 0) {
 			drm_gem_object_release(&exynos_gem_obj->base);
-			goto err;
+			goto err_fini_buf;
 		}
 	}
 
 	return exynos_gem_obj;
-err:
+
+err_fini_buf:
 	exynos_drm_fini_buf(dev, buf);
 	return ERR_PTR(ret);
 }
@@ -554,10 +581,8 @@
 	obj->filp->f_op = &exynos_drm_gem_fops;
 	obj->filp->private_data = obj;
 
-	down_write(&current->mm->mmap_sem);
-	addr = do_mmap(obj->filp, 0, args->size,
+	addr = vm_mmap(obj->filp, 0, args->size,
 			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
-	up_write(&current->mm->mmap_sem);
 
 	drm_gem_object_unreference_unlocked(obj);
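
roundup_gem_size() pairs with the npages change in exynos_drm_buf.c earlier in
this series: contiguous buffers are now rounded up to the granularity they will
be mapped with, so lowlevel_buffer_allocate() can drop its old "+ 1" page-count
fudge. A userspace rendering of the contiguous-path rounding (SECTION_SIZE
taken as the usual ARM value of 1 MiB):

#include <stdio.h>

#define SZ_4K	0x1000UL
#define SZ_64K	0x10000UL
#define SZ_1M	0x100000UL

/* Power-of-two round-up, same result as the kernel's roundup() here. */
static unsigned long round_up_to(unsigned long size, unsigned long unit)
{
	return (size + unit - 1) & ~(unit - 1);
}

/* Userspace rendering of roundup_gem_size() for the contiguous case. */
static unsigned long demo_roundup_gem_size(unsigned long size)
{
	if (size >= SZ_1M)
		return round_up_to(size, SZ_1M);
	if (size >= SZ_64K)
		return round_up_to(size, SZ_64K);
	return round_up_to(size, SZ_4K);
}

int main(void)
{
	printf("%lu\n", demo_roundup_gem_size(SZ_1M + 1));	/* 2097152 */
	printf("%lu\n", demo_roundup_gem_size(70000));		/* 131072  */
	printf("%lu\n", demo_roundup_gem_size(100));		/* 4096    */
	return 0;
}
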
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e40fbad..4ed8420 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -29,6 +29,8 @@
 #define to_exynos_gem_obj(x)	container_of(x,\
 			struct exynos_drm_gem_obj, base)
 
+#define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG)
+
 /*
  * exynos drm gem buffer structure.
  *
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 14eb26b..3424463 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -30,9 +30,8 @@
 					struct drm_hdmi_context, subdrv);
 
 /* these callback points should be set by specific drivers. */
-static struct exynos_hdmi_display_ops *hdmi_display_ops;
-static struct exynos_hdmi_manager_ops *hdmi_manager_ops;
-static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops;
+static struct exynos_hdmi_ops *hdmi_ops;
+static struct exynos_mixer_ops *mixer_ops;
 
 struct drm_hdmi_context {
 	struct exynos_drm_subdrv	subdrv;
@@ -40,31 +39,20 @@
 	struct exynos_drm_hdmi_context	*mixer_ctx;
 };
 
-void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
-					*display_ops)
+void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (display_ops)
-		hdmi_display_ops = display_ops;
+	if (ops)
+		hdmi_ops = ops;
 }
 
-void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
-					*manager_ops)
+void exynos_mixer_ops_register(struct exynos_mixer_ops *ops)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (manager_ops)
-		hdmi_manager_ops = manager_ops;
-}
-
-void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
-					*overlay_ops)
-{
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-
-	if (overlay_ops)
-		hdmi_overlay_ops = overlay_ops;
+	if (ops)
+		mixer_ops = ops;
 }
 
 static bool drm_hdmi_is_connected(struct device *dev)
@@ -73,8 +61,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_display_ops && hdmi_display_ops->is_connected)
-		return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx);
+	if (hdmi_ops && hdmi_ops->is_connected)
+		return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);
 
 	return false;
 }
@@ -86,9 +74,9 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_display_ops && hdmi_display_ops->get_edid)
-		return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx,
-				connector, edid, len);
+	if (hdmi_ops && hdmi_ops->get_edid)
+		return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
+					  len);
 
 	return 0;
 }
@@ -99,9 +87,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_display_ops && hdmi_display_ops->check_timing)
-		return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx,
-				timing);
+	if (hdmi_ops && hdmi_ops->check_timing)
+		return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
 
 	return 0;
 }
@@ -112,8 +99,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_display_ops && hdmi_display_ops->power_on)
-		return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode);
+	if (hdmi_ops && hdmi_ops->power_on)
+		return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);
 
 	return 0;
 }
@@ -130,13 +117,13 @@
 {
 	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-	struct exynos_drm_manager *manager = &subdrv->manager;
+	struct exynos_drm_manager *manager = subdrv->manager;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank)
-		return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx,
-							manager->pipe);
+	if (mixer_ops && mixer_ops->enable_vblank)
+		return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
+						manager->pipe);
 
 	return 0;
 }
@@ -147,8 +134,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank)
-		return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx);
+	if (mixer_ops && mixer_ops->disable_vblank)
+		return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
 }
 
 static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
@@ -160,9 +147,9 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_manager_ops && hdmi_manager_ops->mode_fixup)
-		hdmi_manager_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector,
-						mode, adjusted_mode);
+	if (hdmi_ops && hdmi_ops->mode_fixup)
+		hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode,
+				     adjusted_mode);
 }
 
 static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
@@ -171,8 +158,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_manager_ops && hdmi_manager_ops->mode_set)
-		hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
+	if (hdmi_ops && hdmi_ops->mode_set)
+		hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
 }
 
 static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
@@ -182,9 +169,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_manager_ops && hdmi_manager_ops->get_max_resol)
-		hdmi_manager_ops->get_max_resol(ctx->hdmi_ctx->ctx, width,
-							height);
+	if (hdmi_ops && hdmi_ops->get_max_resol)
+		hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
 }
 
 static void drm_hdmi_commit(struct device *subdrv_dev)
@@ -193,8 +179,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_manager_ops && hdmi_manager_ops->commit)
-		hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx);
+	if (hdmi_ops && hdmi_ops->commit)
+		hdmi_ops->commit(ctx->hdmi_ctx->ctx);
 }
 
 static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
@@ -209,8 +195,8 @@
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		if (hdmi_manager_ops && hdmi_manager_ops->disable)
-			hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx);
+		if (hdmi_ops && hdmi_ops->disable)
+			hdmi_ops->disable(ctx->hdmi_ctx->ctx);
 		break;
 	default:
 		DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
@@ -235,8 +221,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set)
-		hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
+	if (mixer_ops && mixer_ops->win_mode_set)
+		mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
 }
 
 static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
@@ -245,8 +231,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit)
-		hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+	if (mixer_ops && mixer_ops->win_commit)
+		mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
 }
 
 static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
@@ -255,8 +241,8 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable)
-		hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+	if (mixer_ops && mixer_ops->win_disable)
+		mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
 }
 
 static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
@@ -265,6 +251,12 @@
 	.disable = drm_mixer_disable,
 };
 
+static struct exynos_drm_manager hdmi_manager = {
+	.pipe		= -1,
+	.ops		= &drm_hdmi_manager_ops,
+	.overlay_ops	= &drm_hdmi_overlay_ops,
+	.display_ops	= &drm_hdmi_display_ops,
+};
 
 static int hdmi_subdrv_probe(struct drm_device *drm_dev,
 		struct device *dev)
@@ -332,12 +324,9 @@
 
 	subdrv = &ctx->subdrv;
 
+	subdrv->dev = dev;
+	subdrv->manager = &hdmi_manager;
 	subdrv->probe = hdmi_subdrv_probe;
-	subdrv->manager.pipe = -1;
-	subdrv->manager.ops = &drm_hdmi_manager_ops;
-	subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
-	subdrv->manager.display_ops = &drm_hdmi_display_ops;
-	subdrv->manager.dev = dev;
 
 	platform_set_drvdata(pdev, subdrv);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 44497cf..f3ae192 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -38,15 +38,15 @@
 	void			*ctx;
 };
 
-struct exynos_hdmi_display_ops {
+struct exynos_hdmi_ops {
+	/* display */
 	bool (*is_connected)(void *ctx);
 	int (*get_edid)(void *ctx, struct drm_connector *connector,
 			u8 *edid, int len);
 	int (*check_timing)(void *ctx, void *timing);
 	int (*power_on)(void *ctx, int mode);
-};
 
-struct exynos_hdmi_manager_ops {
+	/* manager */
 	void (*mode_fixup)(void *ctx, struct drm_connector *connector,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode);
@@ -57,22 +57,17 @@
 	void (*disable)(void *ctx);
 };
 
-struct exynos_hdmi_overlay_ops {
+struct exynos_mixer_ops {
+	/* manager */
 	int (*enable_vblank)(void *ctx, int pipe);
 	void (*disable_vblank)(void *ctx);
+
+	/* overlay */
 	void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
 	void (*win_commit)(void *ctx, int zpos);
 	void (*win_disable)(void *ctx, int zpos);
 };
 
-extern struct platform_driver hdmi_driver;
-extern struct platform_driver mixer_driver;
-
-void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
-					*display_ops);
-void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
-					*manager_ops);
-void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
-					*overlay_ops);
-
+void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops);
+void exynos_mixer_ops_register(struct exynos_mixer_ops *ops);
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index c277a3a..f92fe4c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -24,6 +24,10 @@
 
 static const uint32_t formats[] = {
 	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV12M,
+	DRM_FORMAT_NV12MT,
 };
 
 static int
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 8e1339f..7b9c153 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -199,7 +199,7 @@
 static void vidi_apply(struct device *subdrv_dev)
 {
 	struct vidi_context *ctx = get_vidi_context(subdrv_dev);
-	struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+	struct exynos_drm_manager *mgr = ctx->subdrv.manager;
 	struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
 	struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
 	struct vidi_win_data *win_data;
@@ -374,6 +374,13 @@
 	.disable = vidi_win_disable,
 };
 
+static struct exynos_drm_manager vidi_manager = {
+	.pipe		= -1,
+	.ops		= &vidi_manager_ops,
+	.overlay_ops	= &vidi_overlay_ops,
+	.display_ops	= &vidi_display_ops,
+};
+
 static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
 {
 	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
@@ -425,7 +432,7 @@
 	struct vidi_context *ctx = container_of(work, struct vidi_context,
 					work);
 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-	struct exynos_drm_manager *manager = &subdrv->manager;
+	struct exynos_drm_manager *manager = subdrv->manager;
 
 	if (manager->pipe < 0)
 		return;
@@ -471,7 +478,7 @@
 static int vidi_power_on(struct vidi_context *ctx, bool enable)
 {
 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
-	struct device *dev = subdrv->manager.dev;
+	struct device *dev = subdrv->dev;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -611,13 +618,10 @@
 	ctx->raw_edid = (struct edid *)fake_edid_info;
 
 	subdrv = &ctx->subdrv;
+	subdrv->dev = dev;
+	subdrv->manager = &vidi_manager;
 	subdrv->probe = vidi_subdrv_probe;
 	subdrv->remove = vidi_subdrv_remove;
-	subdrv->manager.pipe = -1;
-	subdrv->manager.ops = &vidi_manager_ops;
-	subdrv->manager.overlay_ops = &vidi_overlay_ops;
-	subdrv->manager.display_ops = &vidi_display_ops;
-	subdrv->manager.dev = dev;
 
 	mutex_init(&ctx->lock);
 
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 575a8cb..b003538 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -40,7 +40,6 @@
 
 #include "exynos_hdmi.h"
 
-#define HDMI_OVERLAY_NUMBER	3
 #define MAX_WIDTH		1920
 #define MAX_HEIGHT		1080
 #define get_hdmi_context(dev)	platform_get_drvdata(to_platform_device(dev))
@@ -1194,7 +1193,7 @@
 
 static bool hdmi_is_connected(void *ctx)
 {
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 	u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
 
 	if (val)
@@ -1207,7 +1206,7 @@
 				u8 *edid, int len)
 {
 	struct edid *raw_edid;
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
@@ -1275,7 +1274,7 @@
 
 static int hdmi_check_timing(void *ctx, void *timing)
 {
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 	struct fb_videomode *check_timing = timing;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -1312,13 +1311,6 @@
 	return 0;
 }
 
-static struct exynos_hdmi_display_ops display_ops = {
-	.is_connected	= hdmi_is_connected,
-	.get_edid	= hdmi_get_edid,
-	.check_timing	= hdmi_check_timing,
-	.power_on	= hdmi_display_power_on,
-};
-
 static void hdmi_set_acr(u32 freq, u8 *acr)
 {
 	u32 n, cts;
@@ -1914,7 +1906,7 @@
 				struct drm_display_mode *adjusted_mode)
 {
 	struct drm_display_mode *m;
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 	int index;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -1951,7 +1943,7 @@
 
 static void hdmi_mode_set(void *ctx, void *mode)
 {
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 	int conf_idx;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -1974,7 +1966,7 @@
 
 static void hdmi_commit(void *ctx)
 {
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
@@ -1985,7 +1977,7 @@
 
 static void hdmi_disable(void *ctx)
 {
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+	struct hdmi_context *hdata = ctx;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
@@ -1996,7 +1988,14 @@
 	}
 }
 
-static struct exynos_hdmi_manager_ops manager_ops = {
+static struct exynos_hdmi_ops hdmi_ops = {
+	/* display */
+	.is_connected	= hdmi_is_connected,
+	.get_edid	= hdmi_get_edid,
+	.check_timing	= hdmi_check_timing,
+	.power_on	= hdmi_display_power_on,
+
+	/* manager */
 	.mode_fixup	= hdmi_mode_fixup,
 	.mode_set	= hdmi_mode_set,
 	.get_max_resol	= hdmi_get_max_resol,
@@ -2020,7 +2019,7 @@
 static irqreturn_t hdmi_irq_handler(int irq, void *arg)
 {
 	struct exynos_drm_hdmi_context *ctx = arg;
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+	struct hdmi_context *hdata = ctx->ctx;
 	u32 intc_flag;
 
 	intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
@@ -2173,7 +2172,7 @@
 
 	DRM_DEBUG_KMS("%s\n", __func__);
 
-	hdmi_resource_poweroff((struct hdmi_context *)ctx->ctx);
+	hdmi_resource_poweroff(ctx->ctx);
 
 	return 0;
 }
@@ -2184,7 +2183,7 @@
 
 	DRM_DEBUG_KMS("%s\n", __func__);
 
-	hdmi_resource_poweron((struct hdmi_context *)ctx->ctx);
+	hdmi_resource_poweron(ctx->ctx);
 
 	return 0;
 }
@@ -2322,8 +2321,7 @@
 	hdata->irq = res->start;
 
 	/* register specific callbacks to common hdmi. */
-	exynos_drm_display_ops_register(&display_ops);
-	exynos_drm_manager_ops_register(&manager_ops);
+	exynos_hdmi_ops_register(&hdmi_ops);
 
 	hdmi_resource_poweron(hdata);
 
@@ -2351,7 +2349,7 @@
 static int __devexit hdmi_remove(struct platform_device *pdev)
 {
 	struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
-	struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+	struct hdmi_context *hdata = ctx->ctx;
 
 	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 4d5f41e..e15438c 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,7 +37,8 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_hdmi.h"
 
-#define HDMI_OVERLAY_NUMBER	3
+#define MIXER_WIN_NR		3
+#define MIXER_DEFAULT_WIN	0
 
 #define get_mixer_context(dev)	platform_get_drvdata(to_platform_device(dev))
 
@@ -75,16 +76,12 @@
 };
 
 struct mixer_context {
-	struct fb_videomode	*default_timing;
-	unsigned int		default_win;
-	unsigned int		default_bpp;
 	unsigned int		irq;
 	int			pipe;
 	bool			interlace;
-	bool			vp_enabled;
 
 	struct mixer_resources	mixer_res;
-	struct hdmi_win_data	win_data[HDMI_OVERLAY_NUMBER];
+	struct hdmi_win_data	win_data[MIXER_WIN_NR];
 };
 
 static const u8 filter_y_horiz_tap8[] = {
@@ -643,9 +640,9 @@
 
 	win = overlay->zpos;
 	if (win == DEFAULT_ZPOS)
-		win = mixer_ctx->default_win;
+		win = MIXER_DEFAULT_WIN;
 
-	if (win < 0 || win > HDMI_OVERLAY_NUMBER) {
+	if (win < 0 || win > MIXER_WIN_NR) {
 		DRM_ERROR("overlay plane[%d] is wrong\n", win);
 		return;
 	}
@@ -683,9 +680,9 @@
 	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
 
 	if (win == DEFAULT_ZPOS)
-		win = mixer_ctx->default_win;
+		win = MIXER_DEFAULT_WIN;
 
-	if (win < 0 || win > HDMI_OVERLAY_NUMBER) {
+	if (win < 0 || win > MIXER_WIN_NR) {
 		DRM_ERROR("overlay plane[%d] is wrong\n", win);
 		return;
 	}
@@ -706,9 +703,9 @@
 	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
 
 	if (win == DEFAULT_ZPOS)
-		win = mixer_ctx->default_win;
+		win = MIXER_DEFAULT_WIN;
 
-	if (win < 0 || win > HDMI_OVERLAY_NUMBER) {
+	if (win < 0 || win > MIXER_WIN_NR) {
 		DRM_ERROR("overlay plane[%d] is wrong\n", win);
 		return;
 	}
@@ -722,9 +719,12 @@
 	spin_unlock_irqrestore(&res->reg_slock, flags);
 }
 
-static struct exynos_hdmi_overlay_ops overlay_ops = {
+static struct exynos_mixer_ops mixer_ops = {
+	/* manager */
 	.enable_vblank		= mixer_enable_vblank,
 	.disable_vblank		= mixer_disable_vblank,
+
+	/* overlay */
 	.win_mode_set		= mixer_win_mode_set,
 	.win_commit		= mixer_win_commit,
 	.win_disable		= mixer_win_disable,
@@ -771,8 +771,7 @@
 static irqreturn_t mixer_irq_handler(int irq, void *arg)
 {
 	struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
-	struct mixer_context *ctx =
-			(struct mixer_context *)drm_hdmi_ctx->ctx;
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
 	struct mixer_resources *res = &ctx->mixer_res;
 	u32 val, val_base;
 
@@ -902,7 +901,7 @@
 
 	DRM_DEBUG_KMS("resume - start\n");
 
-	mixer_resource_poweron((struct mixer_context *)ctx->ctx);
+	mixer_resource_poweron(ctx->ctx);
 
 	return 0;
 }
@@ -913,7 +912,7 @@
 
 	DRM_DEBUG_KMS("suspend - start\n");
 
-	mixer_resource_poweroff((struct mixer_context *)ctx->ctx);
+	mixer_resource_poweroff(ctx->ctx);
 
 	return 0;
 }
@@ -926,8 +925,7 @@
 static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
 				 struct platform_device *pdev)
 {
-	struct mixer_context *mixer_ctx =
-			(struct mixer_context *)ctx->ctx;
+	struct mixer_context *mixer_ctx = ctx->ctx;
 	struct device *dev = &pdev->dev;
 	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
 	struct resource *res;
@@ -1076,7 +1074,7 @@
 		goto fail;
 
 	/* register specific callback point to common hdmi. */
-	exynos_drm_overlay_ops_register(&overlay_ops);
+	exynos_mixer_ops_register(&mixer_ops);
 
 	mixer_resource_poweron(ctx);
 
@@ -1093,7 +1091,7 @@
 	struct device *dev = &pdev->dev;
 	struct exynos_drm_hdmi_context *drm_hdmi_ctx =
 					platform_get_drvdata(pdev);
-	struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx;
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
 
 	dev_info(dev, "remove successful\n");
 
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 21071ce..36eb074 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -29,7 +29,6 @@
 #define __MDFLD_DSI_OUTPUT_H__
 
 #include <linux/backlight.h>
-#include <linux/version.h>
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #include <drm/drm_crtc.h>
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 2c8a60c..f920fb5 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -129,6 +129,7 @@
 	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
 		return -EINVAL;
 
+	/* This is all entirely broken */
 	down_write(&current->mm->mmap_sem);
 	old_fops = file_priv->filp->f_op;
 	file_priv->filp->f_op = &i810_buffer_fops;
@@ -157,11 +158,8 @@
 	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
 		return -EINVAL;
 
-	down_write(&current->mm->mmap_sem);
-	retcode = do_munmap(current->mm,
-			    (unsigned long)buf_priv->virtual,
+	retcode = vm_munmap((unsigned long)buf_priv->virtual,
 			    (size_t) buf->total);
-	up_write(&current->mm->mmap_sem);
 
 	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
 	buf_priv->virtual = NULL;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index dfa55e7..ae8a64f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -64,7 +64,7 @@
 		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
 int i915_enable_rc6 __read_mostly = -1;
-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
 MODULE_PARM_DESC(i915_enable_rc6,
 		"Enable power-saving render C-state 6. "
 		"Different stages can be selected via bitmask values "
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4c65c63..0d1e4b7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1087,11 +1087,9 @@
 	if (obj == NULL)
 		return -ENOENT;
 
-	down_write(&current->mm->mmap_sem);
-	addr = do_mmap(obj->filp, 0, args->size,
+	addr = vm_mmap(obj->filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
-	up_write(&current->mm->mmap_sem);
 	drm_gem_object_unreference_unlocked(obj);
 	if (IS_ERR((void *)addr))
 		return addr;
@@ -1493,6 +1491,7 @@
 {
 	list_del_init(&obj->ring_list);
 	obj->last_rendering_seqno = 0;
+	obj->last_fenced_seqno = 0;
 }
 
 static void
@@ -1521,6 +1520,7 @@
 	BUG_ON(!list_empty(&obj->gpu_write_list));
 	BUG_ON(!obj->active);
 	obj->ring = NULL;
+	obj->last_fenced_ring = NULL;
 
 	i915_gem_object_move_off_active(obj);
 	obj->fenced_gpu_access = false;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2abf4eb..b4bb1ef 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3728,6 +3728,9 @@
 #define  GT_FIFO_FREE_ENTRIES			0x120008
 #define    GT_FIFO_NUM_RESERVED_ENTRIES		20
 
+#define GEN6_UCGCTL1				0x9400
+# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE		(1 << 5)
+
 #define GEN6_UCGCTL2				0x9404
 # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE		(1 << 13)
 # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE		(1 << 12)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 91b35fd..5908cd5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2245,6 +2245,33 @@
 }
 
 static int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	bool was_interruptible = dev_priv->mm.interruptible;
+	int ret;
+
+	wait_event(dev_priv->pending_flip_queue,
+		   atomic_read(&dev_priv->mm.wedged) ||
+		   atomic_read(&obj->pending_flip) == 0);
+
+	/* Big Hammer, we also need to ensure that any pending
+	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+	 * current scanout is retired before unpinning the old
+	 * framebuffer.
+	 *
+	 * This should only fail upon a hung GPU, in which case we
+	 * can safely continue.
+	 */
+	dev_priv->mm.interruptible = false;
+	ret = i915_gem_object_finish_gpu(obj);
+	dev_priv->mm.interruptible = was_interruptible;
+
+	return ret;
+}
+
+static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		    struct drm_framebuffer *old_fb)
 {
@@ -2282,25 +2309,8 @@
 		return ret;
 	}
 
-	if (old_fb) {
-		struct drm_i915_private *dev_priv = dev->dev_private;
-		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-
-		wait_event(dev_priv->pending_flip_queue,
-			   atomic_read(&dev_priv->mm.wedged) ||
-			   atomic_read(&obj->pending_flip) == 0);
-
-		/* Big Hammer, we also need to ensure that any pending
-		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
-		 * current scanout is retired before unpinning the old
-		 * framebuffer.
-		 *
-		 * This should only fail upon a hung GPU, in which case we
-		 * can safely continue.
-		 */
-		ret = i915_gem_object_finish_gpu(obj);
-		(void) ret;
-	}
+	if (old_fb)
+		intel_finish_fb(old_fb);
 
 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
 					 LEAVE_ATOMIC_MODE_SET);
@@ -3371,6 +3381,23 @@
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_device *dev = crtc->dev;
 
+	/* Flush any pending WAITs before we disable the pipe. Note that
+	 * we need to drop the struct_mutex in order to acquire it again
+	 * during the lowlevel dpms routines around a couple of the
+	 * operations. It does not look trivial nor desirable to move
+	 * that locking higher. So instead we leave a window for the
+	 * submission of further commands on the fb before we can actually
+	 * disable it. This race with userspace exists anyway, and we can
+	 * only rely on the pipe being disabled by userspace after it
+	 * receives the hotplug notification and has flushed any pending
+	 * batches.
+	 */
+	if (crtc->fb) {
+		mutex_lock(&dev->struct_mutex);
+		intel_finish_fb(crtc->fb);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
 	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
@@ -3451,8 +3478,11 @@
 			return false;
 	}
 
-	/* All interlaced capable intel hw wants timings in frames. */
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
+	/* All interlaced capable intel hw wants timings in frames. Note though
+	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
+	 * timings, so we need to be careful not to clobber these.*/
+	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
+		drm_mode_set_crtcinfo(adjusted_mode, 0);
 
 	return true;
 }
@@ -7438,7 +7468,13 @@
 	OUT_RING(fb->pitches[0] | obj->tiling_mode);
 	OUT_RING(obj->gtt_offset);
 
-	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+	/* Contrary to the suggestions in the documentation,
+	 * "Enable Panel Fitter" does not seem to be required when page
+	 * flipping with a non-native mode, and worse causes a normal
+	 * modeset to fail.
+	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+	 */
+	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 	OUT_RING(pf | pipesrc);
 	ADVANCE_LP_RING();
@@ -8529,6 +8565,10 @@
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
+	I915_WRITE(GEN6_UCGCTL1,
+		   I915_READ(GEN6_UCGCTL1) |
+		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
+
 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
 	 * gating disable must be set.  Failure to set it results in
 	 * flickering pixels due to Z write ordering failures after
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 110552f..4b63791 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -219,14 +219,38 @@
 	return (max_link_clock * max_lanes * 8) / 10;
 }
 
+static bool
+intel_dp_adjust_dithering(struct intel_dp *intel_dp,
+			  struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
+{
+	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+	int max_lanes = intel_dp_max_lane_count(intel_dp);
+	int max_rate, mode_rate;
+
+	mode_rate = intel_dp_link_required(mode->clock, 24);
+	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+	if (mode_rate > max_rate) {
+		mode_rate = intel_dp_link_required(mode->clock, 18);
+		if (mode_rate > max_rate)
+			return false;
+
+		if (adjusted_mode)
+			adjusted_mode->private_flags
+				|= INTEL_MODE_DP_FORCE_6BPC;
+
+		return true;
+	}
+
+	return true;
+}
+
 static int
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
-	int max_lanes = intel_dp_max_lane_count(intel_dp);
-	int max_rate, mode_rate;
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
 		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -236,16 +260,8 @@
 			return MODE_PANEL;
 	}
 
-	mode_rate = intel_dp_link_required(mode->clock, 24);
-	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
-
-	if (mode_rate > max_rate) {
-			mode_rate = intel_dp_link_required(mode->clock, 18);
-			if (mode_rate > max_rate)
-				return MODE_CLOCK_HIGH;
-			else
-				mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
-	}
+	if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
+		return MODE_CLOCK_HIGH;
 
 	if (mode->clock < 10000)
 		return MODE_CLOCK_LOW;
@@ -672,7 +688,7 @@
 	int lane_count, clock;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
-	int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+	int bpp;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -686,6 +702,11 @@
 		mode->clock = intel_dp->panel_fixed_mode->clock;
 	}
 
+	if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
+		return false;
+
+	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5a14149..715afa1 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -105,6 +105,10 @@
 #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
 #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
 #define INTEL_MODE_DP_FORCE_6BPC (0x10)
+/* This flag must be set by the encoder's mode_fixup if it changes the crtc
+ * timings in the mode to prevent the crtc fixup from overwriting them.
+ * Currently only lvds needs that. */
+#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
 
 static inline void
 intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 19ecd78..6e9ee33 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -279,6 +279,8 @@
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_plane *plane;
 
+	mutex_lock(&dev->mode_config.mutex);
+
 	ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
 	if (ret)
 		DRM_DEBUG("failed to restore crtc mode\n");
@@ -286,4 +288,6 @@
 	/* Be sure to shut off any planes that may be active */
 	list_for_each_entry(plane, &config->plane_list, head)
 		plane->funcs->disable_plane(plane);
+
+	mutex_unlock(&dev->mode_config.mutex);
 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 601c86e..8fdc957 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -390,7 +390,7 @@
 		bus->has_gpio = intel_gpio_setup(bus, i);
 
 		/* XXX force bit banging until GMBUS is fully debugged */
-		if (bus->has_gpio && IS_GEN2(dev))
+		if (bus->has_gpio)
 			bus->force_bit = true;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 95db2e9..30e2c82 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -187,6 +187,8 @@
 
 	mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
 	mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
+
+	mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
 }
 
 static void
@@ -208,6 +210,8 @@
 
 	mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
 	mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
+
+	mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
 }
 
 static inline u32 panel_fitter_scaling(u32 source, u32 target)
@@ -283,6 +287,8 @@
 	for_each_pipe(pipe)
 		I915_WRITE(BCLRPAT(pipe), 0);
 
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
 	switch (intel_lvds->fitting_mode) {
 	case DRM_MODE_SCALE_CENTER:
 		/*
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 230a141..48177ec 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -47,8 +47,6 @@
 	adjusted_mode->vtotal = fixed_mode->vtotal;
 
 	adjusted_mode->clock = fixed_mode->clock;
-
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
 }
 
 /* adjusted_mode has been preset to be the panel's fixed mode */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e25581a..f75806e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1038,7 +1038,7 @@
 	 * of the buffer.
 	 */
 	ring->effective_size = ring->size;
-	if (IS_I830(ring->dev))
+	if (IS_I830(ring->dev) || IS_845G(ring->dev))
 		ring->effective_size -= 128;
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index a464771..e90dfb6 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -95,7 +95,6 @@
 	/* must disable */
 	sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
 	sprctl |= SPRITE_ENABLE;
-	sprctl |= SPRITE_DEST_KEY;
 
 	/* Sizes are 0 based */
 	src_w--;
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 34d591b..da3e7c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -235,6 +235,7 @@
 		return -EPERM;
 
 	strncpy(string, profile, sizeof(string));
+	string[sizeof(string) - 1] = 0;
 	if ((ptr = strchr(string, '\n')))
 		*ptr = '\0';
 
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index a7844ab..2746402 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -42,7 +42,7 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
 	static const u8 nv50[] = { 16, 8, 0, 24 };
-	if (dev_priv->card_type == 0xaf)
+	if (dev_priv->chipset == 0xaf)
 		return nvaf[lane];
 	return nv50[lane];
 }
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e607c4d..2d39f99 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -230,6 +230,10 @@
 	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
 		return;
 
+	/* some R4xx chips have the wrong frev */
+	if (rdev->family <= CHIP_RV410)
+		frev = 1;
+
 	switch (frev) {
 	case 1:
 		switch (crev) {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 81801c1..fe33d35 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2553,7 +2553,7 @@
 	 * or the chip could hang on a subsequent access
 	 */
 	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
-		udelay(5000);
+		mdelay(5);
 	}
 
 	/* This function is required to workaround a hardware bug in some (all?)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 391bd26..c8187c4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1135,7 +1135,7 @@
 	}
 	if (rdev->flags & RADEON_IS_AGP) {
 		size_bf = mc->gtt_start;
-		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+		size_af = 0xFFFFFFFF - mc->gtt_end;
 		if (size_bf > size_af) {
 			if (mc->mc_vram_size > size_bf) {
 				dev_warn(rdev->dev, "limiting VRAM\n");
@@ -1149,7 +1149,7 @@
 				mc->real_vram_size = size_af;
 				mc->mc_vram_size = size_af;
 			}
-			mc->vram_start = mc->gtt_end;
+			mc->vram_start = mc->gtt_end + 1;
 		}
 		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
@@ -2839,7 +2839,7 @@
 		/* r7xx asics need to soft reset RLC before halting */
 		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
 		RREG32(SRBM_SOFT_RESET);
-		udelay(15000);
+		mdelay(15);
 		WREG32(SRBM_SOFT_RESET, 0);
 		RREG32(SRBM_SOFT_RESET);
 	}
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 84c5462..75ed17c 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -407,7 +407,7 @@
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
-	DRM_UDELAY(15000);
+	mdelay(15);
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
 
 	fw_data = (const __be32 *)dev_priv->me_fw->data;
@@ -500,7 +500,7 @@
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
-	DRM_UDELAY(15000);
+	mdelay(15);
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
 
 	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
@@ -1797,7 +1797,7 @@
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
-	DRM_UDELAY(15000);
+	mdelay(15);
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 6ae0c75..9c6b29a 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -633,7 +633,7 @@
 				tmp &= ~(R300_SCLK_FORCE_VAP);
 				tmp |= RADEON_SCLK_FORCE_CP;
 				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
-				udelay(15000);
+				mdelay(15);
 
 				tmp = RREG32_PLL(R300_SCLK_CNTL2);
 				tmp &= ~(R300_SCLK_FORCE_TCL |
@@ -651,12 +651,12 @@
 			tmp |= (RADEON_ENGIN_DYNCLK_MODE |
 				(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
 			WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 
 			tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
 			tmp |= RADEON_SCLK_DYN_START_CNTL;
 			WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 
 			/* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200
 			   to lockup randomly, leave them as set by BIOS.
@@ -696,7 +696,7 @@
 					tmp |= RADEON_SCLK_MORE_FORCEON;
 				}
 				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
-				udelay(15000);
+				mdelay(15);
 			}
 
 			/* RV200::A11 A12, RV250::A11 A12 */
@@ -709,7 +709,7 @@
 				tmp |= RADEON_TCL_BYPASS_DISABLE;
 				WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
 			}
-			udelay(15000);
+			mdelay(15);
 
 			/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
 			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
@@ -722,14 +722,14 @@
 				RADEON_PIXCLK_TMDS_ALWAYS_ONb);
 
 			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 
 			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
 			tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
 				RADEON_PIXCLK_DAC_ALWAYS_ONb);
 
 			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 		}
 	} else {
 		/* Turn everything OFF (ForceON to everything) */
@@ -861,7 +861,7 @@
 			}
 			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
 
-			udelay(16000);
+			mdelay(16);
 
 			if ((rdev->family == CHIP_R300) ||
 			    (rdev->family == CHIP_R350)) {
@@ -870,7 +870,7 @@
 					R300_SCLK_FORCE_GA |
 					R300_SCLK_FORCE_CBA);
 				WREG32_PLL(R300_SCLK_CNTL2, tmp);
-				udelay(16000);
+				mdelay(16);
 			}
 
 			if (rdev->flags & RADEON_IS_IGP) {
@@ -878,7 +878,7 @@
 				tmp &= ~(RADEON_FORCEON_MCLKA |
 					 RADEON_FORCEON_YCLKA);
 				WREG32_PLL(RADEON_MCLK_CNTL, tmp);
-				udelay(16000);
+				mdelay(16);
 			}
 
 			if ((rdev->family == CHIP_RV200) ||
@@ -887,7 +887,7 @@
 				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
 				tmp |= RADEON_SCLK_MORE_FORCEON;
 				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
-				udelay(16000);
+				mdelay(16);
 			}
 
 			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
@@ -900,7 +900,7 @@
 				 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
 
 			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
-			udelay(16000);
+			mdelay(16);
 
 			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
 			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 81fc100..2cad9fd 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2845,7 +2845,7 @@
 					case 4:
 						val = RBIOS16(index);
 						index += 2;
-						udelay(val * 1000);
+						mdelay(val);
 						break;
 					case 6:
 						slave_addr = id & 0xff;
@@ -3044,7 +3044,7 @@
 					udelay(150);
 					break;
 				case 2:
-					udelay(1000);
+					mdelay(1);
 					break;
 				case 3:
 					while (tmp--) {
@@ -3075,13 +3075,13 @@
 						/*mclk_cntl |= 0x00001111;*//* ??? */
 						WREG32_PLL(RADEON_MCLK_CNTL,
 							   mclk_cntl);
-						udelay(10000);
+						mdelay(10);
 #endif
 						WREG32_PLL
 						    (RADEON_CLK_PWRMGT_CNTL,
 						     tmp &
 						     ~RADEON_CG_NO1_DEBUG_0);
-						udelay(10000);
+						mdelay(10);
 					}
 					break;
 				default:
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index bd05156..3c2e7a0 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -970,7 +970,7 @@
 
 			encoder = obj_to_encoder(obj);
 
-			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC ||
+			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
 			    encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
 				continue;
 
@@ -1000,6 +1000,7 @@
 	 * cases the DVI port is actually a virtual KVM port connected to the service
 	 * processor.
 	 */
+out:
 	if ((!rdev->is_atom_bios) &&
 	    (ret == connector_status_disconnected) &&
 	    rdev->mode_info.bios_hardcoded_edid_size) {
@@ -1007,7 +1008,6 @@
 		ret = connector_status_connected;
 	}
 
-out:
 	/* updated in get modes as well since we need to know if it's analog or digital */
 	radeon_connector_update_scratch_regs(connector, ret);
 	return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 85bcfc8..3edec1c 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -900,6 +900,10 @@
 	struct radeon_i2c_chan *i2c;
 	int ret;
 
+	/* don't add the mm_i2c bus unless hw_i2c is enabled */
+	if (rec->mm_i2c && (radeon_hw_i2c == 0))
+		return NULL;
+
 	i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
 	if (i2c == NULL)
 		return NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 66d5fe1..65060b7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -147,6 +147,12 @@
 	    (rdev->pdev->subsystem_device == 0x01fd))
 		return true;
 
+	/* RV515 seems to have MSI issues where it loses
+	 * MSI rearms occasionally. This leads to lockups and freezes.
+	 * disable it by default.
+	 */
+	if (rdev->family == CHIP_RV515)
+		return false;
 	if (rdev->flags & RADEON_IS_IGP) {
 		/* APUs work fine with MSIs */
 		if (rdev->family >= CHIP_PALM)
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 2f46e0c..42db254 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -88,7 +88,7 @@
 		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
 		lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
 		WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
-		udelay(1000);
+		mdelay(1);
 
 		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
 		lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
@@ -101,7 +101,7 @@
 				  (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
 		if (is_mac)
 			lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
-		udelay(panel_pwr_delay * 1000);
+		mdelay(panel_pwr_delay);
 		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
@@ -118,10 +118,10 @@
 			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
 			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
 		}
-		udelay(panel_pwr_delay * 1000);
+		mdelay(panel_pwr_delay);
 		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
 		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
-		udelay(panel_pwr_delay * 1000);
+		mdelay(panel_pwr_delay);
 		break;
 	}
 
@@ -656,7 +656,7 @@
 
 	WREG32(RADEON_DAC_MACRO_CNTL, tmp);
 
-	udelay(2000);
+	mdelay(2);
 
 	if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
 		found = connector_status_connected;
@@ -1499,7 +1499,7 @@
 	tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
 	WREG32(RADEON_DAC_CNTL2, tmp);
 
-	udelay(10000);
+	mdelay(10);
 
 	if (ASIC_IS_R300(rdev)) {
 		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c62ae4b..cdab1ae 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -969,7 +969,7 @@
 	}
 	if (rdev->flags & RADEON_IS_AGP) {
 		size_bf = mc->gtt_start;
-		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+		size_af = 0xFFFFFFFF - mc->gtt_end;
 		if (size_bf > size_af) {
 			if (mc->mc_vram_size > size_bf) {
 				dev_warn(rdev->dev, "limiting VRAM\n");
@@ -983,7 +983,7 @@
 				mc->real_vram_size = size_af;
 				mc->mc_vram_size = size_af;
 			}
-			mc->vram_start = mc->gtt_end;
+			mc->vram_start = mc->gtt_end + 1;
 		}
 		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ac7a199..27bda98 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2999,8 +2999,8 @@
 	}
 	r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
 			  &rdev->rlc.save_restore_gpu_addr);
+	radeon_bo_unreserve(rdev->rlc.save_restore_obj);
 	if (r) {
-		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
 		dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
 		si_rlc_fini(rdev);
 		return r;
@@ -3023,9 +3023,8 @@
 	}
 	r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
 			  &rdev->rlc.clear_state_gpu_addr);
+	radeon_bo_unreserve(rdev->rlc.clear_state_obj);
 	if (r) {
-
-		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
 		dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
 		si_rlc_fini(rdev);
 		return r;
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 031aaaf..b6d8608 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -988,7 +988,7 @@
 	 * for locking on FreeBSD.
 	 */
 	if (cmdbuf->size) {
-		kcmd_addr = kmalloc(cmdbuf->size * 8, GFP_KERNEL);
+		kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL);
 		if (kcmd_addr == NULL)
 			return -ENOMEM;
 
@@ -1015,8 +1015,8 @@
 		cmdbuf->vb_addr = kvb_addr;
 	}
 	if (cmdbuf->nbox) {
-		kbox_addr = kmalloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
-				    GFP_KERNEL);
+		kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect),
+					  GFP_KERNEL);
 		if (kbox_addr == NULL) {
 			ret = -ENOMEM;
 			goto done;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a3d0332..ffddcba 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -34,7 +34,7 @@
 config HID_BATTERY_STRENGTH
 	bool
 	depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
-	default y
+	default n
 
 config HIDRAW
 	bool "/dev/hidraw raw HID device support"
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index de47039..9f85f82 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -62,7 +62,7 @@
 
 static const struct hid_device_id tivo_devices[] = {
 	/* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
-	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
 	{ }
 };
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 8af25a0..7233c88 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -30,37 +30,6 @@
 #include "hyperv_vmbus.h"
 
 
-/* #defines */
-
-
-/* Amount of space to write to */
-#define BYTES_AVAIL_TO_WRITE(r, w, z) \
-	((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w))
-
-
-/*
- *
- * hv_get_ringbuffer_availbytes()
- *
- * Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static inline void
-hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
-			  u32 *read, u32 *write)
-{
-	u32 read_loc, write_loc;
-
-	smp_read_barrier_depends();
-
-	/* Capture the read/write indices before they changed */
-	read_loc = rbi->ring_buffer->read_index;
-	write_loc = rbi->ring_buffer->write_index;
-
-	*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
-	*read = rbi->ring_datasize - *write;
-}
-
 /*
  * hv_get_next_write_location()
  *
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 145f135..9140236 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -391,6 +391,7 @@
 		break;
 	default:
 		BUG();
+		val = "";
 	}
 
 	return sprintf(buf, "%s\n", val);
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index 7765e4f..1958f03 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -59,14 +59,11 @@
 	struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
 };
 
-static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
-			      int *value)
+static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
 {
 	u16 config;
-	s16 conversion;
 	struct ads1015_data *data = i2c_get_clientdata(client);
 	unsigned int pga = data->channel_data[channel].pga;
-	int fullscale;
 	unsigned int data_rate = data->channel_data[channel].data_rate;
 	unsigned int conversion_time_ms;
 	int res;
@@ -78,7 +75,6 @@
 	if (res < 0)
 		goto err_unlock;
 	config = res;
-	fullscale = fullscale_table[pga];
 	conversion_time_ms = DIV_ROUND_UP(1000, data_rate_table[data_rate]);
 
 	/* setup and start single conversion */
@@ -105,33 +101,36 @@
 	}
 
 	res = i2c_smbus_read_word_swapped(client, ADS1015_CONVERSION);
-	if (res < 0)
-		goto err_unlock;
-	conversion = res;
-
-	mutex_unlock(&data->update_lock);
-
-	*value = DIV_ROUND_CLOSEST(conversion * fullscale, 0x7ff0);
-
-	return 0;
 
 err_unlock:
 	mutex_unlock(&data->update_lock);
 	return res;
 }
 
+static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
+			     s16 reg)
+{
+	struct ads1015_data *data = i2c_get_clientdata(client);
+	unsigned int pga = data->channel_data[channel].pga;
+	int fullscale = fullscale_table[pga];
+
+	return DIV_ROUND_CLOSEST(reg * fullscale, 0x7ff0);
+}
+
 /* sysfs callback function */
 static ssize_t show_in(struct device *dev, struct device_attribute *da,
 	char *buf)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
 	struct i2c_client *client = to_i2c_client(dev);
-	int in;
 	int res;
+	int index = attr->index;
 
-	res = ads1015_read_value(client, attr->index, &in);
+	res = ads1015_read_adc(client, index);
+	if (res < 0)
+		return res;
 
-	return (res < 0) ? res : sprintf(buf, "%d\n", in);
+	return sprintf(buf, "%d\n", ads1015_reg_to_mv(client, index, res));
 }
 
 static const struct sensor_device_attribute ads1015_in[] = {
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index b7494af..37a8fc9 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -122,6 +122,38 @@
 	return true;
 }
 
+/*
+ * Newer BKDG versions have an updated recommendation on how to properly
+ * initialize the running average range (was: 0xE, now: 0x9). This avoids
+ * counter saturations resulting in bogus power readings.
+ * We correct this value ourselves to cope with older BIOSes.
+ */
+static void __devinit tweak_runavg_range(struct pci_dev *pdev)
+{
+	u32 val;
+	const struct pci_device_id affected_device = {
+		PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) };
+
+	/*
+	 * let this quirk apply only to the current version of the
+	 * northbridge, since future versions may change the behavior
+	 */
+	if (!pci_match_id(&affected_device, pdev))
+		return;
+
+	pci_bus_read_config_dword(pdev->bus,
+		PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
+		REG_TDP_RUNNING_AVERAGE, &val);
+	if ((val & 0xf) != 0xe)
+		return;
+
+	val &= ~0xf;
+	val |=  0x9;
+	pci_bus_write_config_dword(pdev->bus,
+		PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
+		REG_TDP_RUNNING_AVERAGE, val);
+}
+
 static void __devinit fam15h_power_init_data(struct pci_dev *f4,
 					     struct fam15h_power_data *data)
 {
@@ -155,6 +187,13 @@
 	struct device *dev;
 	int err;
 
+	/*
+	 * though we ignore every other northbridge, we still have to
+	 * do the tweaking on _each_ node in MCM processors as the counters
+	 * are working hand-in-hand
+	 */
+	tweak_runavg_range(pdev);
+
 	if (!fam15h_power_is_internal_node0(pdev)) {
 		err = -ENODEV;
 		goto exit;
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index be51037..29b319d 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -710,13 +710,13 @@
  * If a negative value is stored in any of the referenced registers, this value
  * reflects an error code which will be returned.
  */
-static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
+static int pmbus_get_boolean(struct pmbus_data *data, int index)
 {
 	u8 s1 = (index >> 24) & 0xff;
 	u8 s2 = (index >> 16) & 0xff;
 	u8 reg = (index >> 8) & 0xff;
 	u8 mask = index & 0xff;
-	int status;
+	int ret, status;
 	u8 regval;
 
 	status = data->status[reg];
@@ -725,7 +725,7 @@
 
 	regval = status & mask;
 	if (!s1 && !s2)
-		*val = !!regval;
+		ret = !!regval;
 	else {
 		long v1, v2;
 		struct pmbus_sensor *sensor1, *sensor2;
@@ -739,9 +739,9 @@
 
 		v1 = pmbus_reg2data(data, sensor1);
 		v2 = pmbus_reg2data(data, sensor2);
-		*val = !!(regval && v1 >= v2);
+		ret = !!(regval && v1 >= v2);
 	}
-	return 0;
+	return ret;
 }
 
 static ssize_t pmbus_show_boolean(struct device *dev,
@@ -750,11 +750,10 @@
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
 	struct pmbus_data *data = pmbus_update_device(dev);
 	int val;
-	int err;
 
-	err = pmbus_get_boolean(data, attr->index, &val);
-	if (err)
-		return err;
+	val = pmbus_get_boolean(data, attr->index);
+	if (val < 0)
+		return val;
 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index d3b778d..c5f6be4 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -343,10 +343,11 @@
 	return err;
 }
 
-static int __init smsc47b397_find(unsigned short *addr)
+static int __init smsc47b397_find(void)
 {
 	u8 id, rev;
 	char *name;
+	unsigned short addr;
 
 	superio_enter();
 	id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
@@ -370,14 +371,14 @@
 	rev = superio_inb(SUPERIO_REG_DEVREV);
 
 	superio_select(SUPERIO_REG_LD8);
-	*addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
+	addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
 		 |  superio_inb(SUPERIO_REG_BASE_LSB);
 
 	pr_info("found SMSC %s (base address 0x%04x, revision %u)\n",
-		name, *addr, rev);
+		name, addr, rev);
 
 	superio_exit();
-	return 0;
+	return addr;
 }
 
 static int __init smsc47b397_init(void)
@@ -385,9 +386,10 @@
 	unsigned short address;
 	int ret;
 
-	ret = smsc47b397_find(&address);
-	if (ret)
+	ret = smsc47b397_find();
+	if (ret < 0)
 		return ret;
+	address = ret;
 
 	ret = platform_driver_register(&smsc47b397_driver);
 	if (ret)
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index c590c14..b5aa38d 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -491,10 +491,10 @@
 	.attrs = smsc47m1_attributes,
 };
 
-static int __init smsc47m1_find(unsigned short *addr,
-				struct smsc47m1_sio_data *sio_data)
+static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
 {
 	u8 val;
+	unsigned short addr;
 
 	superio_enter();
 	val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
@@ -546,9 +546,9 @@
 	}
 
 	superio_select();
-	*addr = (superio_inb(SUPERIO_REG_BASE) << 8)
+	addr = (superio_inb(SUPERIO_REG_BASE) << 8)
 	      |  superio_inb(SUPERIO_REG_BASE + 1);
-	if (*addr == 0) {
+	if (addr == 0) {
 		pr_info("Device address not set, will not use\n");
 		superio_exit();
 		return -ENODEV;
@@ -565,7 +565,7 @@
 	}
 
 	superio_exit();
-	return 0;
+	return addr;
 }
 
 /* Restore device to its initial state */
@@ -938,13 +938,15 @@
 	unsigned short address;
 	struct smsc47m1_sio_data sio_data;
 
-	if (smsc47m1_find(&address, &sio_data))
-		return -ENODEV;
+	err = smsc47m1_find(&sio_data);
+	if (err < 0)
+		return err;
+	address = err;
 
 	/* Sets global pdev as a side effect */
 	err = smsc47m1_device_add(address, &sio_data);
 	if (err)
-		goto exit;
+		return err;
 
 	err = platform_driver_probe(&smsc47m1_driver, smsc47m1_probe);
 	if (err)
@@ -955,7 +957,6 @@
 exit_device:
 	platform_device_unregister(pdev);
 	smsc47m1_restore(&sio_data);
-exit:
 	return err;
 }
 
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 37f4211..00e8f21 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -182,7 +182,6 @@
 	pci_restore_state(pdev);
 
 	i2c_dw_init(i2c);
-	i2c_dw_enable(i2c);
 	return 0;
 }
 
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e3e470f..59fbd70 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -42,6 +42,7 @@
 #include <linux/inetdevice.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <net/route.h>
 
 #include <net/tcp.h>
 #include <net/ipv6.h>
@@ -1826,7 +1827,10 @@
 	route->path_rec->reversible = 1;
 	route->path_rec->pkey = cpu_to_be16(0xffff);
 	route->path_rec->mtu_selector = IB_SA_EQ;
-	route->path_rec->sl = id_priv->tos >> 5;
+	route->path_rec->sl = netdev_get_prio_tc_map(
+			ndev->priv_flags & IFF_802_1Q_VLAN ?
+				vlan_dev_real_dev(ndev) : ndev,
+			rt_tos2priority(id_priv->tos));
 
 	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
 	route->path_rec->rate_selector = IB_SA_EQ;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 396e293..e497dfbe 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -125,7 +125,8 @@
 	unsigned char *prev_tail;
 
 	prev_tail = skb_tail_pointer(skb);
-	NLA_PUT(skb, type, len, data);
+	if (nla_put(skb, type, len, data))
+		goto nla_put_failure;
 	nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
 	return 0;
 
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 83b720e..246fdc1 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -179,7 +179,7 @@
 {
 	struct ib_port_attr attr;
 	char *speed = "";
-	int rate = -1;		/* in deci-Gb/sec */
+	int rate;		/* in deci-Gb/sec */
 	ssize_t ret;
 
 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
@@ -187,9 +187,6 @@
 		return ret;
 
 	switch (attr.active_speed) {
-	case IB_SPEED_SDR:
-		rate = 25;
-		break;
 	case IB_SPEED_DDR:
 		speed = " DDR";
 		rate = 50;
@@ -210,6 +207,10 @@
 		speed = " EDR";
 		rate = 250;
 		break;
+	case IB_SPEED_SDR:
+	default:		/* default to SDR for invalid rates */
+		rate = 25;
+		break;
 	}
 
 	rate *= ib_width_enum_to_int(attr.active_width);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5861cdb..8002ae6 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -66,12 +66,6 @@
 	{ }
 };
 
-static struct ctl_path ucma_ctl_path[] = {
-	{ .procname = "net" },
-	{ .procname = "rdma_ucm" },
-	{ }
-};
-
 struct ucma_file {
 	struct mutex		mut;
 	struct file		*filp;
@@ -1392,7 +1386,7 @@
 		goto err1;
 	}
 
-	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
+	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
 	if (!ucma_ctl_table_hdr) {
 		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
 		ret = -ENOMEM;
@@ -1408,7 +1402,7 @@
 
 static void __exit ucma_cleanup(void)
 {
-	unregister_sysctl_table(ucma_ctl_table_hdr);
+	unregister_net_sysctl_table(ucma_ctl_table_hdr);
 	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
 	misc_deregister(&ucma_misc);
 	idr_destroy(&ctx_idr);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75d3056..669673e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -253,6 +253,11 @@
 		if (out_mad->data[15] & 0x1)
 			props->active_speed = IB_SPEED_FDR10;
 	}
+
+	/* Avoid wrong speed value returned by FW if the IB link is down. */
+	if (props->state == IB_PORT_DOWN)
+		 props->active_speed = IB_SPEED_SDR;
+
 out:
 	kfree(in_mad);
 	kfree(out_mad);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 69e2ad0..daf21b8 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3232,6 +3232,7 @@
 	srq_attr.attr.max_wr = sdev->srq_size;
 	srq_attr.attr.max_sge = 1;
 	srq_attr.attr.srq_limit = 0;
+	srq_attr.srq_type = IB_SRQT_BASIC;
 
 	sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
 	if (IS_ERR(sdev->srq))
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 2d78779..7faf4a7 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -380,8 +380,7 @@
 
 config INPUT_TWL6040_VIBRA
 	tristate "Support for TWL6040 Vibrator"
-	depends on TWL4030_CORE
-	select TWL6040_CORE
+	depends on TWL6040_CORE
 	select INPUT_FF_MEMLESS
 	help
 	  This option enables support for TWL6040 Vibrator Driver.
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 34aebb8..3c843cd 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -95,7 +95,8 @@
 	input_dev = input_allocate_device();
 	if (!onkey || !input_dev) {
 		dev_err(&pdev->dev, "Failed to allocate memory\n");
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto err_free_mem;
 	}
 
 	onkey->input = input_dev;
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 45874fe..14e94f5 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -28,7 +28,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/workqueue.h>
-#include <linux/i2c/twl.h>
+#include <linux/input.h>
 #include <linux/mfd/twl6040.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -257,7 +257,7 @@
 
 static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
 {
-	struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
+	struct twl6040_vibra_data *pdata = pdev->dev.platform_data;
 	struct vibra_info *info;
 	int ret;
 
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index d2c0db1..4790110 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -486,7 +486,6 @@
 	unsigned char *packet = psmouse->packet;
 
 	input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
-	input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
 	input_mt_report_pointer_emulation(dev, true);
 	input_sync(dev);
 }
@@ -967,6 +966,7 @@
 	if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
 		return -1;
 
+	__set_bit(INPUT_PROP_POINTER, dev->propbit);
 	__set_bit(EV_KEY, dev->evbit);
 	__set_bit(EV_ABS, dev->evbit);
 	__clear_bit(EV_REL, dev->evbit);
@@ -1017,7 +1017,9 @@
 			 */
 			psmouse_warn(psmouse, "couldn't query resolution data.\n");
 		}
-
+		/* v4 is clickpad, with only one button. */
+		__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+		__clear_bit(BTN_RIGHT, dev->keybit);
 		__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
 		/* For X to recognize me as touchpad. */
 		input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
@@ -1245,6 +1247,8 @@
  */
 static int elantech_reconnect(struct psmouse *psmouse)
 {
+	psmouse_reset(psmouse);
+
 	if (elantech_detect(psmouse, 0))
 		return -1;
 
@@ -1324,6 +1328,8 @@
 	if (!etd)
 		return -ENOMEM;
 
+	psmouse_reset(psmouse);
+
 	etd->parity[0] = 1;
 	for (i = 1; i < 256; i++)
 		etd->parity[i] = etd->parity[i & (i - 1)] ^ 1;
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index a9ad8e1..39fe9b7 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -12,9 +12,9 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/input-polldev.h>
+#include <linux/gpio.h>
 #include <linux/gpio_mouse.h>
 
-#include <asm/gpio.h>
 
 /*
  * Timer function which is run every scan_ms ms when the device is opened.
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index a977bfa..661a0ca 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -741,6 +741,14 @@
 			}
 		} else {
 			/* SFAC packet */
+			if ((packet[0] & (FSP_PB0_LBTN|FSP_PB0_PHY_BTN)) ==
+				FSP_PB0_LBTN) {
+				/* On-pad click in SFAC mode should be handled
+				 * by userspace.  On-pad clicks in MFMC mode
+				 * are real clickpad clicks, and not ignored.
+				 */
+				packet[0] &= ~FSP_PB0_LBTN;
+			}
 
 			/* no multi-finger information */
 			ad->last_mt_fgr = 0;
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 22b2180..f310249 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -304,7 +304,7 @@
 		return 0;
 
 	if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
-		printk(KERN_WARNING "trackpoint.c: failed to get extended button data\n");
+		psmouse_warn(psmouse, "failed to get extended button data\n");
 		button_info = 0;
 	}
 
@@ -326,16 +326,18 @@
 
 	error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
 	if (error) {
-		printk(KERN_ERR
-			"trackpoint.c: failed to create sysfs attributes, error: %d\n",
-			error);
+		psmouse_err(psmouse,
+			    "failed to create sysfs attributes, error: %d\n",
+			    error);
 		kfree(psmouse->private);
 		psmouse->private = NULL;
 		return -1;
 	}
 
-	printk(KERN_INFO "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
-		firmware_id, (button_info & 0xf0) >> 4, button_info & 0x0f);
+	psmouse_info(psmouse,
+		     "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
+		     firmware_id,
+		     (button_info & 0xf0) >> 4, button_info & 0x0f);
 
 	return 0;
 }
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index 6c6f6d8..f7eda3d0 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -1,6 +1,4 @@
 /*
- * drivers/input/touchscreen/tps6507x_ts.c
- *
  * Touchscreen driver for the tps6507x chip.
  *
  * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
@@ -376,4 +374,4 @@
 MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
 MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tps6507x-tsc");
+MODULE_ALIAS("platform:tps6507x-ts");
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index b3d6ac1..a6d9fd2 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -176,7 +176,7 @@
 	struct cardstate *cs = tty->driver_data;
 
 	if (!cs) { /* happens if we didn't find cs in open */
-		printk(KERN_DEBUG "%s: no cardstate\n", __func__);
+		gig_dbg(DEBUG_IF, "%s: no cardstate", __func__);
 		return;
 	}
 
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index ba91333..88e4f0e 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -156,17 +156,9 @@
 hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
 {
 	int rc;
-	unsigned char valbuf[128];
 	hysdn_card *card = file->private_data;
 
-	if (count > (sizeof(valbuf) - 1))
-		count = sizeof(valbuf) - 1;	/* limit length */
-	if (copy_from_user(valbuf, buf, count))
-		return (-EFAULT);	/* copy failed */
-
-	valbuf[count] = 0;	/* terminating 0 */
-
-	rc = kstrtoul(valbuf, 0, &card->debug_flags);
+	rc = kstrtoul_from_user(buf, count, 0, &card->debug_flags);
 	if (rc < 0)
 		return rc;
 	hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 800243b..64ad702 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -35,7 +35,7 @@
  * NOTE:  we reuse the platform_data structure of GPIO leds,
  * but repurpose its "gpio" number as a PWM channel number.
  */
-static int __init pwmled_probe(struct platform_device *pdev)
+static int __devinit pwmled_probe(struct platform_device *pdev)
 {
 	const struct gpio_led_platform_data	*pdata;
 	struct pwmled				*leds;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3d0dfa7..97e73e5 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -539,9 +539,6 @@
 	bitmap->events_cleared = bitmap->mddev->events;
 	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
 
-	bitmap->flags |= BITMAP_HOSTENDIAN;
-	sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
-
 	kunmap_atomic(sb);
 
 	return 0;
@@ -1788,7 +1785,9 @@
 		 * re-add of a missing device */
 		start = mddev->recovery_cp;
 
+	mutex_lock(&mddev->bitmap_info.mutex);
 	err = bitmap_init_from_disk(bitmap, start);
+	mutex_unlock(&mddev->bitmap_info.mutex);
 
 	if (err)
 		goto out;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b0ba524..68965e6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -859,7 +859,7 @@
 	int ret;
 	unsigned redundancy = 0;
 	struct raid_dev *dev;
-	struct md_rdev *rdev, *freshest;
+	struct md_rdev *rdev, *tmp, *freshest;
 	struct mddev *mddev = &rs->md;
 
 	switch (rs->raid_type->level) {
@@ -877,7 +877,7 @@
 	}
 
 	freshest = NULL;
-	rdev_for_each(rdev, mddev) {
+	rdev_for_each_safe(rdev, tmp, mddev) {
 		if (!rdev->meta_bdev)
 			continue;
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b572e1e..477eb2e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7560,14 +7560,14 @@
 		 * any transients in the value of "sync_action".
 		 */
 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		/* Clear some bits that don't mean anything, but
 		 * might be left set
 		 */
 		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
 		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
 
-		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
+		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
 			goto unlock;
 		/* no recovery is running.
 		 * remove any failed drives, then
@@ -8140,7 +8140,8 @@
 
 	for_each_mddev(mddev, tmp) {
 		if (mddev_trylock(mddev)) {
-			__md_stop_writes(mddev);
+			if (mddev->pers)
+				__md_stop_writes(mddev);
 			mddev->safemode = 2;
 			mddev_unlock(mddev);
 		}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d35e4c9..15dd59b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1712,6 +1712,7 @@
 	struct r1conf *conf = mddev->private;
 	int primary;
 	int i;
+	int vcnt;
 
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -1721,9 +1722,9 @@
 			break;
 		}
 	r1_bio->read_disk = primary;
+	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
-		int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
 		int size;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index fff7821..c8dbb84 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1788,6 +1788,7 @@
 	struct r10conf *conf = mddev->private;
 	int i, first;
 	struct bio *tbio, *fbio;
+	int vcnt;
 
 	atomic_set(&r10_bio->remaining, 1);
 
@@ -1802,10 +1803,10 @@
 	first = i;
 	fbio = r10_bio->devs[i].bio;
 
+	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
 	/* now find blocks with errors */
 	for (i=0 ; i < conf->copies ; i++) {
 		int  j, d;
-		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
 		tbio = r10_bio->devs[i].bio;
 
@@ -1871,7 +1872,6 @@
 	 */
 	for (i = 0; i < conf->copies; i++) {
 		int j, d;
-		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
 		tbio = r10_bio->devs[i].repl_bio;
 		if (!tbio || !tbio->bi_end_io)
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index 7f98984..eab2ea4 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -54,6 +54,7 @@
 	struct list_head hybrid_tuner_instance_list;
 
 	u32 if_khz;
+	u32 xtal_khz;
 	u32 freq_hz;
 	u32 bandwidth;
 	u8  video_standard;
@@ -214,9 +215,9 @@
 	.size = 12401,
 };
 
-static const struct xc5000_fw_cfg xc5000c_41_024_5_31875 = {
-	.name = "dvb-fe-xc5000c-41.024.5-31875.fw",
-	.size = 16503,
+static const struct xc5000_fw_cfg xc5000c_41_024_5 = {
+	.name = "dvb-fe-xc5000c-41.024.5.fw",
+	.size = 16497,
 };
 
 static inline const struct xc5000_fw_cfg *xc5000_assign_firmware(int chip_id)
@@ -226,7 +227,7 @@
 	case XC5000A:
 		return &xc5000a_1_6_114;
 	case XC5000C:
-		return &xc5000c_41_024_5_31875;
+		return &xc5000c_41_024_5;
 	}
 }
 
@@ -572,6 +573,31 @@
 	return found;
 }
 
+static int xc_set_xtal(struct dvb_frontend *fe)
+{
+	struct xc5000_priv *priv = fe->tuner_priv;
+	int ret = XC_RESULT_SUCCESS;
+
+	switch (priv->chip_id) {
+	default:
+	case XC5000A:
+		/* 32.000 MHz xtal is default */
+		break;
+	case XC5000C:
+		switch (priv->xtal_khz) {
+		default:
+		case 32000:
+			/* 32.000 MHz xtal is default */
+			break;
+		case 31875:
+			/* 31.875 MHz xtal configuration */
+			ret = xc_write_reg(priv, 0x000f, 0x8081);
+			break;
+		}
+		break;
+	}
+	return ret;
+}
 
 static int xc5000_fwupload(struct dvb_frontend *fe)
 {
@@ -603,6 +629,8 @@
 	} else {
 		printk(KERN_INFO "xc5000: firmware uploading...\n");
 		ret = xc_load_i2c_sequence(fe,  fw->data);
+		if (XC_RESULT_SUCCESS == ret)
+			ret = xc_set_xtal(fe);
 		printk(KERN_INFO "xc5000: firmware upload complete...\n");
 	}
 
@@ -1164,6 +1192,9 @@
 		priv->if_khz = cfg->if_khz;
 	}
 
+	if (priv->xtal_khz == 0)
+		priv->xtal_khz = cfg->xtal_khz;
+
 	if (priv->radio_input == 0)
 		priv->radio_input = cfg->radio_input;
 
diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h
index 3396f8e..39a73bf 100644
--- a/drivers/media/common/tuners/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -34,6 +34,7 @@
 	u8   i2c_address;
 	u32  if_khz;
 	u8   radio_input;
+	u32  xtal_khz;
 
 	int chip_id;
 };
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 4555baa..0f64d71 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -143,10 +143,12 @@
 static void dvb_frontend_wakeup(struct dvb_frontend *fe);
 static int dtv_get_frontend(struct dvb_frontend *fe,
 			    struct dvb_frontend_parameters *p_out);
+static int dtv_property_legacy_params_sync(struct dvb_frontend *fe,
+					   struct dvb_frontend_parameters *p);
 
 static bool has_get_frontend(struct dvb_frontend *fe)
 {
-	return fe->ops.get_frontend;
+	return fe->ops.get_frontend != NULL;
 }
 
 /*
@@ -697,6 +699,7 @@
 					fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
 					fepriv->delay = HZ / 2;
 				}
+				dtv_property_legacy_params_sync(fe, &fepriv->parameters_out);
 				fe->ops.read_status(fe, &s);
 				if (s != fepriv->status) {
 					dvb_frontend_add_event(fe, s); /* update event list */
@@ -1443,6 +1446,28 @@
 				__func__);
 			return -EINVAL;
 		}
+		/*
+		 * Get a delivery system that is compatible with DVBv3
+		 * NOTE: in order for this to work with software like Kaffeine that
+		 *	uses a DVBv5 call for DVB-S2 and a DVBv3 call to go back to
+		 *	DVB-S, drivers that support both should put the SYS_DVBS entry
+		 *	before the SYS_DVBS2, otherwise it won't switch back to DVB-S.
+		 *	The real fix is that userspace applications should not use DVBv3
+		 *	and not rely on calling FE_SET_FRONTEND to switch the delivery
+		 *	system.
+		 */
+		ncaps = 0;
+		while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) {
+			if (fe->ops.delsys[ncaps] == desired_system) {
+				delsys = desired_system;
+				break;
+			}
+			ncaps++;
+		}
+		if (delsys == SYS_UNDEFINED) {
+			dprintk("%s() Couldn't find a delivery system that matches %d\n",
+				__func__, desired_system);
+		}
 	} else {
 		/*
 		 * This is a DVBv5 call. So, it likely knows the supported
@@ -1491,9 +1516,10 @@
 				__func__);
 			return -EINVAL;
 		}
-		c->delivery_system = delsys;
 	}
 
+	c->delivery_system = delsys;
+
 	/*
 	 * The DVBv3 or DVBv5 call is requesting a different system. So,
 	 * emulation is needed.
@@ -1833,6 +1859,13 @@
 		return -EINVAL;
 
 	/*
+	 * Initialize output parameters to match the values given by
+	 * the user. FE_SET_FRONTEND triggers an initial frontend event
+	 * with status = 0, which copies output parameters to userspace.
+	 */
+	dtv_property_legacy_params_sync(fe, &fepriv->parameters_out);
+
+	/*
 	 * Be sure that the bandwidth will be filled for all
 	 * non-satellite systems, as tuners need to know what
 	 * low pass/Nyquist half filter should be applied, in
diff --git a/drivers/media/dvb/dvb-usb/it913x.c b/drivers/media/dvb/dvb-usb/it913x.c
index 3b7b102..482d249 100644
--- a/drivers/media/dvb/dvb-usb/it913x.c
+++ b/drivers/media/dvb/dvb-usb/it913x.c
@@ -238,12 +238,27 @@
 
 static u32 it913x_query(struct usb_device *udev, u8 pro)
 {
-	int ret;
+	int ret, i;
 	u8 data[4];
-	ret = it913x_io(udev, READ_LONG, pro, CMD_DEMOD_READ,
-		0x1222, 0, &data[0], 3);
+	u8 ver;
 
-	it913x_config.chip_ver = data[0];
+	for (i = 0; i < 5; i++) {
+		ret = it913x_io(udev, READ_LONG, pro, CMD_DEMOD_READ,
+			0x1222, 0, &data[0], 3);
+		ver = data[0];
+		if (ver > 0 && ver < 3)
+			break;
+		msleep(100);
+	}
+
+	if (ver < 1 || ver > 2) {
+		info("Failed to identify chip version applying 1");
+		it913x_config.chip_ver = 0x1;
+		it913x_config.chip_type = 0x9135;
+		return 0;
+	}
+
+	it913x_config.chip_ver = ver;
 	it913x_config.chip_type = (u16)(data[2] << 8) + data[1];
 
 	info("Chip Version=%02x Chip Type=%04x", it913x_config.chip_ver,
@@ -660,30 +675,41 @@
 			if ((packet_size > min_pkt) || (i == fw->size)) {
 				fw_data = (u8 *)(fw->data + pos);
 				pos += packet_size;
-				if (packet_size > 0)
-					ret |= it913x_io(udev, WRITE_DATA,
+				if (packet_size > 0) {
+					ret = it913x_io(udev, WRITE_DATA,
 						DEV_0, CMD_SCATTER_WRITE, 0,
 						0, fw_data, packet_size);
+					if (ret < 0)
+						break;
+				}
 				udelay(1000);
 			}
 		}
 		i++;
 	}
 
-	ret |= it913x_io(udev, WRITE_CMD, DEV_0, CMD_BOOT, 0, 0, NULL, 0);
-
-	msleep(100);
-
 	if (ret < 0)
-		info("FRM Firmware Download Failed (%04x)" , ret);
+		info("FRM Firmware Download Failed (%d)" , ret);
 	else
 		info("FRM Firmware Download Completed - Resetting Device");
 
-	ret |= it913x_return_status(udev);
+	msleep(30);
+
+	ret = it913x_io(udev, WRITE_CMD, DEV_0, CMD_BOOT, 0, 0, NULL, 0);
+	if (ret < 0)
+		info("FRM Device not responding to reboot");
+
+	ret = it913x_return_status(udev);
+	if (ret == 0) {
+		info("FRM Failed to reboot device");
+		return -ENODEV;
+	}
 
 	msleep(30);
 
-	ret |= it913x_wr_reg(udev, DEV_0,  I2C_CLK, I2C_CLK_400);
+	ret = it913x_wr_reg(udev, DEV_0,  I2C_CLK, I2C_CLK_400);
+
+	msleep(30);
 
 	/* Tuner function */
 	if (it913x_config.dual_mode)
@@ -901,5 +927,5 @@
 
 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
 MODULE_DESCRIPTION("it913x USB 2 Driver");
-MODULE_VERSION("1.27");
+MODULE_VERSION("1.28");
 MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
index 36d1175..a414b1f 100644
--- a/drivers/media/dvb/frontends/drxk_hard.c
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -1520,8 +1520,10 @@
 	dprintk(1, "\n");
 
 	if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) ||
-	    ((resultLen > 0) && (result == NULL)))
-		goto error;
+	    ((resultLen > 0) && (result == NULL))) {
+		printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+		return status;
+	}
 
 	mutex_lock(&state->mutex);
 
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index b09c5fa..af52658 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -1046,6 +1046,7 @@
 		goto exit_unregister_led;
 	}
 
+	data->dev->driver_type = RC_DRIVER_IR_RAW;
 	data->dev->driver_name = WBCIR_NAME;
 	data->dev->input_name = WBCIR_NAME;
 	data->dev->input_phys = "wbcir/cir0";
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f2479c5..ce1e7ba 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -492,7 +492,7 @@
 
 config VIDEO_MT9M032
 	tristate "MT9M032 camera sensor support"
-	depends on I2C && VIDEO_V4L2
+	depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
 	select VIDEO_APTINA_PLL
 	---help---
 	  This driver supports MT9M032 camera sensors from Aptina, monochrome
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 5452bee..989e556 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1763,13 +1763,13 @@
 		IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
 		if (iarg > AUDIO_STEREO_SWAPPED)
 			return -EINVAL;
-		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_playback, iarg);
+		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_playback, iarg + 1);
 
 	case AUDIO_BILINGUAL_CHANNEL_SELECT:
 		IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
 		if (iarg > AUDIO_STEREO_SWAPPED)
 			return -EINVAL;
-		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_multilingual_playback, iarg);
+		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_multilingual_playback, iarg + 1);
 
 	default:
 		return -EINVAL;
diff --git a/drivers/media/video/mt9m032.c b/drivers/media/video/mt9m032.c
index 7636672..645973c 100644
--- a/drivers/media/video/mt9m032.c
+++ b/drivers/media/video/mt9m032.c
@@ -392,10 +392,11 @@
 	}
 
 	/* Scaling is not supported, the format is thus fixed. */
-	ret = mt9m032_get_pad_format(subdev, fh, fmt);
+	fmt->format = *__mt9m032_get_pad_format(sensor, fh, fmt->which);
+	ret = 0;
 
 done:
-	mutex_lock(&sensor->lock);
+	mutex_unlock(&sensor->lock);
 	return ret;
 }
 
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 4a44f9a..b76b0ac 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -468,22 +468,30 @@
 	spin_unlock_irqrestore(&stream->clock.lock, flags);
 }
 
+static void uvc_video_clock_reset(struct uvc_streaming *stream)
+{
+	struct uvc_clock *clock = &stream->clock;
+
+	clock->head = 0;
+	clock->count = 0;
+	clock->last_sof = -1;
+	clock->sof_offset = -1;
+}
+
 static int uvc_video_clock_init(struct uvc_streaming *stream)
 {
 	struct uvc_clock *clock = &stream->clock;
 
 	spin_lock_init(&clock->lock);
-	clock->head = 0;
-	clock->count = 0;
 	clock->size = 32;
-	clock->last_sof = -1;
-	clock->sof_offset = -1;
 
 	clock->samples = kmalloc(clock->size * sizeof(*clock->samples),
 				 GFP_KERNEL);
 	if (clock->samples == NULL)
 		return -ENOMEM;
 
+	uvc_video_clock_reset(stream);
+
 	return 0;
 }
 
@@ -1424,8 +1432,6 @@
 
 	if (free_buffers)
 		uvc_free_urb_buffers(stream);
-
-	uvc_video_clock_cleanup(stream);
 }
 
 /*
@@ -1555,10 +1561,6 @@
 
 	uvc_video_stats_start(stream);
 
-	ret = uvc_video_clock_init(stream);
-	if (ret < 0)
-		return ret;
-
 	if (intf->num_altsetting > 1) {
 		struct usb_host_endpoint *best_ep = NULL;
 		unsigned int best_psize = 3 * 1024;
@@ -1683,6 +1685,8 @@
 
 	stream->frozen = 0;
 
+	uvc_video_clock_reset(stream);
+
 	ret = uvc_commit_video(stream, &stream->ctrl);
 	if (ret < 0) {
 		uvc_queue_enable(&stream->queue, 0);
@@ -1819,25 +1823,35 @@
 		uvc_uninit_video(stream, 1);
 		usb_set_interface(stream->dev->udev, stream->intfnum, 0);
 		uvc_queue_enable(&stream->queue, 0);
+		uvc_video_clock_cleanup(stream);
 		return 0;
 	}
 
-	ret = uvc_queue_enable(&stream->queue, 1);
+	ret = uvc_video_clock_init(stream);
 	if (ret < 0)
 		return ret;
 
+	ret = uvc_queue_enable(&stream->queue, 1);
+	if (ret < 0)
+		goto error_queue;
+
 	/* Commit the streaming parameters. */
 	ret = uvc_commit_video(stream, &stream->ctrl);
-	if (ret < 0) {
-		uvc_queue_enable(&stream->queue, 0);
-		return ret;
-	}
+	if (ret < 0)
+		goto error_commit;
 
 	ret = uvc_init_video(stream, GFP_KERNEL);
-	if (ret < 0) {
-		usb_set_interface(stream->dev->udev, stream->intfnum, 0);
-		uvc_queue_enable(&stream->queue, 0);
-	}
+	if (ret < 0)
+		goto error_video;
+
+	return 0;
+
+error_video:
+	usb_set_interface(stream->dev->udev, stream->intfnum, 0);
+error_commit:
+	uvc_queue_enable(&stream->queue, 0);
+error_queue:
+	uvc_video_clock_cleanup(stream);
 
 	return ret;
 }
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 29f463c..11e44386 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -268,10 +268,17 @@
 	  This is used to control charging LED brightness.
 
 config TWL6040_CORE
-	bool
-	depends on TWL4030_CORE && GENERIC_HARDIRQS
+	bool "Support for TWL6040 audio codec"
+	depends on I2C=y && GENERIC_HARDIRQS
 	select MFD_CORE
+	select REGMAP_I2C
 	default n
+	help
+	  Say yes here if you want support for Texas Instruments TWL6040 audio
+	  codec.
+	  This driver provides common support for accessing the device;
+	  additional drivers must be enabled in order to use the
+	  functionality of the device (audio, vibra).
 
 config MFD_STMPE
 	bool "Support STMicroelectronics STMPE"
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 1895cf9..1582c3d 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -527,7 +527,9 @@
 
 static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
-	return (offset < ASIC3_NUM_GPIOS) ? IRQ_BOARD_START + offset : -ENXIO;
+	struct asic3 *asic = container_of(chip, struct asic3, gpio);
+
+	return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO;
 }
 
 static __init int asic3_gpio_probe(struct platform_device *pdev,
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index ebc1e86..5be3248 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2788,6 +2788,7 @@
 		.constraints = {
 			.name = "db8500-vape",
 			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+			.always_on = true,
 		},
 		.consumer_supplies = db8500_vape_consumers,
 		.num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 95a2e54..c8aae66 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,7 +25,6 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
-#include <linux/gpio.h>
 #include <plat/usb.h>
 #include <linux/pm_runtime.h>
 
@@ -502,19 +501,6 @@
 	pm_runtime_get_sync(dev);
 	spin_lock_irqsave(&omap->lock, flags);
 
-	if (pdata->ehci_data->phy_reset) {
-		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
-			gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
-					 GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
-
-		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
-			gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
-					 GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
-
-		/* Hold the PHY in RESET for enough time till DIR is high */
-		udelay(10);
-	}
-
 	omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
 	dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
 
@@ -593,39 +579,10 @@
 			usbhs_omap_tll_init(dev, OMAP_TLL_CHANNEL_COUNT);
 	}
 
-	if (pdata->ehci_data->phy_reset) {
-		/* Hold the PHY in RESET for enough time till
-		 * PHY is settled and ready
-		 */
-		udelay(10);
-
-		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
-			gpio_set_value
-				(pdata->ehci_data->reset_gpio_port[0], 1);
-
-		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
-			gpio_set_value
-				(pdata->ehci_data->reset_gpio_port[1], 1);
-	}
-
 	spin_unlock_irqrestore(&omap->lock, flags);
 	pm_runtime_put_sync(dev);
 }
 
-static void omap_usbhs_deinit(struct device *dev)
-{
-	struct usbhs_hcd_omap		*omap = dev_get_drvdata(dev);
-	struct usbhs_omap_platform_data	*pdata = &omap->platdata;
-
-	if (pdata->ehci_data->phy_reset) {
-		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
-			gpio_free(pdata->ehci_data->reset_gpio_port[0]);
-
-		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
-			gpio_free(pdata->ehci_data->reset_gpio_port[1]);
-	}
-}
-
 
 /**
  * usbhs_omap_probe - initialize TI-based HCDs
@@ -860,7 +817,6 @@
 {
 	struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
 
-	omap_usbhs_deinit(&pdev->dev);
 	iounmap(omap->tll_base);
 	iounmap(omap->uhh_base);
 	clk_put(omap->init_60m_fclk);
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 99ef944..44afae0 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -80,44 +80,6 @@
 	{.name = "rc5t583-key",      }
 };
 
-int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val)
-{
-	struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
-	return regmap_write(rc5t583->regmap, reg, val);
-}
-
-int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val)
-{
-	struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
-	unsigned int ival;
-	int ret;
-	ret = regmap_read(rc5t583->regmap, reg, &ival);
-	if (!ret)
-		*val = (uint8_t)ival;
-	return ret;
-}
-
-int rc5t583_set_bits(struct device *dev, unsigned int reg,
-			unsigned int bit_mask)
-{
-	struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
-	return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask);
-}
-
-int rc5t583_clear_bits(struct device *dev, unsigned int reg,
-			unsigned int bit_mask)
-{
-	struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
-	return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0);
-}
-
-int rc5t583_update(struct device *dev, unsigned int reg,
-		unsigned int val, unsigned int mask)
-{
-	struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
-	return regmap_update_bits(rc5t583->regmap, reg, mask, val);
-}
-
 static int __rc5t583_set_ext_pwrreq1_control(struct device *dev,
 	int id, int ext_pwr, int slots)
 {
@@ -197,6 +159,7 @@
 			ds_id, ext_pwr_req);
 	return 0;
 }
+EXPORT_SYMBOL(rc5t583_ext_power_req_config);
 
 static int rc5t583_clear_ext_power_req(struct rc5t583 *rc5t583,
 	struct rc5t583_platform_data *pdata)
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index b2d8e51..2d6beda 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -30,7 +30,9 @@
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
-#include <linux/i2c/twl.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/twl6040.h>
 
@@ -39,7 +41,7 @@
 int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
 {
 	int ret;
-	u8 val = 0;
+	unsigned int val;
 
 	mutex_lock(&twl6040->io_mutex);
 	/* Vibra control registers from cache */
@@ -47,7 +49,7 @@
 		     reg == TWL6040_REG_VIBCTLR)) {
 		val = twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)];
 	} else {
-		ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+		ret = regmap_read(twl6040->regmap, reg, &val);
 		if (ret < 0) {
 			mutex_unlock(&twl6040->io_mutex);
 			return ret;
@@ -64,7 +66,7 @@
 	int ret;
 
 	mutex_lock(&twl6040->io_mutex);
-	ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+	ret = regmap_write(twl6040->regmap, reg, val);
 	/* Cache the vibra control registers */
 	if (reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR)
 		twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)] = val;
@@ -77,16 +79,9 @@
 int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
 {
 	int ret;
-	u8 val;
 
 	mutex_lock(&twl6040->io_mutex);
-	ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
-	if (ret)
-		goto out;
-
-	val |= mask;
-	ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
-out:
+	ret = regmap_update_bits(twl6040->regmap, reg, mask, mask);
 	mutex_unlock(&twl6040->io_mutex);
 	return ret;
 }
@@ -95,16 +90,9 @@
 int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
 {
 	int ret;
-	u8 val;
 
 	mutex_lock(&twl6040->io_mutex);
-	ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
-	if (ret)
-		goto out;
-
-	val &= ~mask;
-	ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
-out:
+	ret = regmap_update_bits(twl6040->regmap, reg, mask, 0);
 	mutex_unlock(&twl6040->io_mutex);
 	return ret;
 }
@@ -494,32 +482,58 @@
 	},
 };
 
-static int __devinit twl6040_probe(struct platform_device *pdev)
+static bool twl6040_readable_reg(struct device *dev, unsigned int reg)
 {
-	struct twl4030_audio_data *pdata = pdev->dev.platform_data;
+	/* Register 0 is not readable */
+	if (!reg)
+		return false;
+	return true;
+}
+
+static struct regmap_config twl6040_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = TWL6040_REG_STATUS, /* 0x2e */
+
+	.readable_reg = twl6040_readable_reg,
+};
+
+static int __devinit twl6040_probe(struct i2c_client *client,
+				     const struct i2c_device_id *id)
+{
+	struct twl6040_platform_data *pdata = client->dev.platform_data;
 	struct twl6040 *twl6040;
 	struct mfd_cell *cell = NULL;
 	int ret, children = 0;
 
 	if (!pdata) {
-		dev_err(&pdev->dev, "Platform data is missing\n");
+		dev_err(&client->dev, "Platform data is missing\n");
 		return -EINVAL;
 	}
 
 	/* In order to operate correctly we need valid interrupt config */
-	if (!pdata->naudint_irq || !pdata->irq_base) {
-		dev_err(&pdev->dev, "Invalid IRQ configuration\n");
+	if (!client->irq || !pdata->irq_base) {
+		dev_err(&client->dev, "Invalid IRQ configuration\n");
 		return -EINVAL;
 	}
 
-	twl6040 = kzalloc(sizeof(struct twl6040), GFP_KERNEL);
-	if (!twl6040)
-		return -ENOMEM;
+	twl6040 = devm_kzalloc(&client->dev, sizeof(struct twl6040),
+			       GFP_KERNEL);
+	if (!twl6040) {
+		ret = -ENOMEM;
+		goto err;
+	}
 
-	platform_set_drvdata(pdev, twl6040);
+	twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config);
+	if (IS_ERR(twl6040->regmap)) {
+		ret = PTR_ERR(twl6040->regmap);
+		goto err;
+	}
 
-	twl6040->dev = &pdev->dev;
-	twl6040->irq = pdata->naudint_irq;
+	i2c_set_clientdata(client, twl6040);
+
+	twl6040->dev = &client->dev;
+	twl6040->irq = client->irq;
 	twl6040->irq_base = pdata->irq_base;
 
 	mutex_init(&twl6040->mutex);
@@ -588,12 +602,12 @@
 	}
 
 	if (children) {
-		ret = mfd_add_devices(&pdev->dev, pdev->id, twl6040->cells,
+		ret = mfd_add_devices(&client->dev, -1, twl6040->cells,
 				      children, NULL, 0);
 		if (ret)
 			goto mfd_err;
 	} else {
-		dev_err(&pdev->dev, "No platform data found for children\n");
+		dev_err(&client->dev, "No platform data found for children\n");
 		ret = -ENODEV;
 		goto mfd_err;
 	}
@@ -608,14 +622,15 @@
 	if (gpio_is_valid(twl6040->audpwron))
 		gpio_free(twl6040->audpwron);
 gpio1_err:
-	platform_set_drvdata(pdev, NULL);
-	kfree(twl6040);
+	i2c_set_clientdata(client, NULL);
+	regmap_exit(twl6040->regmap);
+err:
 	return ret;
 }
 
-static int __devexit twl6040_remove(struct platform_device *pdev)
+static int __devexit twl6040_remove(struct i2c_client *client)
 {
-	struct twl6040 *twl6040 = platform_get_drvdata(pdev);
+	struct twl6040 *twl6040 = i2c_get_clientdata(client);
 
 	if (twl6040->power_count)
 		twl6040_power(twl6040, 0);
@@ -626,23 +641,30 @@
 	free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
 	twl6040_irq_exit(twl6040);
 
-	mfd_remove_devices(&pdev->dev);
-	platform_set_drvdata(pdev, NULL);
-	kfree(twl6040);
+	mfd_remove_devices(&client->dev);
+	i2c_set_clientdata(client, NULL);
+	regmap_exit(twl6040->regmap);
 
 	return 0;
 }
 
-static struct platform_driver twl6040_driver = {
+static const struct i2c_device_id twl6040_i2c_id[] = {
+	{ "twl6040", 0, },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, twl6040_i2c_id);
+
+static struct i2c_driver twl6040_driver = {
+	.driver = {
+		.name = "twl6040",
+		.owner = THIS_MODULE,
+	},
 	.probe		= twl6040_probe,
 	.remove		= __devexit_p(twl6040_remove),
-	.driver		= {
-		.owner	= THIS_MODULE,
-		.name	= "twl6040",
-	},
+	.id_table	= twl6040_i2c_id,
 };
 
-module_platform_driver(twl6040_driver);
+module_i2c_driver(twl6040_driver);
 
 MODULE_DESCRIPTION("TWL6040 MFD");
 MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
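
For illustration (not part of the patch): the twl6040-core conversion above moves register I/O from the twl-core helpers onto regmap over the client's own I2C address. A minimal sketch of that access pattern, with placeholder register addresses and mask values, looks roughly like this:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,	/* 8-bit register addresses, as in the patch */
	.val_bits = 8,	/* 8-bit register values */
};

/* Hypothetical helper: open a regmap, set one bit, read the register back. */
static int example_touch_register(struct i2c_client *client)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	map = regmap_init_i2c(client, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_update_bits(map, 0x01, 0x10, 0x10);	/* set bit 4 */
	if (!ret)
		ret = regmap_read(map, 0x01, &val);		/* read it back */

	regmap_exit(map);	/* pairs with regmap_init_i2c() */
	return ret;
}

Here regmap_update_bits() takes over the open-coded read-modify-write that twl6040_set_bits()/twl6040_clear_bits() previously performed with twl_i2c_read_u8()/twl_i2c_write_u8().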
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index eed213a..dabec55 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -873,7 +873,7 @@
 {
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
-	unsigned int from, nr, arg;
+	unsigned int from, nr, arg, trim_arg, erase_arg;
 	int err = 0, type = MMC_BLK_SECDISCARD;
 
 	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
@@ -881,20 +881,26 @@
 		goto out;
 	}
 
-	/* The sanitize operation is supported at v4.5 only */
-	if (mmc_can_sanitize(card)) {
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				EXT_CSD_SANITIZE_START, 1, 0);
-		goto out;
-	}
-
 	from = blk_rq_pos(req);
 	nr = blk_rq_sectors(req);
 
-	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
-		arg = MMC_SECURE_TRIM1_ARG;
-	else
-		arg = MMC_SECURE_ERASE_ARG;
+	/* The sanitize operation is supported at v4.5 only */
+	if (mmc_can_sanitize(card)) {
+		erase_arg = MMC_ERASE_ARG;
+		trim_arg = MMC_TRIM_ARG;
+	} else {
+		erase_arg = MMC_SECURE_ERASE_ARG;
+		trim_arg = MMC_SECURE_TRIM1_ARG;
+	}
+
+	if (mmc_erase_group_aligned(card, from, nr))
+		arg = erase_arg;
+	else if (mmc_can_trim(card))
+		arg = trim_arg;
+	else {
+		err = -EINVAL;
+		goto out;
+	}
 retry:
 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -904,25 +910,41 @@
 				 INAND_CMD38_ARG_SECERASE,
 				 0);
 		if (err)
-			goto out;
+			goto out_retry;
 	}
+
 	err = mmc_erase(card, from, nr, arg);
-	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
+	if (err == -EIO)
+		goto out_retry;
+	if (err)
+		goto out;
+
+	if (arg == MMC_SECURE_TRIM1_ARG) {
 		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 INAND_CMD38_ARG_EXT_CSD,
 					 INAND_CMD38_ARG_SECTRIM2,
 					 0);
 			if (err)
-				goto out;
+				goto out_retry;
 		}
+
 		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+		if (err == -EIO)
+			goto out_retry;
+		if (err)
+			goto out;
 	}
-out:
-	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+
+	if (mmc_can_sanitize(card))
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_SANITIZE_START, 1, 0);
+out_retry:
+	if (err && !mmc_blk_reset(md, card->host, type))
 		goto retry;
 	if (!err)
 		mmc_blk_reset_success(md, type);
+out:
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
@@ -1623,24 +1645,6 @@
 	return ret;
 }
 
-static int
-mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
-{
-	int err;
-
-	mmc_claim_host(card->host);
-	err = mmc_set_blocklen(card, 512);
-	mmc_release_host(card->host);
-
-	if (err) {
-		pr_err("%s: unable to set block size to 512: %d\n",
-			md->disk->disk_name, err);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static void mmc_blk_remove_req(struct mmc_blk_data *md)
 {
 	struct mmc_card *card;
@@ -1768,7 +1772,6 @@
 static int mmc_blk_probe(struct mmc_card *card)
 {
 	struct mmc_blk_data *md, *part_md;
-	int err;
 	char cap_str[10];
 
 	/*
@@ -1781,10 +1784,6 @@
 	if (IS_ERR(md))
 		return PTR_ERR(md);
 
-	err = mmc_blk_set_blksize(md, card);
-	if (err)
-		goto out;
-
 	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
 			cap_str, sizeof(cap_str));
 	pr_info("%s: %s %s %s %s\n",
@@ -1809,7 +1808,7 @@
  out:
 	mmc_blk_remove_parts(card, md);
 	mmc_blk_remove_req(md);
-	return err;
+	return 0;
 }
 
 static void mmc_blk_remove(struct mmc_card *card)
@@ -1825,7 +1824,7 @@
 }
 
 #ifdef CONFIG_PM
-static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
+static int mmc_blk_suspend(struct mmc_card *card)
 {
 	struct mmc_blk_data *part_md;
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
@@ -1845,8 +1844,6 @@
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
 	if (md) {
-		mmc_blk_set_blksize(md, card);
-
 		/*
 		 * Resume involves the card going into idle state,
 		 * so current partition is always the main one.
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 2517547..996f8e3 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -139,7 +139,7 @@
 
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 	q->limits.max_discard_sectors = max_discard;
-	if (card->erased_byte == 0)
+	if (card->erased_byte == 0 && !mmc_can_discard(card))
 		q->limits.discard_zeroes_data = 1;
 	q->limits.discard_granularity = card->pref_erase << 9;
 	/* granularity must not be greater than max. discard */
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 5d011a3..c60cee9 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,14 +122,14 @@
 	return 0;
 }
 
-static int mmc_bus_suspend(struct device *dev, pm_message_t state)
+static int mmc_bus_suspend(struct device *dev)
 {
 	struct mmc_driver *drv = to_mmc_driver(dev->driver);
 	struct mmc_card *card = mmc_dev_to_card(dev);
 	int ret = 0;
 
 	if (dev->driver && drv->suspend)
-		ret = drv->suspend(card, state);
+		ret = drv->suspend(card);
 	return ret;
 }
 
@@ -165,20 +165,14 @@
 	return pm_runtime_suspend(dev);
 }
 
-static const struct dev_pm_ops mmc_bus_pm_ops = {
-	.runtime_suspend	= mmc_runtime_suspend,
-	.runtime_resume		= mmc_runtime_resume,
-	.runtime_idle		= mmc_runtime_idle,
-};
-
-#define MMC_PM_OPS_PTR	(&mmc_bus_pm_ops)
-
-#else /* !CONFIG_PM_RUNTIME */
-
-#define MMC_PM_OPS_PTR	NULL
-
 #endif /* !CONFIG_PM_RUNTIME */
 
+static const struct dev_pm_ops mmc_bus_pm_ops = {
+	SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume,
+			mmc_runtime_idle)
+	SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)
+};
+
 static struct bus_type mmc_bus_type = {
 	.name		= "mmc",
 	.dev_attrs	= mmc_dev_attrs,
@@ -186,9 +180,7 @@
 	.uevent		= mmc_bus_uevent,
 	.probe		= mmc_bus_probe,
 	.remove		= mmc_bus_remove,
-	.suspend	= mmc_bus_suspend,
-	.resume		= mmc_bus_resume,
-	.pm		= MMC_PM_OPS_PTR,
+	.pm		= &mmc_bus_pm_ops,
 };
 
 int mmc_register_bus(void)
@@ -267,6 +259,15 @@
 {
 	int ret;
 	const char *type;
+	const char *uhs_bus_speed_mode = "";
+	static const char *const uhs_speeds[] = {
+		[UHS_SDR12_BUS_SPEED] = "SDR12 ",
+		[UHS_SDR25_BUS_SPEED] = "SDR25 ",
+		[UHS_SDR50_BUS_SPEED] = "SDR50 ",
+		[UHS_SDR104_BUS_SPEED] = "SDR104 ",
+		[UHS_DDR50_BUS_SPEED] = "DDR50 ",
+	};
+
 
 	dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);
 
@@ -296,6 +297,10 @@
 		break;
 	}
 
+	if (mmc_sd_card_uhs(card) &&
+		(card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)))
+		uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed];
+
 	if (mmc_host_is_spi(card->host)) {
 		pr_info("%s: new %s%s%s card on SPI\n",
 			mmc_hostname(card->host),
@@ -303,13 +308,13 @@
 			mmc_card_ddr_mode(card) ? "DDR " : "",
 			type);
 	} else {
-		pr_info("%s: new %s%s%s%s card at address %04x\n",
+		pr_info("%s: new %s%s%s%s%s card at address %04x\n",
 			mmc_hostname(card->host),
 			mmc_card_uhs(card) ? "ultra high speed " :
 			(mmc_card_highspeed(card) ? "high speed " : ""),
 			(mmc_card_hs200(card) ? "HS200 " : ""),
 			mmc_card_ddr_mode(card) ? "DDR " : "",
-			type, card->rca);
+			uhs_bus_speed_mode, type, card->rca);
 	}
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
index 29de31e..2c14be7 100644
--- a/drivers/mmc/core/cd-gpio.c
+++ b/drivers/mmc/core/cd-gpio.c
@@ -12,6 +12,7 @@
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
+#include <linux/mmc/cd-gpio.h>
 #include <linux/mmc/host.h>
 #include <linux/module.h>
 #include <linux/slab.h>
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 14f262e..ba821fe 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -527,10 +527,14 @@
 
 		if (data->flags & MMC_DATA_WRITE)
 			/*
-			 * The limit is really 250 ms, but that is
-			 * insufficient for some crappy cards.
+			 * The MMC spec says: "It is strongly recommended
+			 * for hosts to implement more than 500ms
+			 * timeout value even if the card indicates
+			 * the 250ms maximum busy length."  Even the
+			 * previous value of 300ms is known to be
+			 * insufficient for some cards.
 			 */
-			limit_us = 300000;
+			limit_us = 3000000;
 		else
 			limit_us = 100000;
 
@@ -1405,7 +1409,10 @@
 {
 	unsigned int erase_timeout;
 
-	if (card->ext_csd.erase_group_def & 1) {
+	if (arg == MMC_DISCARD_ARG ||
+	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
+		erase_timeout = card->ext_csd.trim_timeout;
+	} else if (card->ext_csd.erase_group_def & 1) {
 		/* High Capacity Erase Group Size uses HC timeouts */
 		if (arg == MMC_TRIM_ARG)
 			erase_timeout = card->ext_csd.trim_timeout;
@@ -1677,8 +1684,6 @@
 {
 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
 		return 1;
-	if (mmc_can_discard(card))
-		return 1;
 	return 0;
 }
 EXPORT_SYMBOL(mmc_can_trim);
@@ -1697,6 +1702,8 @@
 
 int mmc_can_sanitize(struct mmc_card *card)
 {
+	if (!mmc_can_trim(card) && !mmc_can_erase(card))
+		return 0;
 	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
 		return 1;
 	return 0;
@@ -2231,6 +2238,7 @@
 			mmc_card_is_removable(host))
 		return err;
 
+	mmc_claim_host(host);
 	if (card && mmc_card_mmc(card) &&
 			(card->ext_csd.cache_size > 0)) {
 		enable = !!enable;
@@ -2248,6 +2256,7 @@
 				card->ext_csd.cache_ctrl = enable;
 		}
 	}
+	mmc_release_host(host);
 
 	return err;
 }
@@ -2265,49 +2274,32 @@
 
 	cancel_delayed_work(&host->detect);
 	mmc_flush_scheduled_work();
-	if (mmc_try_claim_host(host)) {
-		err = mmc_cache_ctrl(host, 0);
-		mmc_release_host(host);
-	} else {
-		err = -EBUSY;
-	}
 
+	err = mmc_cache_ctrl(host, 0);
 	if (err)
 		goto out;
 
 	mmc_bus_get(host);
 	if (host->bus_ops && !host->bus_dead) {
 
-		/*
-		 * A long response time is not acceptable for device drivers
-		 * when doing suspend. Prevent mmc_claim_host in the suspend
-		 * sequence, to potentially wait "forever" by trying to
-		 * pre-claim the host.
-		 */
-		if (mmc_try_claim_host(host)) {
-			if (host->bus_ops->suspend) {
-				err = host->bus_ops->suspend(host);
-			}
-			mmc_release_host(host);
+		if (host->bus_ops->suspend)
+			err = host->bus_ops->suspend(host);
 
-			if (err == -ENOSYS || !host->bus_ops->resume) {
-				/*
-				 * We simply "remove" the card in this case.
-				 * It will be redetected on resume.  (Calling
-				 * bus_ops->remove() with a claimed host can
-				 * deadlock.)
-				 */
-				if (host->bus_ops->remove)
-					host->bus_ops->remove(host);
-				mmc_claim_host(host);
-				mmc_detach_bus(host);
-				mmc_power_off(host);
-				mmc_release_host(host);
-				host->pm_flags = 0;
-				err = 0;
-			}
-		} else {
-			err = -EBUSY;
+		if (err == -ENOSYS || !host->bus_ops->resume) {
+			/*
+			 * We simply "remove" the card in this case.
+			 * It will be redetected on resume.  (Calling
+			 * bus_ops->remove() with a claimed host can
+			 * deadlock.)
+			 */
+			if (host->bus_ops->remove)
+				host->bus_ops->remove(host);
+			mmc_claim_host(host);
+			mmc_detach_bus(host);
+			mmc_power_off(host);
+			mmc_release_host(host);
+			host->pm_flags = 0;
+			err = 0;
 		}
 	}
 	mmc_bus_put(host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 02914d6..54df5ad 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -695,6 +695,11 @@
 		else if (host->ios.clock <= 200000000)
 			index = EXT_CSD_PWR_CL_200_195;
 		break;
+	case MMC_VDD_27_28:
+	case MMC_VDD_28_29:
+	case MMC_VDD_29_30:
+	case MMC_VDD_30_31:
+	case MMC_VDD_31_32:
 	case MMC_VDD_32_33:
 	case MMC_VDD_33_34:
 	case MMC_VDD_34_35:
@@ -1111,11 +1116,10 @@
 		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
 				EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
 		err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
-		if (err) {
-			pr_err("%s: power class selection to bus width %d failed\n",
-				mmc_hostname(card->host), 1 << bus_width);
-			goto err;
-		}
+		if (err)
+			pr_warning("%s: power class selection to bus width %d"
+				   " failed\n", mmc_hostname(card->host),
+				   1 << bus_width);
 	}
 
 	/*
@@ -1147,10 +1151,10 @@
 			err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
 						    ext_csd);
 			if (err)
-				pr_err("%s: power class selection to "
-				       "bus width %d failed\n",
-				       mmc_hostname(card->host),
-				       1 << bus_width);
+				pr_warning("%s: power class selection to "
+					   "bus width %d failed\n",
+					   mmc_hostname(card->host),
+					   1 << bus_width);
 
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 EXT_CSD_BUS_WIDTH,
@@ -1178,10 +1182,10 @@
 			err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
 						    ext_csd);
 			if (err)
-				pr_err("%s: power class selection to "
-				       "bus width %d ddr %d failed\n",
-				       mmc_hostname(card->host),
-				       1 << bus_width, ddr);
+				pr_warning("%s: power class selection to "
+					   "bus width %d ddr %d failed\n",
+					   mmc_hostname(card->host),
+					   1 << bus_width, ddr);
 
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 EXT_CSD_BUS_WIDTH,
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 40989e6..236842e 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -192,9 +192,15 @@
 	return ret;
 }
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
+
+static int pm_no_operation(struct device *dev)
+{
+	return 0;
+}
 
 static const struct dev_pm_ops sdio_bus_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_no_operation, pm_no_operation)
 	SET_RUNTIME_PM_OPS(
 		pm_generic_runtime_suspend,
 		pm_generic_runtime_resume,
@@ -204,11 +210,11 @@
 
 #define SDIO_PM_OPS_PTR	(&sdio_bus_pm_ops)
 
-#else /* !CONFIG_PM_RUNTIME */
+#else /* !CONFIG_PM */
 
 #define SDIO_PM_OPS_PTR	NULL
 
-#endif /* !CONFIG_PM_RUNTIME */
+#endif /* !CONFIG_PM */
 
 static struct bus_type sdio_bus_type = {
 	.name		= "sdio",
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index 000b3ad..787aba1 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -31,6 +31,7 @@
 # define ATMCI_MR_PDCFBYTE		(  1 << 13)	/* Force Byte Transfer */
 # define ATMCI_MR_PDCPADV		(  1 << 14)	/* Padding Value */
 # define ATMCI_MR_PDCMODE		(  1 << 15)	/* PDC-oriented Mode */
+# define ATMCI_MR_CLKODD(x)		((x) << 16)	/* LSB of Clock Divider */
 #define ATMCI_DTOR			0x0008	/* Data Timeout */
 # define ATMCI_DTOCYC(x)		((x) <<  0)	/* Data Timeout Cycles */
 # define ATMCI_DTOMUL(x)		((x) <<  4)	/* Data Timeout Multiplier */
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 9819dc0..e94476b 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -77,6 +77,7 @@
 	bool    has_cstor_reg;
 	bool    has_highspeed;
 	bool    has_rwproof;
+	bool	has_odd_clk_div;
 };
 
 struct atmel_mci_dma {
@@ -482,7 +483,14 @@
 static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
 					unsigned int ns)
 {
-	return (ns * (host->bus_hz / 1000000) + 999) / 1000;
+	/*
+	 * It is easier here to use us instead of ns for the timeout,
+	 * as it prevents overflows during the calculation.
+	 */
+	unsigned int us = DIV_ROUND_UP(ns, 1000);
+
+	/* Maximum clock frequency is host->bus_hz/2 */
+	return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
 }
 
 static void atmci_set_timeout(struct atmel_mci *host,
@@ -1127,16 +1135,27 @@
 		}
 
 		/* Calculate clock divider */
-		clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
-		if (clkdiv > 255) {
-			dev_warn(&mmc->class_dev,
-				"clock %u too slow; using %lu\n",
-				clock_min, host->bus_hz / (2 * 256));
-			clkdiv = 255;
+		if (host->caps.has_odd_clk_div) {
+			clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
+			if (clkdiv > 511) {
+				dev_warn(&mmc->class_dev,
+				         "clock %u too slow; using %lu\n",
+				         clock_min, host->bus_hz / (511 + 2));
+				clkdiv = 511;
+			}
+			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
+			                 | ATMCI_MR_CLKODD(clkdiv & 1);
+		} else {
+			clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
+			if (clkdiv > 255) {
+				dev_warn(&mmc->class_dev,
+				         "clock %u too slow; using %lu\n",
+				         clock_min, host->bus_hz / (2 * 256));
+				clkdiv = 255;
+			}
+			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
 		}
 
-		host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
-
 		/*
 		 * WRPROOF and RDPROOF prevent overruns/underruns by
 		 * stopping the clock when the FIFO is full/empty.
@@ -2007,35 +2026,35 @@
 			"version: 0x%x\n", version);
 
 	host->caps.has_dma = 0;
-	host->caps.has_pdc = 0;
+	host->caps.has_pdc = 1;
 	host->caps.has_cfg_reg = 0;
 	host->caps.has_cstor_reg = 0;
 	host->caps.has_highspeed = 0;
 	host->caps.has_rwproof = 0;
+	host->caps.has_odd_clk_div = 0;
 
 	/* keep only major version number */
 	switch (version & 0xf00) {
-	case 0x100:
-	case 0x200:
-		host->caps.has_pdc = 1;
-		host->caps.has_rwproof = 1;
-		break;
-	case 0x300:
-	case 0x400:
 	case 0x500:
+		host->caps.has_odd_clk_div = 1;
+	case 0x400:
+	case 0x300:
 #ifdef CONFIG_AT_HDMAC
 		host->caps.has_dma = 1;
 #else
-		host->caps.has_dma = 0;
 		dev_info(&host->pdev->dev,
 			"has dma capability but dma engine is not selected, then use pio\n");
 #endif
+		host->caps.has_pdc = 0;
 		host->caps.has_cfg_reg = 1;
 		host->caps.has_cstor_reg = 1;
 		host->caps.has_highspeed = 1;
+	case 0x200:
 		host->caps.has_rwproof = 1;
+	case 0x100:
 		break;
 	default:
+		host->caps.has_pdc = 0;
 		dev_warn(&host->pdev->dev,
 				"Unmanaged mci version, set minimum capabilities\n");
 		break;
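
For illustration (not part of the patch): assuming the new odd-divider scheme follows f_sdclk = bus_hz / (clkdiv + 2), with clkdiv split between CLKDIV (clkdiv >> 1) and the new CLKODD bit (clkdiv & 1), the arithmetic works out as in this stand-alone sketch with example frequencies:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long bus_hz = 100000000;	/* example MCK: 100 MHz */
	unsigned long clock_min = 400000;	/* requested card clock: 400 kHz */
	unsigned long clkdiv = DIV_ROUND_UP(bus_hz, clock_min) - 2;	/* 248 */

	if (clkdiv > 511)			/* 9-bit divider on v5xx IP */
		clkdiv = 511;

	/* MR encoding: CLKDIV takes clkdiv >> 1, CLKODD takes its LSB */
	printf("CLKDIV=%lu CLKODD=%lu -> %lu Hz\n",
	       clkdiv >> 1, clkdiv & 1, bus_hz / (clkdiv + 2));
	return 0;
}

With these numbers it prints CLKDIV=124 CLKODD=0 -> 400000 Hz; the older scheme could only reach even overall divisors of the form bus_hz / (2 * (clkdiv + 1)).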
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index bf3c9b4..ab3fc46 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -526,8 +526,10 @@
 		return -ENODEV;
 
 	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
-	if (sg_len < 0)
+	if (sg_len < 0) {
+		host->dma_ops->stop(host);
 		return sg_len;
+	}
 
 	host->using_dma = 1;
 
@@ -1879,7 +1881,8 @@
 	if (!host->dma_ops)
 		goto no_dma;
 
-	if (host->dma_ops->init) {
+	if (host->dma_ops->init && host->dma_ops->start &&
+	    host->dma_ops->stop && host->dma_ops->cleanup) {
 		if (host->dma_ops->init(host)) {
 			dev_err(&host->dev, "%s: Unable to initialize "
 				"DMA Controller.\n", __func__);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 47adb16..56d4499 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -249,7 +249,7 @@
 	 * the pbias cell programming support is still missing when
 	 * booting with Device tree
 	 */
-	if (of_have_populated_dt() && !vdd)
+	if (dev->of_node && !vdd)
 		return 0;
 
 	if (mmc_slot(host).before_set_reg)
@@ -1549,7 +1549,7 @@
 			 * can't be allowed when booting with device
 			 * tree.
 			 */
-			(!of_have_populated_dt())) {
+			!host->dev->of_node) {
 				/*
 				 * The mmc_select_voltage fn of the core does
 				 * not seem to set the power_mode to
@@ -1741,7 +1741,7 @@
 		.data = &omap4_reg_offset,
 	},
 	{},
-}
+};
 MODULE_DEVICE_TABLE(of, omap_mmc_of_match);
 
 static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
@@ -1785,7 +1785,7 @@
 }
 #endif
 
-static int __init omap_hsmmc_probe(struct platform_device *pdev)
+static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
 {
 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
 	struct mmc_host *mmc;
@@ -1818,8 +1818,6 @@
 	if (res == NULL || irq < 0)
 		return -ENXIO;
 
-	res->start += pdata->reg_offset;
-	res->end += pdata->reg_offset;
 	res = request_mem_region(res->start, resource_size(res), pdev->name);
 	if (res == NULL)
 		return -EBUSY;
@@ -1843,7 +1841,7 @@
 	host->dma_ch	= -1;
 	host->irq	= irq;
 	host->slot_id	= 0;
-	host->mapbase	= res->start;
+	host->mapbase	= res->start + pdata->reg_offset;
 	host->base	= ioremap(host->mapbase, SZ_4K);
 	host->power_mode = MMC_POWER_OFF;
 	host->next_data.cookie = 1;
@@ -1875,8 +1873,6 @@
 		goto err1;
 	}
 
-	omap_hsmmc_context_save(host);
-
 	if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
 		dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
 		mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;
@@ -1887,6 +1883,8 @@
 	pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
 	pm_runtime_use_autosuspend(host->dev);
 
+	omap_hsmmc_context_save(host);
+
 	if (cpu_is_omap2430()) {
 		host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
 		/*
@@ -2018,8 +2016,7 @@
 err_irq_cd_init:
 	free_irq(host->irq, host);
 err_irq:
-	pm_runtime_mark_last_busy(host->dev);
-	pm_runtime_put_autosuspend(host->dev);
+	pm_runtime_put_sync(host->dev);
 	pm_runtime_disable(host->dev);
 	clk_put(host->fclk);
 	if (host->got_dbclk) {
@@ -2037,35 +2034,33 @@
 	return ret;
 }
 
-static int omap_hsmmc_remove(struct platform_device *pdev)
+static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
 {
 	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
 	struct resource *res;
 
-	if (host) {
-		pm_runtime_get_sync(host->dev);
-		mmc_remove_host(host->mmc);
-		if (host->use_reg)
-			omap_hsmmc_reg_put(host);
-		if (host->pdata->cleanup)
-			host->pdata->cleanup(&pdev->dev);
-		free_irq(host->irq, host);
-		if (mmc_slot(host).card_detect_irq)
-			free_irq(mmc_slot(host).card_detect_irq, host);
+	pm_runtime_get_sync(host->dev);
+	mmc_remove_host(host->mmc);
+	if (host->use_reg)
+		omap_hsmmc_reg_put(host);
+	if (host->pdata->cleanup)
+		host->pdata->cleanup(&pdev->dev);
+	free_irq(host->irq, host);
+	if (mmc_slot(host).card_detect_irq)
+		free_irq(mmc_slot(host).card_detect_irq, host);
 
-		pm_runtime_put_sync(host->dev);
-		pm_runtime_disable(host->dev);
-		clk_put(host->fclk);
-		if (host->got_dbclk) {
-			clk_disable(host->dbclk);
-			clk_put(host->dbclk);
-		}
-
-		mmc_free_host(host->mmc);
-		iounmap(host->base);
-		omap_hsmmc_gpio_free(pdev->dev.platform_data);
+	pm_runtime_put_sync(host->dev);
+	pm_runtime_disable(host->dev);
+	clk_put(host->fclk);
+	if (host->got_dbclk) {
+		clk_disable(host->dbclk);
+		clk_put(host->dbclk);
 	}
 
+	mmc_free_host(host->mmc);
+	iounmap(host->base);
+	omap_hsmmc_gpio_free(pdev->dev.platform_data);
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res)
 		release_mem_region(res->start, resource_size(res));
@@ -2078,49 +2073,45 @@
 static int omap_hsmmc_suspend(struct device *dev)
 {
 	int ret = 0;
-	struct platform_device *pdev = to_platform_device(dev);
-	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
+	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+
+	if (!host)
+		return 0;
 
 	if (host && host->suspended)
 		return 0;
 
-	if (host) {
-		pm_runtime_get_sync(host->dev);
-		host->suspended = 1;
-		if (host->pdata->suspend) {
-			ret = host->pdata->suspend(&pdev->dev,
-							host->slot_id);
-			if (ret) {
-				dev_dbg(mmc_dev(host->mmc),
-					"Unable to handle MMC board"
-					" level suspend\n");
-				host->suspended = 0;
-				return ret;
-			}
-		}
-		ret = mmc_suspend_host(host->mmc);
-
+	pm_runtime_get_sync(host->dev);
+	host->suspended = 1;
+	if (host->pdata->suspend) {
+		ret = host->pdata->suspend(dev, host->slot_id);
 		if (ret) {
+			dev_dbg(dev, "Unable to handle MMC board"
+					" level suspend\n");
 			host->suspended = 0;
-			if (host->pdata->resume) {
-				ret = host->pdata->resume(&pdev->dev,
-							  host->slot_id);
-				if (ret)
-					dev_dbg(mmc_dev(host->mmc),
-						"Unmask interrupt failed\n");
-			}
-			goto err;
+			return ret;
 		}
-
-		if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
-			omap_hsmmc_disable_irq(host);
-			OMAP_HSMMC_WRITE(host->base, HCTL,
-				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
-		}
-		if (host->got_dbclk)
-			clk_disable(host->dbclk);
-
 	}
+	ret = mmc_suspend_host(host->mmc);
+
+	if (ret) {
+		host->suspended = 0;
+		if (host->pdata->resume) {
+			ret = host->pdata->resume(dev, host->slot_id);
+			if (ret)
+				dev_dbg(dev, "Unmask interrupt failed\n");
+		}
+		goto err;
+	}
+
+	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
+		omap_hsmmc_disable_irq(host);
+		OMAP_HSMMC_WRITE(host->base, HCTL,
+				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
+	}
+
+	if (host->got_dbclk)
+		clk_disable(host->dbclk);
 err:
 	pm_runtime_put_sync(host->dev);
 	return ret;
@@ -2130,39 +2121,38 @@
 static int omap_hsmmc_resume(struct device *dev)
 {
 	int ret = 0;
-	struct platform_device *pdev = to_platform_device(dev);
-	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
+	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
+
+	if (!host)
+		return 0;
 
 	if (host && !host->suspended)
 		return 0;
 
-	if (host) {
-		pm_runtime_get_sync(host->dev);
+	pm_runtime_get_sync(host->dev);
 
-		if (host->got_dbclk)
-			clk_enable(host->dbclk);
+	if (host->got_dbclk)
+		clk_enable(host->dbclk);
 
-		if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
-			omap_hsmmc_conf_bus_power(host);
+	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
+		omap_hsmmc_conf_bus_power(host);
 
-		if (host->pdata->resume) {
-			ret = host->pdata->resume(&pdev->dev, host->slot_id);
-			if (ret)
-				dev_dbg(mmc_dev(host->mmc),
-					"Unmask interrupt failed\n");
-		}
-
-		omap_hsmmc_protect_card(host);
-
-		/* Notify the core to resume the host */
-		ret = mmc_resume_host(host->mmc);
-		if (ret == 0)
-			host->suspended = 0;
-
-		pm_runtime_mark_last_busy(host->dev);
-		pm_runtime_put_autosuspend(host->dev);
+	if (host->pdata->resume) {
+		ret = host->pdata->resume(dev, host->slot_id);
+		if (ret)
+			dev_dbg(dev, "Unmask interrupt failed\n");
 	}
 
+	omap_hsmmc_protect_card(host);
+
+	/* Notify the core to resume the host */
+	ret = mmc_resume_host(host->mmc);
+	if (ret == 0)
+		host->suspended = 0;
+
+	pm_runtime_mark_last_busy(host->dev);
+	pm_runtime_put_autosuspend(host->dev);
+
 	return ret;
 
 }
@@ -2178,7 +2168,7 @@
 
 	host = platform_get_drvdata(to_platform_device(dev));
 	omap_hsmmc_context_save(host);
-	dev_dbg(mmc_dev(host->mmc), "disabled\n");
+	dev_dbg(dev, "disabled\n");
 
 	return 0;
 }
@@ -2189,7 +2179,7 @@
 
 	host = platform_get_drvdata(to_platform_device(dev));
 	omap_hsmmc_context_restore(host);
-	dev_dbg(mmc_dev(host->mmc), "enabled\n");
+	dev_dbg(dev, "enabled\n");
 
 	return 0;
 }
@@ -2202,7 +2192,8 @@
 };
 
 static struct platform_driver omap_hsmmc_driver = {
-	.remove		= omap_hsmmc_remove,
+	.probe		= omap_hsmmc_probe,
+	.remove		= __devexit_p(omap_hsmmc_remove),
 	.driver		= {
 		.name = DRIVER_NAME,
 		.owner = THIS_MODULE,
@@ -2211,21 +2202,7 @@
 	},
 };
 
-static int __init omap_hsmmc_init(void)
-{
-	/* Register the MMC driver */
-	return platform_driver_probe(&omap_hsmmc_driver, omap_hsmmc_probe);
-}
-
-static void __exit omap_hsmmc_cleanup(void)
-{
-	/* Unregister MMC driver */
-	platform_driver_unregister(&omap_hsmmc_driver);
-}
-
-module_init(omap_hsmmc_init);
-module_exit(omap_hsmmc_cleanup);
-
+module_platform_driver(omap_hsmmc_driver);
 MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
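
For illustration (not part of the patch): module_platform_driver() stands in for the init/exit boilerplate removed above, roughly as sketched below. Dropping platform_driver_probe() is also why .probe is now registered in the driver struct and annotated __devinit instead of __init.

/* Rough equivalent of module_platform_driver(omap_hsmmc_driver): */
static int __init omap_hsmmc_driver_init(void)
{
	return platform_driver_register(&omap_hsmmc_driver);
}
module_init(omap_hsmmc_driver_init);

static void __exit omap_hsmmc_driver_exit(void)
{
	platform_driver_unregister(&omap_hsmmc_driver);
}
module_exit(omap_hsmmc_driver_exit);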
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 46fd1fd..177f697 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -20,6 +20,7 @@
  */
 
 #include <linux/io.h>
+#include <linux/module.h>
 #include <linux/mmc/host.h>
 
 #include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 6193a0d..8abdaf6 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -467,8 +467,7 @@
 	clk_prepare_enable(clk);
 	pltfm_host->clk = clk;
 
-	if (!is_imx25_esdhc(imx_data))
-		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+	host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
 
 	if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
 		/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index fbbebe2..69ef0be 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -1418,8 +1418,6 @@
 
 	slots = chip->num_slots;	/* Quirk may have changed this */
 
-	pci_enable_msi(pdev);
-
 	for (i = 0; i < slots; i++) {
 		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
 		if (IS_ERR(slot)) {
@@ -1438,8 +1436,6 @@
 	return 0;
 
 free:
-	pci_disable_msi(pdev);
-
 	pci_set_drvdata(pdev, NULL);
 	kfree(chip);
 
@@ -1462,8 +1458,6 @@
 		for (i = 0; i < chip->num_slots; i++)
 			sdhci_pci_remove_slot(chip->slots[i]);
 
-		pci_disable_msi(pdev);
-
 		pci_set_drvdata(pdev, NULL);
 		kfree(chip);
 	}
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index b19e7d4..55a164f 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -20,6 +20,10 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/mmc/host.h>
 
@@ -53,6 +57,18 @@
 	struct clk		*clk_bus[MAX_BUS_CLK];
 };
 
+/**
+ * struct sdhci_s3c_driver_data - S3C SDHCI platform-specific driver data
+ * @sdhci_quirks: sdhci host-specific quirks.
+ *
+ * Specifies the platform-specific configuration of the sdhci controller.
+ * Note: A dedicated structure is used for the driver-specific platform data
+ * to allow for future expansion of its usage.
+ */
+struct sdhci_s3c_drv_data {
+	unsigned int	sdhci_quirks;
+};
+
 static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
 {
 	return sdhci_priv(host);
@@ -132,10 +148,10 @@
 		return UINT_MAX;
 
 	/*
-	 * Clock divider's step is different as 1 from that of host controller
-	 * when 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL.
+	 * If the controller uses a non-standard clock division, find the best
+	 * clock speed possible with the selected source and skip the division.
 	 */
-	if (ourhost->pdata->clk_type) {
+	if (ourhost->host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
 		rate = clk_round_rate(clksrc, wanted);
 		return wanted - rate;
 	}
@@ -272,6 +288,8 @@
 static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
 {
 	struct sdhci_s3c *ourhost = to_s3c(host);
+	unsigned long timeout;
+	u16 clk = 0;
 
 	/* don't bother if the clock is going off */
 	if (clock == 0)
@@ -282,6 +300,25 @@
 	clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
 
 	host->clock = clock;
+
+	clk = SDHCI_CLOCK_INT_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+	/* Wait max 20 ms */
+	timeout = 20;
+	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+		& SDHCI_CLOCK_INT_STABLE)) {
+		if (timeout == 0) {
+			printk(KERN_ERR "%s: Internal clock never "
+				"stabilised.\n", mmc_hostname(host->mmc));
+			return;
+		}
+		timeout--;
+		mdelay(1);
+	}
+
+	clk |= SDHCI_CLOCK_CARD_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 }
 
 /**
@@ -382,16 +419,24 @@
 	}
 }
 
+static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data(
+			struct platform_device *pdev)
+{
+	return (struct sdhci_s3c_drv_data *)
+			platform_get_device_id(pdev)->driver_data;
+}
+
 static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
 {
-	struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
+	struct s3c_sdhci_platdata *pdata;
+	struct sdhci_s3c_drv_data *drv_data;
 	struct device *dev = &pdev->dev;
 	struct sdhci_host *host;
 	struct sdhci_s3c *sc;
 	struct resource *res;
 	int ret, irq, ptr, clks;
 
-	if (!pdata) {
+	if (!pdev->dev.platform_data) {
 		dev_err(dev, "no device data specified\n");
 		return -ENOENT;
 	}
@@ -402,18 +447,20 @@
 		return irq;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "no memory specified\n");
-		return -ENOENT;
-	}
-
 	host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
 	if (IS_ERR(host)) {
 		dev_err(dev, "sdhci_alloc_host() failed\n");
 		return PTR_ERR(host);
 	}
 
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		ret = -ENOMEM;
+		goto err_io_clk;
+	}
+	memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
+
+	drv_data = sdhci_s3c_get_driver_data(pdev);
 	sc = sdhci_priv(host);
 
 	sc->host = host;
@@ -464,15 +511,8 @@
 		goto err_no_busclks;
 	}
 
-	sc->ioarea = request_mem_region(res->start, resource_size(res),
-					mmc_hostname(host->mmc));
-	if (!sc->ioarea) {
-		dev_err(dev, "failed to reserve register area\n");
-		ret = -ENXIO;
-		goto err_req_regs;
-	}
-
-	host->ioaddr = ioremap_nocache(res->start, resource_size(res));
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	host->ioaddr = devm_request_and_ioremap(&pdev->dev, res);
 	if (!host->ioaddr) {
 		dev_err(dev, "failed to map registers\n");
 		ret = -ENXIO;
@@ -491,6 +531,8 @@
 	/* Setup quirks for the controller */
 	host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
 	host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
+	if (drv_data)
+		host->quirks |= drv_data->sdhci_quirks;
 
 #ifndef CONFIG_MMC_SDHCI_S3C_DMA
 
@@ -518,6 +560,14 @@
 	if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
 		host->mmc->caps = MMC_CAP_NONREMOVABLE;
 
+	switch (pdata->max_width) {
+	case 8:
+		host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+	case 4:
+		host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+		break;
+	}
+
 	if (pdata->pm_caps)
 		host->mmc->pm_caps |= pdata->pm_caps;
 
@@ -531,7 +581,7 @@
 	 * If controller does not have internal clock divider,
 	 * we can use overriding functions instead of default.
 	 */
-	if (pdata->clk_type) {
+	if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
 		sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
 		sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
 		sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
@@ -544,10 +594,17 @@
 	if (pdata->host_caps2)
 		host->mmc->caps2 |= pdata->host_caps2;
 
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_suspend_ignore_children(&pdev->dev, 1);
+
 	ret = sdhci_add_host(host);
 	if (ret) {
 		dev_err(dev, "sdhci_add_host() failed\n");
-		goto err_add_host;
+		pm_runtime_forbid(&pdev->dev);
+		pm_runtime_get_noresume(&pdev->dev);
+		goto err_req_regs;
 	}
 
 	/* The following two methods of card detection might call
@@ -561,10 +618,6 @@
 
 	return 0;
 
- err_add_host:
-	release_resource(sc->ioarea);
-	kfree(sc->ioarea);
-
  err_req_regs:
 	for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
 		if (sc->clk_bus[ptr]) {
@@ -601,6 +654,8 @@
 
 	sdhci_remove_host(host, 1);
 
+	pm_runtime_disable(&pdev->dev);
+
 	for (ptr = 0; ptr < 3; ptr++) {
 		if (sc->clk_bus[ptr]) {
 			clk_disable(sc->clk_bus[ptr]);
@@ -610,18 +665,13 @@
 	clk_disable(sc->clk_io);
 	clk_put(sc->clk_io);
 
-	iounmap(host->ioaddr);
-	release_resource(sc->ioarea);
-	kfree(sc->ioarea);
-
 	sdhci_free_host(host);
 	platform_set_drvdata(pdev, NULL);
 
 	return 0;
 }
 
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
 static int sdhci_s3c_suspend(struct device *dev)
 {
 	struct sdhci_host *host = dev_get_drvdata(dev);
@@ -635,10 +685,29 @@
 
 	return sdhci_resume_host(host);
 }
+#endif
 
+#ifdef CONFIG_PM_RUNTIME
+static int sdhci_s3c_runtime_suspend(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+
+	return sdhci_runtime_suspend_host(host);
+}
+
+static int sdhci_s3c_runtime_resume(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+
+	return sdhci_runtime_resume_host(host);
+}
+#endif
+
+#ifdef CONFIG_PM
 static const struct dev_pm_ops sdhci_s3c_pmops = {
-	.suspend	= sdhci_s3c_suspend,
-	.resume		= sdhci_s3c_resume,
+	SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume)
+	SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume,
+			   NULL)
 };
 
 #define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
@@ -647,9 +716,31 @@
 #define SDHCI_S3C_PMOPS NULL
 #endif
 
+#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
+static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
+	.sdhci_quirks = SDHCI_QUIRK_NONSTANDARD_CLOCK,
+};
+#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
+#else
+#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL)
+#endif
+
+static struct platform_device_id sdhci_s3c_driver_ids[] = {
+	{
+		.name		= "s3c-sdhci",
+		.driver_data	= (kernel_ulong_t)NULL,
+	}, {
+		.name		= "exynos4-sdhci",
+		.driver_data	= EXYNOS4_SDHCI_DRV_DATA,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
+
 static struct platform_driver sdhci_s3c_driver = {
 	.probe		= sdhci_s3c_probe,
 	.remove		= __devexit_p(sdhci_s3c_remove),
+	.id_table	= sdhci_s3c_driver_ids,
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "s3c-sdhci",
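
For illustration (not part of the patch): the exynos4 variant handling above uses the common trick of stashing a pointer in platform_device_id.driver_data and recovering it with platform_get_device_id(). A minimal stand-alone sketch, with hypothetical names:

#include <linux/platform_device.h>

struct my_drv_data {
	unsigned int quirks;	/* variant-specific quirk bits */
};

static struct my_drv_data my_exynos4_data = { .quirks = 0x1 };

static const struct platform_device_id my_ids[] = {
	{ .name = "my-plain",   .driver_data = (kernel_ulong_t)NULL },
	{ .name = "my-exynos4", .driver_data = (kernel_ulong_t)&my_exynos4_data },
	{ }
};

static int my_probe(struct platform_device *pdev)
{
	struct my_drv_data *data = (struct my_drv_data *)
			platform_get_device_id(pdev)->driver_data;

	if (data)	/* NULL means the plain variant with no extra quirks */
		dev_info(&pdev->dev, "quirks: 0x%x\n", data->quirks);
	return 0;
}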
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8262cad..ccefdeb 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -147,7 +147,7 @@
 	u32 present, irqs;
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
-	    !mmc_card_is_removable(host->mmc))
+	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
 		return;
 
 	present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
@@ -2782,8 +2782,9 @@
 	    mmc_card_is_removable(mmc))
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
-	/* UHS-I mode(s) supported by the host controller. */
-	if (host->version >= SDHCI_SPEC_300)
+	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
+	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
+		       SDHCI_SUPPORT_DDR50))
 		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
 
 	/* SDR104 support also implies SDR50 support */
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index aafaf0b..724b35e 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -454,7 +454,8 @@
 		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
 	else
 		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
-				((fls(host->clk / clk) - 1) << 16));
+				((fls(DIV_ROUND_UP(host->clk,
+						   clk) - 1) - 1) << 16));
 
 	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
 }
@@ -1297,14 +1298,8 @@
 	spin_lock_init(&host->lock);
 
 	mmc->ops = &sh_mmcif_ops;
-	mmc->f_max = host->clk;
-	/* close to 400KHz */
-	if (mmc->f_max < 51200000)
-		mmc->f_min = mmc->f_max / 128;
-	else if (mmc->f_max < 102400000)
-		mmc->f_min = mmc->f_max / 256;
-	else
-		mmc->f_min = mmc->f_max / 512;
+	mmc->f_max = host->clk / 2;
+	mmc->f_min = host->clk / 512;
 	if (pd->ocr)
 		mmc->ocr_avail = pd->ocr;
 	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 94eb05b..58fc65f 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -106,16 +106,14 @@
 	}
 
 	if (mtd->type == MTD_ABSENT) {
-		put_mtd_device(mtd);
 		ret = -ENODEV;
-		goto out;
+		goto out1;
 	}
 
 	mtd_ino = iget_locked(mnt->mnt_sb, devnum);
 	if (!mtd_ino) {
-		put_mtd_device(mtd);
 		ret = -ENOMEM;
-		goto out;
+		goto out1;
 	}
 	if (mtd_ino->i_state & I_NEW) {
 		mtd_ino->i_private = mtd;
@@ -127,23 +125,25 @@
 
 	/* You can't open it RW if it's not a writeable device */
 	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
-		iput(mtd_ino);
-		put_mtd_device(mtd);
 		ret = -EACCES;
-		goto out;
+		goto out2;
 	}
 
 	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
 	if (!mfi) {
-		iput(mtd_ino);
-		put_mtd_device(mtd);
 		ret = -ENOMEM;
-		goto out;
+		goto out2;
 	}
 	mfi->ino = mtd_ino;
 	mfi->mtd = mtd;
 	file->private_data = mfi;
+	mutex_unlock(&mtd_mutex);
+	return 0;
 
+out2:
+	iput(mtd_ino);
+out1:
+	put_mtd_device(mtd);
 out:
 	mutex_unlock(&mtd_mutex);
 	simple_release_fs(&mnt, &count);
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 25197b6..b8b4c7b 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -89,16 +89,16 @@
 	BUGLVL(D_NORMAL) printk(VERSION);
 	BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n");
 
-	BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n",
+	BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n",
 	       dev->dev_addr[0], dev->mem_start, dev->irq);
 
 	if (dev->mem_start <= 0 || dev->irq <= 0) {
-		BUGMSG(D_NORMAL, "No autoprobe for RIM I; you "
+		BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you "
 		       "must specify the shmem and irq!\n");
 		return -ENODEV;
 	}
 	if (dev->dev_addr[0] == 0) {
-		BUGMSG(D_NORMAL, "You need to specify your card's station "
+		BUGLVL(D_NORMAL) printk("You need to specify your card's station "
 		       "ID!\n");
 		return -ENODEV;
 	}
@@ -109,7 +109,7 @@
 	 * will be taken.
 	 */
 	if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) {
-		BUGMSG(D_NORMAL, "Card memory already allocated\n");
+		BUGLVL(D_NORMAL) printk("Card memory already allocated\n");
 		return -ENODEV;
 	}
 	return arcrimi_found(dev);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 62d2409..44e6a64 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1726,7 +1726,8 @@
 
 	read_lock(&bond->lock);
 
-	new_slave->last_arp_rx = jiffies;
+	new_slave->last_arp_rx = jiffies -
+		(msecs_to_jiffies(bond->params.arp_interval) + 1);
 
 	if (bond->params.miimon && !bond->params.use_carrier) {
 		link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1751,22 +1752,30 @@
 	}
 
 	/* check for initial state */
-	if (!bond->params.miimon ||
-	    (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) {
-		if (bond->params.updelay) {
-			pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n");
-			new_slave->link  = BOND_LINK_BACK;
-			new_slave->delay = bond->params.updelay;
+	if (bond->params.miimon) {
+		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
+			if (bond->params.updelay) {
+				new_slave->link = BOND_LINK_BACK;
+				new_slave->delay = bond->params.updelay;
+			} else {
+				new_slave->link = BOND_LINK_UP;
+			}
 		} else {
-			pr_debug("Initial state of slave_dev is BOND_LINK_UP\n");
-			new_slave->link  = BOND_LINK_UP;
+			new_slave->link = BOND_LINK_DOWN;
 		}
-		new_slave->jiffies = jiffies;
+	} else if (bond->params.arp_interval) {
+		new_slave->link = (netif_carrier_ok(slave_dev) ?
+			BOND_LINK_UP : BOND_LINK_DOWN);
 	} else {
-		pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n");
-		new_slave->link  = BOND_LINK_DOWN;
+		new_slave->link = BOND_LINK_UP;
 	}
 
+	if (new_slave->link != BOND_LINK_DOWN)
+		new_slave->jiffies = jiffies;
+	pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
+		new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+			(new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+
 	bond_update_speed_duplex(new_slave);
 
 	if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
@@ -4820,12 +4829,9 @@
 	return 0;
 }
 
-static int bond_get_tx_queues(struct net *net, struct nlattr *tb[],
-			      unsigned int *num_queues,
-			      unsigned int *real_num_queues)
+static int bond_get_tx_queues(struct net *net, struct nlattr *tb[])
 {
-	*num_queues = tx_queues;
-	return 0;
+	return tx_queues;
 }
 
 static struct rtnl_link_ops bond_link_ops __read_mostly = {
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 9a66e2a..1520814 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -6,6 +6,8 @@
  * License terms: GNU General Public License (GPL) version 2.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/device.h>
@@ -19,6 +21,7 @@
 #include <linux/if_arp.h>
 #include <linux/timer.h>
 #include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/caif_hsi.h>
 
@@ -34,6 +37,10 @@
 module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
 
+static int aggregation_timeout = 1;
+module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
+
 /*
  * HSI padding options.
  * Warning: must be a base of 2 (& operation used) and can not be zero !
@@ -86,24 +93,84 @@
 		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
 }
 
+static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
+					   const struct sk_buff *skb,
+					   int direction)
+{
+	struct caif_payload_info *info;
+	int hpad, tpad, len;
+
+	info = (struct caif_payload_info *)&skb->cb;
+	hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
+	tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+	len = skb->len + hpad + tpad;
+
+	if (direction > 0)
+		cfhsi->aggregation_len += len;
+	else if (direction < 0)
+		cfhsi->aggregation_len -= len;
+}
+
+static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
+{
+	int i;
+
+	if (cfhsi->aggregation_timeout < 0)
+		return true;
+
+	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
+		if (cfhsi->qhead[i].qlen)
+			return true;
+	}
+
+	/* TODO: Use aggregation_len instead */
+	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
+		return true;
+
+	return false;
+}
+
+static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
+{
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
+		skb = skb_dequeue(&cfhsi->qhead[i]);
+		if (skb)
+			break;
+	}
+
+	return skb;
+}
+
+static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
+{
+	int i, len = 0;
+	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
+		len += skb_queue_len(&cfhsi->qhead[i]);
+	return len;
+}
+
 static void cfhsi_abort_tx(struct cfhsi *cfhsi)
 {
 	struct sk_buff *skb;
 
 	for (;;) {
 		spin_lock_bh(&cfhsi->lock);
-		skb = skb_dequeue(&cfhsi->qhead);
+		skb = cfhsi_dequeue(cfhsi);
 		if (!skb)
 			break;
 
 		cfhsi->ndev->stats.tx_errors++;
 		cfhsi->ndev->stats.tx_dropped++;
+		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
 		spin_unlock_bh(&cfhsi->lock);
 		kfree_skb(skb);
 	}
 	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-		mod_timer(&cfhsi->timer,
+		mod_timer(&cfhsi->inactivity_timer,
 			jiffies + cfhsi->inactivity_timeout);
 	spin_unlock_bh(&cfhsi->lock);
 }
@@ -169,7 +236,7 @@
 	struct sk_buff *skb;
 	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 
-	skb = skb_dequeue(&cfhsi->qhead);
+	skb = cfhsi_dequeue(cfhsi);
 	if (!skb)
 		return 0;
 
@@ -196,11 +263,16 @@
 			pemb += hpad;
 
 			/* Update network statistics. */
+			spin_lock_bh(&cfhsi->lock);
 			cfhsi->ndev->stats.tx_packets++;
 			cfhsi->ndev->stats.tx_bytes += skb->len;
+			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
+			spin_unlock_bh(&cfhsi->lock);
 
 			/* Copy in embedded CAIF frame. */
 			skb_copy_bits(skb, 0, pemb, skb->len);
+
+			/* Consume the SKB */
 			consume_skb(skb);
 			skb = NULL;
 		}
@@ -214,7 +286,7 @@
 		int tpad = 0;
 
 		if (!skb)
-			skb = skb_dequeue(&cfhsi->qhead);
+			skb = cfhsi_dequeue(cfhsi);
 
 		if (!skb)
 			break;
@@ -233,8 +305,11 @@
 		pfrm += hpad;
 
 		/* Update network statistics. */
+		spin_lock_bh(&cfhsi->lock);
 		cfhsi->ndev->stats.tx_packets++;
 		cfhsi->ndev->stats.tx_bytes += skb->len;
+		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
+		spin_unlock_bh(&cfhsi->lock);
 
 		/* Copy in CAIF frame. */
 		skb_copy_bits(skb, 0, pfrm, skb->len);
@@ -244,6 +319,8 @@
 
 		/* Update frame pointer. */
 		pfrm += skb->len + tpad;
+
+		/* Consume the SKB */
 		consume_skb(skb);
 		skb = NULL;
 
@@ -258,8 +335,7 @@
 	}
 
 	/* Check if we can piggy-back another descriptor. */
-	skb = skb_peek(&cfhsi->qhead);
-	if (skb)
+	if (cfhsi_can_send_aggregate(cfhsi))
 		desc->header |= CFHSI_PIGGY_DESC;
 	else
 		desc->header &= ~CFHSI_PIGGY_DESC;
@@ -267,61 +343,71 @@
 	return CFHSI_DESC_SZ + pld_len;
 }
 
-static void cfhsi_tx_done(struct cfhsi *cfhsi)
+static void cfhsi_start_tx(struct cfhsi *cfhsi)
 {
-	struct cfhsi_desc *desc = NULL;
-	int len = 0;
-	int res;
+	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
+	int len, res;
 
 	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
 
-	desc = (struct cfhsi_desc *)cfhsi->tx_buf;
-
 	do {
-		/*
-		 * Send flow on if flow off has been previously signalled
-		 * and number of packets is below low water mark.
-		 */
-		spin_lock_bh(&cfhsi->lock);
-		if (cfhsi->flow_off_sent &&
-				cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
-				cfhsi->cfdev.flowctrl) {
-
-			cfhsi->flow_off_sent = 0;
-			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
-		}
-		spin_unlock_bh(&cfhsi->lock);
-
 		/* Create HSI frame. */
-		do {
-			len = cfhsi_tx_frm(desc, cfhsi);
-			if (!len) {
-				spin_lock_bh(&cfhsi->lock);
-				if (unlikely(skb_peek(&cfhsi->qhead))) {
-					spin_unlock_bh(&cfhsi->lock);
-					continue;
-				}
-				cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
-				/* Start inactivity timer. */
-				mod_timer(&cfhsi->timer,
-					jiffies + cfhsi->inactivity_timeout);
+		len = cfhsi_tx_frm(desc, cfhsi);
+		if (!len) {
+			spin_lock_bh(&cfhsi->lock);
+			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
 				spin_unlock_bh(&cfhsi->lock);
-				goto done;
+				res = -EAGAIN;
+				continue;
 			}
-		} while (!len);
+			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+			/* Start inactivity timer. */
+			mod_timer(&cfhsi->inactivity_timer,
+				jiffies + cfhsi->inactivity_timeout);
+			spin_unlock_bh(&cfhsi->lock);
+			break;
+		}
 
 		/* Set up new transfer. */
 		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
-		if (WARN_ON(res < 0)) {
+		if (WARN_ON(res < 0))
 			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
 				__func__, res);
-		}
 	} while (res < 0);
+}
 
-done:
+static void cfhsi_tx_done(struct cfhsi *cfhsi)
+{
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	/*
+	 * Send flow on if flow off has been previously signalled
+	 * and number of packets is below low water mark.
+	 */
+	spin_lock_bh(&cfhsi->lock);
+	if (cfhsi->flow_off_sent &&
+			cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
+			cfhsi->cfdev.flowctrl) {
+
+		cfhsi->flow_off_sent = 0;
+		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
+	}
+
+	if (cfhsi_can_send_aggregate(cfhsi)) {
+		spin_unlock_bh(&cfhsi->lock);
+		cfhsi_start_tx(cfhsi);
+	} else {
+		mod_timer(&cfhsi->aggregation_timer,
+			jiffies + cfhsi->aggregation_timeout);
+		spin_unlock_bh(&cfhsi->lock);
+	}
+
 	return;
 }
 
@@ -560,7 +646,7 @@
 
 	/* Update inactivity timer if pending. */
 	spin_lock_bh(&cfhsi->lock);
-	mod_timer_pending(&cfhsi->timer,
+	mod_timer_pending(&cfhsi->inactivity_timer,
 			jiffies + cfhsi->inactivity_timeout);
 	spin_unlock_bh(&cfhsi->lock);
 
@@ -744,14 +830,14 @@
 		size_t fifo_occupancy = 0;
 
 		/* Wakeup timeout */
-		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
+		dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
 			__func__);
 
 		/* Check FIFO to check if modem has sent something. */
 		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
 					&fifo_occupancy));
 
-		dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
+		dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
 				__func__, (unsigned) fifo_occupancy);
 
 		/* Check if we missed the interrupt. */
@@ -793,12 +879,12 @@
 
 	spin_lock_bh(&cfhsi->lock);
 
-	/* Resume transmit if queue is not empty. */
-	if (!skb_peek(&cfhsi->qhead)) {
+	/* Resume transmit if queues are not empty. */
+	if (!cfhsi_tx_queue_len(cfhsi)) {
 		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
 			__func__);
 		/* Start inactivity timer. */
-		mod_timer(&cfhsi->timer,
+		mod_timer(&cfhsi->inactivity_timer,
 				jiffies + cfhsi->inactivity_timeout);
 		spin_unlock_bh(&cfhsi->lock);
 		return;
@@ -934,20 +1020,53 @@
 	wake_up_interruptible(&cfhsi->wake_down_wait);
 }
 
+static void cfhsi_aggregation_tout(unsigned long arg)
+{
+	struct cfhsi *cfhsi = (struct cfhsi *)arg;
+
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	cfhsi_start_tx(cfhsi);
+}
+
 static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct cfhsi *cfhsi = NULL;
 	int start_xfer = 0;
 	int timer_active;
+	int prio;
 
 	if (!dev)
 		return -EINVAL;
 
 	cfhsi = netdev_priv(dev);
 
+	switch (skb->priority) {
+	case TC_PRIO_BESTEFFORT:
+	case TC_PRIO_FILLER:
+	case TC_PRIO_BULK:
+		prio = CFHSI_PRIO_BEBK;
+		break;
+	case TC_PRIO_INTERACTIVE_BULK:
+		prio = CFHSI_PRIO_VI;
+		break;
+	case TC_PRIO_INTERACTIVE:
+		prio = CFHSI_PRIO_VO;
+		break;
+	case TC_PRIO_CONTROL:
+	default:
+		prio = CFHSI_PRIO_CTL;
+		break;
+	}
+
 	spin_lock_bh(&cfhsi->lock);
 
-	skb_queue_tail(&cfhsi->qhead, skb);
+	/* Update aggregation statistics  */
+	cfhsi_update_aggregation_stats(cfhsi, skb, 1);
+
+	/* Queue the SKB */
+	skb_queue_tail(&cfhsi->qhead[prio], skb);
 
 	/* Sanity check; xmit should not be called after unregister_netdev */
 	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
@@ -958,7 +1077,7 @@
 
 	/* Send flow off if number of packets is above high water mark. */
 	if (!cfhsi->flow_off_sent &&
-		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
+		cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
 		cfhsi->cfdev.flowctrl) {
 		cfhsi->flow_off_sent = 1;
 		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -970,12 +1089,18 @@
 	}
 
 	if (!start_xfer) {
+		/* Send aggregate if it is possible */
+		bool aggregate_ready =
+			cfhsi_can_send_aggregate(cfhsi) &&
+			del_timer(&cfhsi->aggregation_timer) > 0;
 		spin_unlock_bh(&cfhsi->lock);
+		if (aggregate_ready)
+			cfhsi_start_tx(cfhsi);
 		return 0;
 	}
 
 	/* Delete inactivity timer if started. */
-	timer_active = del_timer_sync(&cfhsi->timer);
+	timer_active = del_timer_sync(&cfhsi->inactivity_timer);
 
 	spin_unlock_bh(&cfhsi->lock);
 
@@ -1004,28 +1129,11 @@
 	return 0;
 }
 
-static int cfhsi_open(struct net_device *dev)
-{
-	netif_wake_queue(dev);
-
-	return 0;
-}
-
-static int cfhsi_close(struct net_device *dev)
-{
-	netif_stop_queue(dev);
-
-	return 0;
-}
-
-static const struct net_device_ops cfhsi_ops = {
-	.ndo_open = cfhsi_open,
-	.ndo_stop = cfhsi_close,
-	.ndo_start_xmit = cfhsi_xmit
-};
+static const struct net_device_ops cfhsi_ops;
 
 static void cfhsi_setup(struct net_device *dev)
 {
+	int i;
 	struct cfhsi *cfhsi = netdev_priv(dev);
 	dev->features = 0;
 	dev->netdev_ops = &cfhsi_ops;
@@ -1034,7 +1142,8 @@
 	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
 	dev->tx_queue_len = 0;
 	dev->destructor = free_netdev;
-	skb_queue_head_init(&cfhsi->qhead);
+	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
+		skb_queue_head_init(&cfhsi->qhead[i]);
 	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
 	cfhsi->cfdev.use_frag = false;
 	cfhsi->cfdev.use_stx = false;
@@ -1046,7 +1155,7 @@
 {
 	struct cfhsi *cfhsi = NULL;
 	struct net_device *ndev;
-	struct cfhsi_dev *dev;
+
 	int res;
 
 	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
@@ -1057,6 +1166,34 @@
 	cfhsi->ndev = ndev;
 	cfhsi->pdev = pdev;
 
+	/* Assign the HSI device. */
+	cfhsi->dev = pdev->dev.platform_data;
+
+	/* Assign the driver to this HSI device. */
+	cfhsi->dev->drv = &cfhsi->drv;
+
+	/* Register network device. */
+	res = register_netdev(ndev);
+	if (res) {
+		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
+			__func__, res);
+		free_netdev(ndev);
+	}
+	/* Add CAIF HSI device to list. */
+	spin_lock(&cfhsi_list_lock);
+	list_add_tail(&cfhsi->list, &cfhsi_list);
+	spin_unlock(&cfhsi_list_lock);
+
+	return res;
+}
+
+static int cfhsi_open(struct net_device *ndev)
+{
+	struct cfhsi *cfhsi = netdev_priv(ndev);
+	int res;
+
+	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
+
 	/* Initialize state variables. */
 	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
@@ -1066,12 +1203,6 @@
 	cfhsi->q_low_mark = LOW_WATER_MARK;
 	cfhsi->q_high_mark = HIGH_WATER_MARK;
 
-	/* Assign the HSI device. */
-	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
-	cfhsi->dev = dev;
-
-	/* Assign the driver to this HSI device. */
-	dev->drv = &cfhsi->drv;
 
 	/*
 	 * Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1111,6 +1242,9 @@
 		cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
 	}
 
+	/* Initialize aggregation timeout */
+	cfhsi->aggregation_timeout = aggregation_timeout;
+
 	/* Initialize receive variables. */
 	cfhsi->rx_ptr = cfhsi->rx_buf;
 	cfhsi->rx_len = CFHSI_DESC_SZ;
@@ -1136,9 +1270,9 @@
 	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
 
 	/* Create work thread. */
-	cfhsi->wq = create_singlethread_workqueue(pdev->name);
+	cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
 	if (!cfhsi->wq) {
-		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
+		dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n",
 			__func__);
 		res = -ENODEV;
 		goto err_create_wq;
@@ -1150,18 +1284,17 @@
 	init_waitqueue_head(&cfhsi->flush_fifo_wait);
 
 	/* Setup the inactivity timer. */
-	init_timer(&cfhsi->timer);
-	cfhsi->timer.data = (unsigned long)cfhsi;
-	cfhsi->timer.function = cfhsi_inactivity_tout;
+	init_timer(&cfhsi->inactivity_timer);
+	cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
+	cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
 	/* Setup the slowpath RX timer. */
 	init_timer(&cfhsi->rx_slowpath_timer);
 	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
 	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
-
-	/* Add CAIF HSI device to list. */
-	spin_lock(&cfhsi_list_lock);
-	list_add_tail(&cfhsi->list, &cfhsi_list);
-	spin_unlock(&cfhsi_list_lock);
+	/* Setup the aggregation timer. */
+	init_timer(&cfhsi->aggregation_timer);
+	cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
+	cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
 
 	/* Activate HSI interface. */
 	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
@@ -1175,21 +1308,10 @@
 	/* Flush FIFO */
 	res = cfhsi_flush_fifo(cfhsi);
 	if (res) {
-		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
+		dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n",
 			__func__, res);
 		goto err_net_reg;
 	}
-
-	/* Register network device. */
-	res = register_netdev(ndev);
-	if (res) {
-		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
-			__func__, res);
-		goto err_net_reg;
-	}
-
-	netif_stop_queue(ndev);
-
 	return res;
 
  err_net_reg:
@@ -1203,17 +1325,13 @@
  err_alloc_rx:
 	kfree(cfhsi->tx_buf);
  err_alloc_tx:
-	free_netdev(ndev);
-
 	return res;
 }
 
-static void cfhsi_shutdown(struct cfhsi *cfhsi)
+static int cfhsi_close(struct net_device *ndev)
 {
-	u8 *tx_buf, *rx_buf;
-
-	/* Stop TXing */
-	netif_tx_stop_all_queues(cfhsi->ndev);
+	struct cfhsi *cfhsi = netdev_priv(ndev);
+	u8 *tx_buf, *rx_buf, *flip_buf;
 
 	/* going to shutdown driver */
 	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
@@ -1222,8 +1340,9 @@
 	flush_workqueue(cfhsi->wq);
 
 	/* Delete timers if pending */
-	del_timer_sync(&cfhsi->timer);
+	del_timer_sync(&cfhsi->inactivity_timer);
 	del_timer_sync(&cfhsi->rx_slowpath_timer);
+	del_timer_sync(&cfhsi->aggregation_timer);
 
 	/* Cancel pending RX request (if any) */
 	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
@@ -1234,21 +1353,26 @@
 	/* Store bufferes: will be freed later. */
 	tx_buf = cfhsi->tx_buf;
 	rx_buf = cfhsi->rx_buf;
-
+	flip_buf = cfhsi->rx_flip_buf;
 	/* Flush transmit queues. */
 	cfhsi_abort_tx(cfhsi);
 
 	/* Deactivate interface */
 	cfhsi->dev->cfhsi_down(cfhsi->dev);
 
-	/* Finally unregister the network device. */
-	unregister_netdev(cfhsi->ndev);
-
 	/* Free buffers. */
 	kfree(tx_buf);
 	kfree(rx_buf);
+	kfree(flip_buf);
+	return 0;
 }
 
+static const struct net_device_ops cfhsi_ops = {
+	.ndo_open = cfhsi_open,
+	.ndo_stop = cfhsi_close,
+	.ndo_start_xmit = cfhsi_xmit
+};
+
 int cfhsi_remove(struct platform_device *pdev)
 {
 	struct list_head *list_node;
@@ -1265,10 +1389,6 @@
 			/* Remove from list. */
 			list_del(list_node);
 			spin_unlock(&cfhsi_list_lock);
-
-			/* Shutdown driver. */
-			cfhsi_shutdown(cfhsi);
-
 			return 0;
 		}
 	}
@@ -1299,8 +1419,7 @@
 		list_del(list_node);
 		spin_unlock(&cfhsi_list_lock);
 
-		/* Shutdown driver. */
-		cfhsi_shutdown(cfhsi);
+		unregister_netdevice(cfhsi->ndev);
 
 		spin_lock(&cfhsi_list_lock);
 	}
@@ -1325,8 +1444,6 @@
 		goto err_dev_register;
 	}
 
-	return result;
-
  err_dev_register:
 	return result;
 }
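The caif_hsi.c changes above replace the single qhead with one queue per CFHSI priority class and map skb->priority onto those classes in cfhsi_xmit() before queueing; cfhsi_dequeue() then scans the queues from index 0 upwards and takes the first non-empty one. As a rough illustration of the mapping (a standalone sketch, not the driver code: the TC_PRIO_* values are assumed to mirror include/uapi/linux/pkt_sched.h, and the CFHSI_PRIO_* numbering is a hypothetical local copy where index 0 is assumed to be the highest-priority control class):

#include <stdio.h>

/* Assumed copies of the kernel's TC_PRIO_* values (pkt_sched.h). */
enum { TC_PRIO_BESTEFFORT = 0, TC_PRIO_FILLER = 1, TC_PRIO_BULK = 2,
       TC_PRIO_INTERACTIVE_BULK = 4, TC_PRIO_INTERACTIVE = 6,
       TC_PRIO_CONTROL = 7 };

/* Hypothetical mirror of the CFHSI priority classes used above. */
enum { CFHSI_PRIO_CTL = 0, CFHSI_PRIO_VO, CFHSI_PRIO_VI, CFHSI_PRIO_BEBK,
       CFHSI_PRIO_LAST };

static int skb_prio_to_cfhsi_queue(int skb_priority)
{
	switch (skb_priority) {
	case TC_PRIO_BESTEFFORT:
	case TC_PRIO_FILLER:
	case TC_PRIO_BULK:
		return CFHSI_PRIO_BEBK;		/* best effort / background */
	case TC_PRIO_INTERACTIVE_BULK:
		return CFHSI_PRIO_VI;		/* video */
	case TC_PRIO_INTERACTIVE:
		return CFHSI_PRIO_VO;		/* voice */
	case TC_PRIO_CONTROL:
	default:
		return CFHSI_PRIO_CTL;		/* control traffic wins */
	}
}

int main(void)
{
	for (int p = 0; p <= TC_PRIO_CONTROL; p++)
		printf("skb->priority %d -> queue %d\n", p,
		       skb_prio_to_cfhsi_queue(p));
	return 0;
}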
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 5b20413..bc497d7 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -13,6 +13,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
+#include <linux/io.h>
 
 #include <net/caif/caif_device.h>
 #include <net/caif/caif_shm.h>
@@ -647,6 +648,9 @@
 		if (pshm_dev->shm_loopback)
 			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
 		else
+			/*
+			 * FIXME: the result of ioremap is not a pointer - arnd
+			 */
 			tx_buf->desc_vptr =
 					ioremap(tx_buf->phy_addr, TX_BUF_SZ);
 
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c5fe3a3..f03d7a4 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -687,18 +687,19 @@
 
 	if (priv->do_get_state)
 		priv->do_get_state(dev, &state);
-	NLA_PUT_U32(skb, IFLA_CAN_STATE, state);
-	NLA_PUT(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm);
-	NLA_PUT_U32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms);
-	NLA_PUT(skb, IFLA_CAN_BITTIMING,
-		sizeof(priv->bittiming), &priv->bittiming);
-	NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock);
-	if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec))
-		NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec);
-	if (priv->bittiming_const)
-		NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST,
-			sizeof(*priv->bittiming_const), priv->bittiming_const);
-
+	if (nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+	    nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+	    nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+	    nla_put(skb, IFLA_CAN_BITTIMING,
+		    sizeof(priv->bittiming), &priv->bittiming) ||
+	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
+	    (priv->do_get_berr_counter &&
+	     !priv->do_get_berr_counter(dev, &bec) &&
+	     nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+	    (priv->bittiming_const &&
+	     nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+		     sizeof(*priv->bittiming_const), priv->bittiming_const)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -714,9 +715,9 @@
 {
 	struct can_priv *priv = netdev_priv(dev);
 
-	NLA_PUT(skb, IFLA_INFO_XSTATS,
-		sizeof(priv->can_stats), &priv->can_stats);
-
+	if (nla_put(skb, IFLA_INFO_XSTATS,
+		    sizeof(priv->can_stats), &priv->can_stats))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
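The can/dev.c hunks follow the tree-wide conversion from the old NLA_PUT*() macros (which hid a "goto nla_put_failure" inside the macro body) to explicit nla_put*() calls whose return values are OR-ed together so that the first attribute that does not fit bails out. A standalone sketch of that shape, using stub "put" helpers rather than the real netlink API (all names here are illustrative):

#include <stdio.h>

static int put_u32(const char *name, unsigned int v)
{
	printf("%s=%u\n", name, v);
	return 0;	/* non-zero would mean "attribute did not fit" */
}

static int put_blob(const char *name, const void *data, unsigned int len)
{
	(void)data;
	printf("%s (%u bytes)\n", name, len);
	return 0;
}

static int fill_info(void)
{
	unsigned int state = 1, restart_ms = 100;
	unsigned char bittiming[16] = { 0 };

	/* One condition: stop at the first helper that fails. */
	if (put_u32("state", state) ||
	    put_u32("restart_ms", restart_ms) ||
	    put_blob("bittiming", bittiming, sizeof(bittiming)))
		goto put_failure;
	return 0;

put_failure:
	return -1;	/* the driver returns -EMSGSIZE here */
}

int main(void)
{
	return fill_info() ? 1 : 0;
}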
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 2bb215e..1226297 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1274,17 +1274,7 @@
 	.resume = pch_can_resume,
 };
 
-static int __init pch_can_pci_init(void)
-{
-	return pci_register_driver(&pch_can_pci_driver);
-}
-module_init(pch_can_pci_init);
-
-static void __exit pch_can_pci_exit(void)
-{
-	pci_unregister_driver(&pch_can_pci_driver);
-}
-module_exit(pch_can_pci_exit);
+module_pci_driver(pch_can_pci_driver);
 
 MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
 MODULE_LICENSE("GPL v2");
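This and the following sja1000/plx/kvaser/peak PCI patches all replace the hand-written module init/exit boilerplate with module_pci_driver(). Roughly, and paraphrasing rather than quoting the kernel headers, the macro generates the same pair of functions that each driver used to spell out by hand, so module_pci_driver(foo_driver) is approximately:

/* Sketch of what the deleted boilerplate collapses into. */
static int __init foo_driver_init(void)
{
	return pci_register_driver(&foo_driver);
}
module_init(foo_driver_init);

static void __exit foo_driver_exit(void)
{
	pci_unregister_driver(&foo_driver);
}
module_exit(foo_driver_exit);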
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 36f4f97..5c6d412 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -371,16 +371,4 @@
 	.remove = ems_pci_del_card,
 };
 
-static int __init ems_pci_init(void)
-{
-	return pci_register_driver(&ems_pci_driver);
-}
-
-static void __exit ems_pci_exit(void)
-{
-	pci_unregister_driver(&ems_pci_driver);
-}
-
-module_init(ems_pci_init);
-module_exit(ems_pci_exit);
-
+module_pci_driver(ems_pci_driver);
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index ed004ce..23ed6ea 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -397,15 +397,4 @@
 	.remove = __devexit_p(kvaser_pci_remove_one),
 };
 
-static int __init kvaser_pci_init(void)
-{
-	return pci_register_driver(&kvaser_pci_driver);
-}
-
-static void __exit kvaser_pci_exit(void)
-{
-	pci_unregister_driver(&kvaser_pci_driver);
-}
-
-module_init(kvaser_pci_init);
-module_exit(kvaser_pci_exit);
+module_pci_driver(kvaser_pci_driver);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 5f92b86..f0a1296 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -749,14 +749,4 @@
 	.remove = __devexit_p(peak_pci_remove),
 };
 
-static int __init peak_pci_init(void)
-{
-	return pci_register_driver(&peak_pci_driver);
-}
-module_init(peak_pci_init);
-
-static void __exit peak_pci_exit(void)
-{
-	pci_unregister_driver(&peak_pci_driver);
-}
-module_exit(peak_pci_exit);
+module_pci_driver(peak_pci_driver);
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index a227586..8bc9598 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -609,15 +609,4 @@
 	.remove = plx_pci_del_card,
 };
 
-static int __init plx_pci_init(void)
-{
-	return pci_register_driver(&plx_pci_driver);
-}
-
-static void __exit plx_pci_exit(void)
-{
-	pci_unregister_driver(&plx_pci_driver);
-}
-
-module_init(plx_pci_init);
-module_exit(plx_pci_exit);
+module_pci_driver(plx_pci_driver);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 5234586..629c4ba 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -875,6 +875,7 @@
 					    PCAN_USBPRO_INFO_FW,
 					    &fi, sizeof(fi));
 		if (err) {
+			kfree(usb_if);
 			dev_err(dev->netdev->dev.parent,
 				"unable to read %s firmware info (err %d)\n",
 				pcan_usb_pro.name, err);
@@ -885,6 +886,7 @@
 					    PCAN_USBPRO_INFO_BL,
 					    &bi, sizeof(bi));
 		if (err) {
+			kfree(usb_if);
 			dev_err(dev->netdev->dev.parent,
 				"unable to read %s bootloader info (err %d)\n",
 				pcan_usb_pro.name, err);
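The two pcan_usb_pro.c hunks close a leak of the usb_if allocation on the early error returns taken when the firmware or bootloader info cannot be read. A standalone sketch of the same fix expressed as centralised goto unwinding, the more common kernel idiom for this kind of cleanup (all names here are illustrative, not the driver's):

#include <stdlib.h>

struct usb_if { int dummy; };

static int read_info(int which)
{
	(void)which;
	return 0;	/* stand-in for the firmware/bootloader info requests */
}

static int probe_sketch(void)
{
	struct usb_if *usb_if;
	int err;

	usb_if = malloc(sizeof(*usb_if));	/* kzalloc() in the driver */
	if (!usb_if)
		return -1;

	err = read_info(0);			/* firmware info */
	if (err)
		goto err_free;

	err = read_info(1);			/* bootloader info */
	if (err)
		goto err_free;

	return 0;

err_free:
	free(usb_if);				/* the leak the patch closes */
	return err;
}

int main(void)
{
	return probe_sketch() ? 1 : 0;
}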
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index d5c6d92..442d91a 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -107,14 +107,14 @@
 	return 0;
 }
 
-static void dummy_dev_free(struct net_device *dev)
+static void dummy_dev_uninit(struct net_device *dev)
 {
 	free_percpu(dev->dstats);
-	free_netdev(dev);
 }
 
 static const struct net_device_ops dummy_netdev_ops = {
 	.ndo_init		= dummy_dev_init,
+	.ndo_uninit		= dummy_dev_uninit,
 	.ndo_start_xmit		= dummy_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_rx_mode	= set_multicast_list,
@@ -128,7 +128,7 @@
 
 	/* Initialize the device structure. */
 	dev->netdev_ops = &dummy_netdev_ops;
-	dev->destructor = dummy_dev_free;
+	dev->destructor = free_netdev;
 
 	/* Fill in device structure with ethernet-generic values. */
 	dev->tx_queue_len = 0;
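The dummy.c change splits teardown in two: the per-cpu stats are now released from .ndo_uninit, which runs as part of unregister_netdevice(), while the netdev itself is freed by the plain free_netdev destructor. A kernel-style fragment of that split (a sketch, not standalone code; struct pcpu_dstats is the driver's own private type):

static int sketch_dev_init(struct net_device *dev)
{
	dev->dstats = alloc_percpu(struct pcpu_dstats);
	if (!dev->dstats)
		return -ENOMEM;
	return 0;
}

static void sketch_dev_uninit(struct net_device *dev)
{
	/* Called from unregister_netdevice(); the destructor then only
	 * has to free the netdev itself. */
	free_percpu(dev->dstats);
}

static const struct net_device_ops sketch_netdev_ops = {
	.ndo_init	= sketch_dev_init,
	.ndo_uninit	= sketch_dev_uninit,
};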
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index e04ade4..910895c 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -60,6 +60,7 @@
 config AX88796
 	tristate "ASIX AX88796 NE2000 clone support"
 	depends on (ARM || MIPS || SUPERH)
+	select CRC32
 	select PHYLIB
 	select MDIO_BITBANG
 	---help---
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 11476ca..203ff9d 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -501,6 +501,7 @@
 	.get_settings		= ax_get_settings,
 	.set_settings		= ax_set_settings,
 	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 #ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index dbefd56..8322c54 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -635,6 +635,7 @@
 	.get_settings	= etherh_get_settings,
 	.set_settings	= etherh_set_settings,
 	.get_drvinfo	= etherh_get_drvinfo,
+	.get_ts_info	= ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops etherh_netdev_ops = {
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c63a64c..a11af5c 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -174,6 +174,7 @@
 source "drivers/net/ethernet/toshiba/Kconfig"
 source "drivers/net/ethernet/tundra/Kconfig"
 source "drivers/net/ethernet/via/Kconfig"
+source "drivers/net/ethernet/wiznet/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"
 
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 9676a51..878ad32 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -73,5 +73,6 @@
 obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
 obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
 obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
 obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index d896816..d920a52 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -114,15 +114,6 @@
 #define DMA_BURST_SIZE 128
 #endif
 
-/* Used to pass the media type, etc.
-   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
-   The media type is usually passed in 'options[]'.
-   These variables are deprecated, use ethtool instead. -Ion
-*/
-#define MAX_UNITS 8		/* More are supported, limit only on options */
-static int options[MAX_UNITS] = {0, };
-static int full_duplex[MAX_UNITS] = {0, };
-
 /* Operational parameters that are set at compile time. */
 
 /* The "native" ring sizes are either 256 or 2048.
@@ -192,8 +183,6 @@
 module_param(rx_copybreak, int, 0);
 module_param(intr_latency, int, 0);
 module_param(small_frames, int, 0);
-module_param_array(options, int, NULL, 0);
-module_param_array(full_duplex, int, NULL, 0);
 module_param(enable_hw_cksum, int, 0);
 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
 MODULE_PARM_DESC(mtu, "MTU (all boards)");
@@ -201,8 +190,6 @@
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
-MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
-MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
 
 /*
@@ -657,10 +644,10 @@
 static int __devinit starfire_init_one(struct pci_dev *pdev,
 				       const struct pci_device_id *ent)
 {
+	struct device *d = &pdev->dev;
 	struct netdev_private *np;
-	int i, irq, option, chip_idx = ent->driver_data;
+	int i, irq, chip_idx = ent->driver_data;
 	struct net_device *dev;
-	static int card_idx = -1;
 	long ioaddr;
 	void __iomem *base;
 	int drv_flags, io_size;
@@ -673,15 +660,13 @@
 		printk(version);
 #endif
 
-	card_idx++;
-
 	if (pci_enable_device (pdev))
 		return -EIO;
 
 	ioaddr = pci_resource_start(pdev, 0);
 	io_size = pci_resource_len(pdev, 0);
 	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
-		printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
+		dev_err(d, "no PCI MEM resources, aborting\n");
 		return -ENODEV;
 	}
 
@@ -694,14 +679,14 @@
 	irq = pdev->irq;
 
 	if (pci_request_regions (pdev, DRV_NAME)) {
-		printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
+		dev_err(d, "cannot reserve PCI resources, aborting\n");
 		goto err_out_free_netdev;
 	}
 
 	base = ioremap(ioaddr, io_size);
 	if (!base) {
-		printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
-			card_idx, io_size, ioaddr);
+		dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
+			io_size, ioaddr);
 		goto err_out_free_res;
 	}
 
@@ -753,9 +738,6 @@
 	/* wait a little longer */
 	udelay(1000);
 
-	dev->base_addr = (unsigned long)base;
-	dev->irq = irq;
-
 	np = netdev_priv(dev);
 	np->dev = dev;
 	np->base = base;
@@ -772,21 +754,6 @@
 
 	drv_flags = netdrv_tbl[chip_idx].drv_flags;
 
-	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
-	if (dev->mem_start)
-		option = dev->mem_start;
-
-	/* The lower four bits are the media type. */
-	if (option & 0x200)
-		np->mii_if.full_duplex = 1;
-
-	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
-		np->mii_if.full_duplex = 1;
-
-	if (np->mii_if.full_duplex)
-		np->mii_if.force_media = 1;
-	else
-		np->mii_if.force_media = 0;
 	np->speed100 = 1;
 
 	/* timer resolution is 128 * 0.8us */
@@ -909,13 +876,14 @@
 	const __be32 *fw_rx_data, *fw_tx_data;
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base;
+	const int irq = np->pci_dev->irq;
 	int i, retval;
 	size_t tx_size, rx_size;
 	size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
 
 	/* Do we ever need to reset the chip??? */
 
-	retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+	retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (retval)
 		return retval;
 
@@ -924,7 +892,7 @@
 	writel(1, ioaddr + PCIDeviceConfig);
 	if (debug > 1)
 		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-		       dev->name, dev->irq);
+		       dev->name, irq);
 
 	/* Allocate the various queues. */
 	if (!np->queue_mem) {
@@ -935,7 +903,7 @@
 		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
 		np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
 		if (np->queue_mem == NULL) {
-			free_irq(dev->irq, dev);
+			free_irq(irq, dev);
 			return -ENOMEM;
 		}
 
@@ -1962,7 +1930,7 @@
 		}
 	}
 
-	free_irq(dev->irq, dev);
+	free_irq(np->pci_dev->irq, dev);
 
 	/* Free all the skbuffs in the Rx queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index ab4daec..f816426 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -548,6 +548,25 @@
 	return 0;
 }
 
+static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
+	struct ethtool_ts_info *info)
+{
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_SYS_HARDWARE;
+	info->phc_index = -1;
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+	return 0;
+}
+
 static const struct ethtool_ops bfin_mac_ethtool_ops = {
 	.get_settings = bfin_mac_ethtool_getsettings,
 	.set_settings = bfin_mac_ethtool_setsettings,
@@ -555,6 +574,7 @@
 	.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
 	.get_wol = bfin_mac_ethtool_getwol,
 	.set_wol = bfin_mac_ethtool_setwol,
+	.get_ts_info = bfin_mac_ethtool_get_ts_info,
 };
 
 /**************************************************************************/
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index ca70e16..acc2956 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -74,8 +74,6 @@
 
 #define AT_RX_BUF_SIZE		(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
 #define MAX_JUMBO_FRAME_SIZE	(6*1024)
-#define MAX_TSO_FRAME_SIZE      (7*1024)
-#define MAX_TX_OFFLOAD_THRESH	(9*1024)
 
 #define AT_MAX_RECEIVE_QUEUE    4
 #define AT_DEF_RECEIVE_QUEUE	1
@@ -100,7 +98,7 @@
 #define ATL1C_ASPM_L0s_ENABLE		0x0001
 #define ATL1C_ASPM_L1_ENABLE		0x0002
 
-#define AT_REGS_LEN	(75 * sizeof(u32))
+#define AT_REGS_LEN	(74 * sizeof(u32))
 #define AT_EEPROM_LEN 	512
 
 #define ATL1C_GET_DESC(R, i, type)	(&(((type *)((R)->desc))[i]))
@@ -297,20 +295,6 @@
 	atl1c_dma_req_4096 = 5
 };
 
-enum atl1c_rss_mode {
-	atl1c_rss_mode_disable = 0,
-	atl1c_rss_sig_que = 1,
-	atl1c_rss_mul_que_sig_int = 2,
-	atl1c_rss_mul_que_mul_int = 4,
-};
-
-enum atl1c_rss_type {
-	atl1c_rss_disable = 0,
-	atl1c_rss_ipv4 = 1,
-	atl1c_rss_ipv4_tcp = 2,
-	atl1c_rss_ipv6 = 4,
-	atl1c_rss_ipv6_tcp = 8
-};
 
 enum atl1c_nic_type {
 	athr_l1c = 0,
@@ -388,7 +372,6 @@
 	enum atl1c_dma_order dma_order;
 	enum atl1c_dma_rcb   rcb_value;
 	enum atl1c_dma_req_block dmar_block;
-	enum atl1c_dma_req_block dmaw_block;
 
 	u16 device_id;
 	u16 vendor_id;
@@ -399,8 +382,6 @@
 	u16 phy_id2;
 
 	u32 intr_mask;
-	u8 dmaw_dly_cnt;
-	u8 dmar_dly_cnt;
 
 	u8 preamble_len;
 	u16 max_frame_size;
@@ -440,10 +421,6 @@
 #define ATL1C_FPGA_VERSION              0x8000
 	u16 link_cap_flags;
 #define ATL1C_LINK_CAP_1000M		0x0001
-	u16 cmb_tpd;
-	u16 cmb_rrd;
-	u16 cmb_rx_timer; /* 2us resolution */
-	u16 cmb_tx_timer;
 	u32 smb_timer;
 
 	u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
@@ -451,9 +428,6 @@
 	u16 tpd_thresh;
 	u8 tpd_burst;   /* Number of TPD to prefetch in cache-aligned burst. */
 	u8 rfd_burst;
-	enum atl1c_rss_type rss_type;
-	enum atl1c_rss_mode rss_mode;
-	u8 rss_hash_bits;
 	u32 base_cpu;
 	u32 indirect_tab;
 	u8 mac_addr[ETH_ALEN];
@@ -466,8 +440,7 @@
 
 /*
  * atl1c_ring_header represents a single, contiguous block of DMA space
- * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
- * message blocks (cmb, smb) described below
+ * mapped for the three descriptor rings (tpd, rfd, rrd) described below
  */
 struct atl1c_ring_header {
 	void *desc;		/* virtual address */
@@ -541,16 +514,6 @@
 	u16 next_to_clean;
 };
 
-struct atl1c_cmb {
-	void *cmb;
-	dma_addr_t dma;
-};
-
-struct atl1c_smb {
-	void *smb;
-	dma_addr_t dma;
-};
-
 /* board specific private data structure */
 struct atl1c_adapter {
 	struct net_device   *netdev;
@@ -586,11 +549,8 @@
 	/* All Descriptor memory */
 	struct atl1c_ring_header ring_header;
 	struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE];
-	struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE];
-	struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE];
-	struct atl1c_cmb cmb;
-	struct atl1c_smb smb;
-	int num_rx_queues;
+	struct atl1c_rfd_ring rfd_ring;
+	struct atl1c_rrd_ring rrd_ring;
 	u32 bd_number;     /* board number;*/
 };
 
@@ -618,8 +578,14 @@
 #define AT_WRITE_REGW(a, reg, value) (\
 		writew((value), ((a)->hw_addr + reg)))
 
-#define AT_READ_REGW(a, reg) (\
-		readw((a)->hw_addr + reg))
+#define AT_READ_REGW(a, reg, pdata) do {				\
+		if (unlikely((a)->hibernate)) {				\
+			readw((a)->hw_addr + reg);			\
+			*(u16 *)pdata = readw((a)->hw_addr + reg);	\
+		} else {						\
+			*(u16 *)pdata = readw((a)->hw_addr + reg);	\
+		}							\
+	} while (0)
 
 #define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
 		writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 0a9326a..3feb846 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -141,8 +141,7 @@
 
 	memset(p, 0, AT_REGS_LEN);
 
-	regs->version = 0;
-	AT_READ_REG(hw, REG_VPD_CAP, 		  p++);
+	regs->version = 1;
 	AT_READ_REG(hw, REG_PM_CTRL, 		  p++);
 	AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL,  p++);
 	AT_READ_REG(hw, REG_TWSI_CTRL, 		  p++);
@@ -167,9 +166,9 @@
 	AT_READ_REG(hw, REG_WOL_CTRL, 		  p++);
 
 	atl1c_read_phy_reg(hw, MII_BMCR, &phy_data);
-	regs_buff[73] =	(u32) phy_data;
+	regs_buff[AT_REGS_LEN/sizeof(u32) - 2] = (u32) phy_data;
 	atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
-	regs_buff[74] = (u32) phy_data;
+	regs_buff[AT_REGS_LEN/sizeof(u32) - 1] = (u32) phy_data;
 }
 
 static int atl1c_get_eeprom_len(struct net_device *netdev)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index 655fc6c..cc7afa1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -25,6 +25,12 @@
 #include <linux/types.h>
 #include <linux/mii.h>
 
+#define FIELD_GETX(_x, _name)   ((_x) >> (_name##_SHIFT) & (_name##_MASK))
+#define FIELD_SETX(_x, _name, _v) \
+(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\
+(((_v) & (_name##_MASK)) << (_name##_SHIFT)))
+#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT))
+
 struct atl1c_adapter;
 struct atl1c_hw;
 
@@ -48,41 +54,13 @@
 #define DEVICE_CAP_MAX_PAYLOAD_MASK     0x7
 #define DEVICE_CAP_MAX_PAYLOAD_SHIFT    0
 
-#define REG_DEVICE_CTRL			0x60
-#define DEVICE_CTRL_MAX_PAYLOAD_MASK    0x7
-#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT   5
-#define DEVICE_CTRL_MAX_RREQ_SZ_MASK    0x7
-#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT   12
+#define DEVICE_CTRL_MAXRRS_MIN		2
 
 #define REG_LINK_CTRL			0x68
 #define LINK_CTRL_L0S_EN		0x01
 #define LINK_CTRL_L1_EN			0x02
 #define LINK_CTRL_EXT_SYNC		0x80
 
-#define REG_VPD_CAP			0x6C
-#define VPD_CAP_ID_MASK                 0xff
-#define VPD_CAP_ID_SHIFT                0
-#define VPD_CAP_NEXT_PTR_MASK           0xFF
-#define VPD_CAP_NEXT_PTR_SHIFT          8
-#define VPD_CAP_VPD_ADDR_MASK           0x7FFF
-#define VPD_CAP_VPD_ADDR_SHIFT          16
-#define VPD_CAP_VPD_FLAG                0x80000000
-
-#define REG_VPD_DATA                	0x70
-
-#define REG_PCIE_UC_SEVERITY		0x10C
-#define PCIE_UC_SERVRITY_TRN		0x00000001
-#define PCIE_UC_SERVRITY_DLP		0x00000010
-#define PCIE_UC_SERVRITY_PSN_TLP	0x00001000
-#define PCIE_UC_SERVRITY_FCP		0x00002000
-#define PCIE_UC_SERVRITY_CPL_TO		0x00004000
-#define PCIE_UC_SERVRITY_CA		0x00008000
-#define PCIE_UC_SERVRITY_UC		0x00010000
-#define PCIE_UC_SERVRITY_ROV		0x00020000
-#define PCIE_UC_SERVRITY_MLFP		0x00040000
-#define PCIE_UC_SERVRITY_ECRC		0x00080000
-#define PCIE_UC_SERVRITY_UR		0x00100000
-
 #define REG_DEV_SERIALNUM_CTRL		0x200
 #define REG_DEV_MAC_SEL_MASK		0x0 /* 0:EUI; 1:MAC */
 #define REG_DEV_MAC_SEL_SHIFT		0
@@ -118,17 +96,24 @@
 #define PCIE_DEV_MISC_SERDES_SEL_DIN   	0x10
 
 #define REG_PCIE_PHYMISC	    	0x1000
-#define PCIE_PHYMISC_FORCE_RCV_DET	0x4
+#define PCIE_PHYMISC_FORCE_RCV_DET	BIT(2)
+#define PCIE_PHYMISC_NFTS_MASK		0xFFUL
+#define PCIE_PHYMISC_NFTS_SHIFT		16
 
 #define REG_PCIE_PHYMISC2		0x1004
-#define PCIE_PHYMISC2_SERDES_CDR_MASK	0x3
-#define PCIE_PHYMISC2_SERDES_CDR_SHIFT	16
-#define PCIE_PHYMISC2_SERDES_TH_MASK	0x3
-#define PCIE_PHYMISC2_SERDES_TH_SHIFT	18
+#define PCIE_PHYMISC2_L0S_TH_MASK	0x3UL
+#define PCIE_PHYMISC2_L0S_TH_SHIFT	18
+#define L2CB1_PCIE_PHYMISC2_L0S_TH	3
+#define PCIE_PHYMISC2_CDR_BW_MASK	0x3UL
+#define PCIE_PHYMISC2_CDR_BW_SHIFT	16
+#define L2CB1_PCIE_PHYMISC2_CDR_BW	3
 
 #define REG_TWSI_DEBUG			0x1108
 #define TWSI_DEBUG_DEV_EXIST		0x20000000
 
+#define REG_DMA_DBG			0x1114
+#define DMA_DBG_VENDOR_MSG		BIT(0)
+
 #define REG_EEPROM_CTRL			0x12C0
 #define EEPROM_CTRL_DATA_HI_MASK	0xFFFF
 #define EEPROM_CTRL_DATA_HI_SHIFT	0
@@ -143,53 +128,78 @@
 #define OTP_CTRL_CLK_EN			0x0002
 
 #define REG_PM_CTRL			0x12F8
-#define PM_CTRL_SDES_EN			0x00000001
-#define PM_CTRL_RBER_EN			0x00000002
-#define PM_CTRL_CLK_REQ_EN		0x00000004
-#define PM_CTRL_ASPM_L1_EN		0x00000008
-#define PM_CTRL_SERDES_L1_EN		0x00000010
-#define PM_CTRL_SERDES_PLL_L1_EN	0x00000020
-#define PM_CTRL_SERDES_PD_EX_L1		0x00000040
-#define PM_CTRL_SERDES_BUDS_RX_L1_EN	0x00000080
-#define PM_CTRL_L0S_ENTRY_TIMER_MASK	0xF
-#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT	8
-#define PM_CTRL_ASPM_L0S_EN		0x00001000
-#define PM_CTRL_CLK_SWH_L1		0x00002000
-#define PM_CTRL_CLK_PWM_VER1_1		0x00004000
-#define PM_CTRL_RCVR_WT_TIMER		0x00008000
-#define PM_CTRL_L1_ENTRY_TIMER_MASK	0xF
-#define PM_CTRL_L1_ENTRY_TIMER_SHIFT	16
-#define PM_CTRL_PM_REQ_TIMER_MASK	0xF
-#define PM_CTRL_PM_REQ_TIMER_SHIFT	20
-#define PM_CTRL_LCKDET_TIMER_MASK	0xF
+#define PM_CTRL_HOTRST			BIT(31)
+#define PM_CTRL_MAC_ASPM_CHK		BIT(30)	/* L0s/L1 dis by MAC based on
+						 * throughput (setting in 15A0) */
+#define PM_CTRL_SA_DLY_EN		BIT(29)
+#define PM_CTRL_L0S_BUFSRX_EN		BIT(28)
+#define PM_CTRL_LCKDET_TIMER_MASK	0xFUL
 #define PM_CTRL_LCKDET_TIMER_SHIFT	24
-#define PM_CTRL_EN_BUFS_RX_L0S		0x10000000
-#define PM_CTRL_SA_DLY_EN		0x20000000
-#define PM_CTRL_MAC_ASPM_CHK		0x40000000
-#define PM_CTRL_HOTRST			0x80000000
+#define PM_CTRL_LCKDET_TIMER_DEF	0xC
+#define PM_CTRL_PM_REQ_TIMER_MASK	0xFUL
+#define PM_CTRL_PM_REQ_TIMER_SHIFT	20	/* pm_request_l1 time > @
+						 * ->L0s not L1 */
+#define PM_CTRL_PM_REQ_TO_DEF		0xC
+#define PMCTRL_TXL1_AFTER_L0S		BIT(19)	/* l1dv2.0+ */
+#define L1D_PMCTRL_L1_ENTRY_TM_MASK	7UL	/* l1dv2.0+, 3bits */
+#define L1D_PMCTRL_L1_ENTRY_TM_SHIFT	16
+#define L1D_PMCTRL_L1_ENTRY_TM_DIS	0
+#define L1D_PMCTRL_L1_ENTRY_TM_2US	1
+#define L1D_PMCTRL_L1_ENTRY_TM_4US	2
+#define L1D_PMCTRL_L1_ENTRY_TM_8US	3
+#define L1D_PMCTRL_L1_ENTRY_TM_16US	4
+#define L1D_PMCTRL_L1_ENTRY_TM_24US	5
+#define L1D_PMCTRL_L1_ENTRY_TM_32US	6
+#define L1D_PMCTRL_L1_ENTRY_TM_63US	7
+#define PM_CTRL_L1_ENTRY_TIMER_MASK	0xFUL  /* l1C 4bits */
+#define PM_CTRL_L1_ENTRY_TIMER_SHIFT	16
+#define L2CB1_PM_CTRL_L1_ENTRY_TM	7
+#define L1C_PM_CTRL_L1_ENTRY_TM		0xF
+#define PM_CTRL_RCVR_WT_TIMER		BIT(15)	/* 1:1us, 0:2ms */
+#define PM_CTRL_CLK_PWM_VER1_1		BIT(14)	/* 0:1.0a,1:1.1 */
+#define PM_CTRL_CLK_SWH_L1		BIT(13)	/* en pcie clk sw in L1 */
+#define PM_CTRL_ASPM_L0S_EN		BIT(12)
+#define PM_CTRL_RXL1_AFTER_L0S		BIT(11)	/* l1dv2.0+ */
+#define L1D_PMCTRL_L0S_TIMER_MASK	7UL	/* l1d2.0+, 3bits*/
+#define L1D_PMCTRL_L0S_TIMER_SHIFT	8
+#define PM_CTRL_L0S_ENTRY_TIMER_MASK	0xFUL	/* l1c, 4bits */
+#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT	8
+#define PM_CTRL_SERDES_BUFS_RX_L1_EN	BIT(7)
+#define PM_CTRL_SERDES_PD_EX_L1		BIT(6)	/* power down serdes rx */
+#define PM_CTRL_SERDES_PLL_L1_EN	BIT(5)
+#define PM_CTRL_SERDES_L1_EN		BIT(4)
+#define PM_CTRL_ASPM_L1_EN		BIT(3)
+#define PM_CTRL_CLK_REQ_EN		BIT(2)
+#define PM_CTRL_RBER_EN			BIT(1)
+#define PM_CTRL_SPRSDWER_EN		BIT(0)
 
 #define REG_LTSSM_ID_CTRL		0x12FC
 #define LTSSM_ID_EN_WRO			0x1000
+
+
 /* Selene Master Control Register */
 #define REG_MASTER_CTRL			0x1400
-#define MASTER_CTRL_SOFT_RST            0x1
-#define MASTER_CTRL_TEST_MODE_MASK	0x3
-#define MASTER_CTRL_TEST_MODE_SHIFT	2
-#define MASTER_CTRL_BERT_START		0x10
-#define MASTER_CTRL_OOB_DIS_OFF		0x40
-#define MASTER_CTRL_SA_TIMER_EN		0x80
-#define MASTER_CTRL_MTIMER_EN           0x100
-#define MASTER_CTRL_MANUAL_INT          0x200
-#define MASTER_CTRL_TX_ITIMER_EN	0x400
-#define MASTER_CTRL_RX_ITIMER_EN	0x800
-#define MASTER_CTRL_CLK_SEL_DIS		0x1000
-#define MASTER_CTRL_CLK_SWH_MODE	0x2000
-#define MASTER_CTRL_INT_RDCLR		0x4000
-#define MASTER_CTRL_REV_NUM_SHIFT	16
-#define MASTER_CTRL_REV_NUM_MASK	0xff
-#define MASTER_CTRL_DEV_ID_SHIFT	24
-#define MASTER_CTRL_DEV_ID_MASK		0x7f
-#define MASTER_CTRL_OTP_SEL		0x80000000
+#define MASTER_CTRL_OTP_SEL		BIT(31)
+#define MASTER_DEV_NUM_MASK		0x7FUL
+#define MASTER_DEV_NUM_SHIFT		24
+#define MASTER_REV_NUM_MASK		0xFFUL
+#define MASTER_REV_NUM_SHIFT		16
+#define MASTER_CTRL_INT_RDCLR		BIT(14)
+#define MASTER_CTRL_CLK_SEL_DIS		BIT(12)	/* 1:always sel pclk from
+						 * serdes, not sw to 25M */
+#define MASTER_CTRL_RX_ITIMER_EN	BIT(11)	/* IRQ MODULATION FOR RX */
+#define MASTER_CTRL_TX_ITIMER_EN	BIT(10)	/* MODULATION FOR TX/RX */
+#define MASTER_CTRL_MANU_INT		BIT(9)	/* SOFT MANUAL INT */
+#define MASTER_CTRL_MANUTIMER_EN	BIT(8)
+#define MASTER_CTRL_SA_TIMER_EN		BIT(7)	/* SYS ALIVE TIMER EN */
+#define MASTER_CTRL_OOB_DIS		BIT(6)	/* OUT OF BOX DIS */
+#define MASTER_CTRL_WAKEN_25M		BIT(5)	/* WAKE WO. PCIE CLK */
+#define MASTER_CTRL_BERT_START		BIT(4)
+#define MASTER_PCIE_TSTMOD_MASK		3UL
+#define MASTER_PCIE_TSTMOD_SHIFT	2
+#define MASTER_PCIE_RST			BIT(1)
+#define MASTER_CTRL_SOFT_RST		BIT(0)	/* RST MAC & DMA */
+#define DMA_MAC_RST_TO			50
 
 /* Timer Initial Value Register */
 #define REG_MANUAL_TIMER_INIT       	0x1404
@@ -236,17 +246,25 @@
 		GPHY_CTRL_HIB_PULSE	|\
 		GPHY_CTRL_PWDOWN_HW	|\
 		GPHY_CTRL_PHY_IDDQ)
+
 /* Block IDLE Status Register */
-#define REG_IDLE_STATUS  		0x1410
-#define IDLE_STATUS_MASK		0x00FF
-#define IDLE_STATUS_RXMAC_NO_IDLE      	0x1
-#define IDLE_STATUS_TXMAC_NO_IDLE      	0x2
-#define IDLE_STATUS_RXQ_NO_IDLE        	0x4
-#define IDLE_STATUS_TXQ_NO_IDLE        	0x8
-#define IDLE_STATUS_DMAR_NO_IDLE       	0x10
-#define IDLE_STATUS_DMAW_NO_IDLE       	0x20
-#define IDLE_STATUS_SMB_NO_IDLE        	0x40
-#define IDLE_STATUS_CMB_NO_IDLE        	0x80
+#define REG_IDLE_STATUS			0x1410
+#define IDLE_STATUS_SFORCE_MASK		0xFUL
+#define IDLE_STATUS_SFORCE_SHIFT	14
+#define IDLE_STATUS_CALIB_DONE		BIT(13)
+#define IDLE_STATUS_CALIB_RES_MASK	0x1FUL
+#define IDLE_STATUS_CALIB_RES_SHIFT	8
+#define IDLE_STATUS_CALIBERR_MASK	0xFUL
+#define IDLE_STATUS_CALIBERR_SHIFT	4
+#define IDLE_STATUS_TXQ_BUSY		BIT(3)
+#define IDLE_STATUS_RXQ_BUSY		BIT(2)
+#define IDLE_STATUS_TXMAC_BUSY		BIT(1)
+#define IDLE_STATUS_RXMAC_BUSY		BIT(0)
+#define IDLE_STATUS_MASK		(\
+	IDLE_STATUS_TXQ_BUSY		|\
+	IDLE_STATUS_RXQ_BUSY		|\
+	IDLE_STATUS_TXMAC_BUSY		|\
+	IDLE_STATUS_RXMAC_BUSY)
 
 /* MDIO Control Register */
 #define REG_MDIO_CTRL           	0x1414
@@ -386,34 +404,53 @@
 
 /* Wake-On-Lan control register */
 #define REG_WOL_CTRL                	0x14a0
-#define WOL_PATTERN_EN              	0x00000001
-#define WOL_PATTERN_PME_EN              0x00000002
-#define WOL_MAGIC_EN                    0x00000004
-#define WOL_MAGIC_PME_EN                0x00000008
-#define WOL_LINK_CHG_EN                 0x00000010
-#define WOL_LINK_CHG_PME_EN             0x00000020
-#define WOL_PATTERN_ST                  0x00000100
-#define WOL_MAGIC_ST                    0x00000200
-#define WOL_LINKCHG_ST                  0x00000400
-#define WOL_CLK_SWITCH_EN               0x00008000
-#define WOL_PT0_EN                      0x00010000
-#define WOL_PT1_EN                      0x00020000
-#define WOL_PT2_EN                      0x00040000
-#define WOL_PT3_EN                      0x00080000
-#define WOL_PT4_EN                      0x00100000
-#define WOL_PT5_EN                      0x00200000
-#define WOL_PT6_EN                      0x00400000
+#define WOL_PT7_MATCH			BIT(31)
+#define WOL_PT6_MATCH			BIT(30)
+#define WOL_PT5_MATCH			BIT(29)
+#define WOL_PT4_MATCH			BIT(28)
+#define WOL_PT3_MATCH			BIT(27)
+#define WOL_PT2_MATCH			BIT(26)
+#define WOL_PT1_MATCH			BIT(25)
+#define WOL_PT0_MATCH			BIT(24)
+#define WOL_PT7_EN			BIT(23)
+#define WOL_PT6_EN			BIT(22)
+#define WOL_PT5_EN			BIT(21)
+#define WOL_PT4_EN			BIT(20)
+#define WOL_PT3_EN			BIT(19)
+#define WOL_PT2_EN			BIT(18)
+#define WOL_PT1_EN			BIT(17)
+#define WOL_PT0_EN			BIT(16)
+#define WOL_LNKCHG_ST			BIT(10)
+#define WOL_MAGIC_ST			BIT(9)
+#define WOL_PATTERN_ST			BIT(8)
+#define WOL_OOB_EN			BIT(6)
+#define WOL_LINK_CHG_PME_EN		BIT(5)
+#define WOL_LINK_CHG_EN			BIT(4)
+#define WOL_MAGIC_PME_EN		BIT(3)
+#define WOL_MAGIC_EN			BIT(2)
+#define WOL_PATTERN_PME_EN		BIT(1)
+#define WOL_PATTERN_EN			BIT(0)
 
 /* WOL Length ( 2 DWORD ) */
-#define REG_WOL_PATTERN_LEN         	0x14a4
-#define WOL_PT_LEN_MASK                 0x7f
-#define WOL_PT0_LEN_SHIFT               0
-#define WOL_PT1_LEN_SHIFT               8
-#define WOL_PT2_LEN_SHIFT               16
-#define WOL_PT3_LEN_SHIFT               24
-#define WOL_PT4_LEN_SHIFT               0
-#define WOL_PT5_LEN_SHIFT               8
-#define WOL_PT6_LEN_SHIFT               16
+#define REG_WOL_PTLEN1			0x14A4
+#define WOL_PTLEN1_3_MASK		0xFFUL
+#define WOL_PTLEN1_3_SHIFT		24
+#define WOL_PTLEN1_2_MASK		0xFFUL
+#define WOL_PTLEN1_2_SHIFT		16
+#define WOL_PTLEN1_1_MASK		0xFFUL
+#define WOL_PTLEN1_1_SHIFT		8
+#define WOL_PTLEN1_0_MASK		0xFFUL
+#define WOL_PTLEN1_0_SHIFT		0
+
+#define REG_WOL_PTLEN2			0x14A8
+#define WOL_PTLEN2_7_MASK		0xFFUL
+#define WOL_PTLEN2_7_SHIFT		24
+#define WOL_PTLEN2_6_MASK		0xFFUL
+#define WOL_PTLEN2_6_SHIFT		16
+#define WOL_PTLEN2_5_MASK		0xFFUL
+#define WOL_PTLEN2_5_SHIFT		8
+#define WOL_PTLEN2_4_MASK		0xFFUL
+#define WOL_PTLEN2_4_SHIFT		0
 
 /* Internal SRAM Partition Register */
 #define RFDX_HEAD_ADDR_MASK		0x03FF
@@ -458,66 +495,50 @@
  */
 #define REG_RX_BASE_ADDR_HI		0x1540
 #define REG_TX_BASE_ADDR_HI		0x1544
-#define REG_SMB_BASE_ADDR_HI		0x1548
-#define REG_SMB_BASE_ADDR_LO		0x154C
 #define REG_RFD0_HEAD_ADDR_LO		0x1550
-#define REG_RFD1_HEAD_ADDR_LO		0x1554
-#define REG_RFD2_HEAD_ADDR_LO		0x1558
-#define REG_RFD3_HEAD_ADDR_LO		0x155C
 #define REG_RFD_RING_SIZE		0x1560
 #define RFD_RING_SIZE_MASK		0x0FFF
 #define REG_RX_BUF_SIZE			0x1564
 #define RX_BUF_SIZE_MASK		0xFFFF
 #define REG_RRD0_HEAD_ADDR_LO		0x1568
-#define REG_RRD1_HEAD_ADDR_LO		0x156C
-#define REG_RRD2_HEAD_ADDR_LO		0x1570
-#define REG_RRD3_HEAD_ADDR_LO		0x1574
 #define REG_RRD_RING_SIZE		0x1578
 #define RRD_RING_SIZE_MASK		0x0FFF
-#define REG_HTPD_HEAD_ADDR_LO		0x157C
-#define REG_NTPD_HEAD_ADDR_LO		0x1580
+#define REG_TPD_PRI1_ADDR_LO		0x157C
+#define REG_TPD_PRI0_ADDR_LO		0x1580
 #define REG_TPD_RING_SIZE		0x1584
 #define TPD_RING_SIZE_MASK		0xFFFF
-#define REG_CMB_BASE_ADDR_LO		0x1588
-
-/* RSS about */
-#define REG_RSS_KEY0                    0x14B0
-#define REG_RSS_KEY1                    0x14B4
-#define REG_RSS_KEY2                    0x14B8
-#define REG_RSS_KEY3                    0x14BC
-#define REG_RSS_KEY4                    0x14C0
-#define REG_RSS_KEY5                    0x14C4
-#define REG_RSS_KEY6                    0x14C8
-#define REG_RSS_KEY7                    0x14CC
-#define REG_RSS_KEY8                    0x14D0
-#define REG_RSS_KEY9                    0x14D4
-#define REG_IDT_TABLE0                	0x14E0
-#define REG_IDT_TABLE1                  0x14E4
-#define REG_IDT_TABLE2                  0x14E8
-#define REG_IDT_TABLE3                  0x14EC
-#define REG_IDT_TABLE4                  0x14F0
-#define REG_IDT_TABLE5                  0x14F4
-#define REG_IDT_TABLE6                  0x14F8
-#define REG_IDT_TABLE7                  0x14FC
-#define REG_IDT_TABLE                   REG_IDT_TABLE0
-#define REG_RSS_HASH_VALUE              0x15B0
-#define REG_RSS_HASH_FLAG               0x15B4
-#define REG_BASE_CPU_NUMBER             0x15B8
 
 /* TXQ Control Register */
-#define REG_TXQ_CTRL                	0x1590
-#define	TXQ_NUM_TPD_BURST_MASK     	0xF
-#define TXQ_NUM_TPD_BURST_SHIFT    	0
-#define TXQ_CTRL_IP_OPTION_EN		0x10
-#define TXQ_CTRL_EN                     0x20
-#define TXQ_CTRL_ENH_MODE               0x40
-#define TXQ_CTRL_LS_8023_EN		0x80
-#define TXQ_TXF_BURST_NUM_SHIFT    	16
-#define TXQ_TXF_BURST_NUM_MASK     	0xFFFF
+#define REG_TXQ_CTRL			0x1590
+#define TXQ_TXF_BURST_NUM_MASK          0xFFFFUL
+#define TXQ_TXF_BURST_NUM_SHIFT		16
+#define L1C_TXQ_TXF_BURST_PREF          0x200
+#define L2CB_TXQ_TXF_BURST_PREF         0x40
+#define TXQ_CTRL_PEDING_CLR             BIT(8)
+#define TXQ_CTRL_LS_8023_EN             BIT(7)
+#define TXQ_CTRL_ENH_MODE               BIT(6)
+#define TXQ_CTRL_EN                     BIT(5)
+#define TXQ_CTRL_IP_OPTION_EN           BIT(4)
+#define TXQ_NUM_TPD_BURST_MASK          0xFUL
+#define TXQ_NUM_TPD_BURST_SHIFT         0
+#define TXQ_NUM_TPD_BURST_DEF           5
+#define TXQ_CFGV			(\
+	FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) |\
+	TXQ_CTRL_ENH_MODE |\
+	TXQ_CTRL_LS_8023_EN |\
+	TXQ_CTRL_IP_OPTION_EN)
+#define L1C_TXQ_CFGV			(\
+	TXQ_CFGV |\
+	FIELDX(TXQ_TXF_BURST_NUM, L1C_TXQ_TXF_BURST_PREF))
+#define L2CB_TXQ_CFGV			(\
+	TXQ_CFGV |\
+	FIELDX(TXQ_TXF_BURST_NUM, L2CB_TXQ_TXF_BURST_PREF))
+
 
 /* Jumbo packet Threshold for task offload */
 #define REG_TX_TSO_OFFLOAD_THRESH	0x1594 /* In 8-bytes */
 #define TX_TSO_OFFLOAD_THRESH_MASK	0x07FF
+#define MAX_TSO_FRAME_SIZE		(7*1024)
 
 #define	REG_TXF_WATER_MARK		0x1598 /* In 8-bytes */
 #define TXF_WATER_MARK_MASK		0x0FFF
@@ -537,26 +558,21 @@
 #define ASPM_THRUPUT_LIMIT_NO		0x00
 #define ASPM_THRUPUT_LIMIT_1M		0x01
 #define ASPM_THRUPUT_LIMIT_10M		0x02
-#define ASPM_THRUPUT_LIMIT_100M		0x04
-#define RXQ1_CTRL_EN			0x10
-#define RXQ2_CTRL_EN			0x20
-#define RXQ3_CTRL_EN			0x40
-#define IPV6_CHKSUM_CTRL_EN		0x80
-#define RSS_HASH_BITS_MASK		0x00FF
-#define RSS_HASH_BITS_SHIFT		8
-#define RSS_HASH_IPV4			0x10000
-#define RSS_HASH_IPV4_TCP		0x20000
-#define RSS_HASH_IPV6			0x40000
-#define RSS_HASH_IPV6_TCP		0x80000
+#define ASPM_THRUPUT_LIMIT_100M		0x03
+#define IPV6_CHKSUM_CTRL_EN		BIT(7)
 #define RXQ_RFD_BURST_NUM_MASK		0x003F
 #define RXQ_RFD_BURST_NUM_SHIFT		20
-#define RSS_MODE_MASK			0x0003
+#define RXQ_NUM_RFD_PREF_DEF		8
+#define RSS_MODE_MASK			3UL
 #define RSS_MODE_SHIFT			26
-#define RSS_NIP_QUEUE_SEL_MASK		0x1
-#define RSS_NIP_QUEUE_SEL_SHIFT		28
-#define RRS_HASH_CTRL_EN		0x20000000
-#define RX_CUT_THRU_EN			0x40000000
-#define RXQ_CTRL_EN			0x80000000
+#define RSS_MODE_DIS			0
+#define RSS_MODE_SQSI			1
+#define RSS_MODE_MQSI			2
+#define RSS_MODE_MQMI			3
+#define RSS_NIP_QUEUE_SEL		BIT(28) /* 0:q0, 1:table */
+#define RRS_HASH_CTRL_EN		BIT(29)
+#define RX_CUT_THRU_EN			BIT(30)
+#define RXQ_CTRL_EN			BIT(31)
 
 #define REG_RFD_FREE_THRESH		0x15A4
 #define RFD_FREE_THRESH_MASK		0x003F
@@ -577,57 +593,45 @@
 #define RXD_DMA_DOWN_TIMER_SHIFT	16
 
 /* DMA Engine Control Register */
-#define REG_DMA_CTRL                	0x15C0
-#define DMA_CTRL_DMAR_IN_ORDER          0x1
-#define DMA_CTRL_DMAR_ENH_ORDER         0x2
-#define DMA_CTRL_DMAR_OUT_ORDER         0x4
-#define DMA_CTRL_RCB_VALUE              0x8
-#define DMA_CTRL_DMAR_BURST_LEN_MASK    0x0007
-#define DMA_CTRL_DMAR_BURST_LEN_SHIFT   4
-#define DMA_CTRL_DMAW_BURST_LEN_MASK    0x0007
-#define DMA_CTRL_DMAW_BURST_LEN_SHIFT   7
-#define DMA_CTRL_DMAR_REQ_PRI           0x400
-#define DMA_CTRL_DMAR_DLY_CNT_MASK      0x001F
-#define DMA_CTRL_DMAR_DLY_CNT_SHIFT     11
-#define DMA_CTRL_DMAW_DLY_CNT_MASK      0x000F
-#define DMA_CTRL_DMAW_DLY_CNT_SHIFT     16
-#define DMA_CTRL_CMB_EN               	0x100000
-#define DMA_CTRL_SMB_EN			0x200000
-#define DMA_CTRL_CMB_NOW		0x400000
-#define MAC_CTRL_SMB_DIS		0x1000000
-#define DMA_CTRL_SMB_NOW		0x80000000
+#define REG_DMA_CTRL			0x15C0
+#define DMA_CTRL_SMB_NOW                BIT(31)
+#define DMA_CTRL_WPEND_CLR              BIT(30)
+#define DMA_CTRL_RPEND_CLR              BIT(29)
+#define DMA_CTRL_WDLY_CNT_MASK          0xFUL
+#define DMA_CTRL_WDLY_CNT_SHIFT         16
+#define DMA_CTRL_WDLY_CNT_DEF           4
+#define DMA_CTRL_RDLY_CNT_MASK          0x1FUL
+#define DMA_CTRL_RDLY_CNT_SHIFT         11
+#define DMA_CTRL_RDLY_CNT_DEF           15
+#define DMA_CTRL_RREQ_PRI_DATA          BIT(10)      /* 0:tpd, 1:data */
+#define DMA_CTRL_WREQ_BLEN_MASK         7UL
+#define DMA_CTRL_WREQ_BLEN_SHIFT        7
+#define DMA_CTRL_RREQ_BLEN_MASK         7UL
+#define DMA_CTRL_RREQ_BLEN_SHIFT        4
+#define L1C_CTRL_DMA_RCB_LEN128         BIT(3)   /* 0:64bytes,1:128bytes */
+#define DMA_CTRL_RORDER_MODE_MASK       7UL
+#define DMA_CTRL_RORDER_MODE_SHIFT      0
+#define DMA_CTRL_RORDER_MODE_OUT        4
+#define DMA_CTRL_RORDER_MODE_ENHANCE    2
+#define DMA_CTRL_RORDER_MODE_IN         1
 
-/* CMB/SMB Control Register */
+/* INT-trigger/SMB Control Register */
 #define REG_SMB_STAT_TIMER		0x15C4	/* 2us resolution */
 #define SMB_STAT_TIMER_MASK		0xFFFFFF
-#define REG_CMB_TPD_THRESH		0x15C8
-#define CMB_TPD_THRESH_MASK		0xFFFF
-#define REG_CMB_TX_TIMER		0x15CC	/* 2us resolution */
-#define CMB_TX_TIMER_MASK		0xFFFF
+#define REG_TINT_TPD_THRESH             0x15C8 /* tpd threshold to trigger interrupt */
 
 /* Mail box */
 #define MB_RFDX_PROD_IDX_MASK		0xFFFF
 #define REG_MB_RFD0_PROD_IDX		0x15E0
-#define REG_MB_RFD1_PROD_IDX		0x15E4
-#define REG_MB_RFD2_PROD_IDX		0x15E8
-#define REG_MB_RFD3_PROD_IDX		0x15EC
 
-#define MB_PRIO_PROD_IDX_MASK		0xFFFF
-#define REG_MB_PRIO_PROD_IDX		0x15F0
-#define MB_HTPD_PROD_IDX_SHIFT		0
-#define MB_NTPD_PROD_IDX_SHIFT		16
-
-#define MB_PRIO_CONS_IDX_MASK		0xFFFF
-#define REG_MB_PRIO_CONS_IDX		0x15F4
-#define MB_HTPD_CONS_IDX_SHIFT		0
-#define MB_NTPD_CONS_IDX_SHIFT		16
+#define REG_TPD_PRI1_PIDX               0x15F0	/* 16bit,hi-tpd producer idx */
+#define REG_TPD_PRI0_PIDX		0x15F2	/* 16bit,lo-tpd producer idx */
+#define REG_TPD_PRI1_CIDX		0x15F4	/* 16bit,hi-tpd consumer idx */
+#define REG_TPD_PRI0_CIDX		0x15F6	/* 16bit,lo-tpd consumer idx */
 
 #define REG_MB_RFD01_CONS_IDX		0x15F8
 #define MB_RFD0_CONS_IDX_MASK		0x0000FFFF
 #define MB_RFD1_CONS_IDX_MASK		0xFFFF0000
-#define REG_MB_RFD23_CONS_IDX		0x15FC
-#define MB_RFD2_CONS_IDX_MASK		0x0000FFFF
-#define MB_RFD3_CONS_IDX_MASK		0xFFFF0000
 
 /* Interrupt Status Register */
 #define REG_ISR    			0x1600
@@ -705,13 +709,6 @@
 #define REG_INT_RETRIG_TIMER		0x1608
 #define INT_RETRIG_TIMER_MASK		0xFFFF
 
-#define REG_HDS_CTRL			0x160C
-#define HDS_CTRL_EN			0x0001
-#define HDS_CTRL_BACKFILLSIZE_SHIFT	8
-#define HDS_CTRL_BACKFILLSIZE_MASK	0x0FFF
-#define HDS_CTRL_MAX_HDRSIZE_SHIFT	20
-#define HDS_CTRL_MAC_HDRSIZE_MASK	0x0FFF
-
 #define REG_MAC_RX_STATUS_BIN 		0x1700
 #define REG_MAC_RX_STATUS_END 		0x175c
 #define REG_MAC_TX_STATUS_BIN 		0x1760
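The reworked atl1c_hw.h builds most register fields from paired _MASK/_SHIFT constants and drives them through the new FIELD_GETX/FIELD_SETX/FIELDX helpers added at the top of the header. A standalone demo of those helpers against a made-up 4-bit field (the DEMO_TIMER name and its bit position are invented for illustration; the macro bodies match the ones added above):

#include <stdio.h>
#include <stdint.h>

/* Same helper shapes as the new atl1c_hw.h macros. */
#define FIELD_GETX(_x, _name)   ((_x) >> (_name##_SHIFT) & (_name##_MASK))
#define FIELD_SETX(_x, _name, _v) \
	(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) | \
	 (((_v) & (_name##_MASK)) << (_name##_SHIFT)))
#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT))

/* Hypothetical 4-bit field occupying bits 20..23 of a register. */
#define DEMO_TIMER_MASK		0xFUL
#define DEMO_TIMER_SHIFT	20

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	printf("old field value: %lu\n",
	       (unsigned long)FIELD_GETX(reg, DEMO_TIMER));
	reg = FIELD_SETX(reg, DEMO_TIMER, 0xC);	/* overwrite just that field */
	printf("new field value: %lu\n",
	       (unsigned long)FIELD_GETX(reg, DEMO_TIMER));
	printf("standalone encode: 0x%08lx\n",
	       (unsigned long)FIELDX(DEMO_TIMER, 0xC));
	return 0;
}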
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1ef0c927..df10637 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -54,8 +54,9 @@
 };
 MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
 
-MODULE_AUTHOR("Jie Yang <jie.yang@atheros.com>");
-MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
+MODULE_AUTHOR("Jie Yang");
+MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>");
+MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(ATL1C_DRV_VERSION);
 
@@ -63,9 +64,9 @@
 static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw);
 static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw);
 static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
-static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
+static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
 static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
-static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
+static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
 		   int *work_done, int work_to_do);
 static int atl1c_up(struct atl1c_adapter *adapter);
 static void atl1c_down(struct atl1c_adapter *adapter);
@@ -74,50 +75,49 @@
 	128, 256, 512, 1024, 2048, 4096,
 };
 
-static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] =
-{
-	REG_MB_RFD0_PROD_IDX,
-	REG_MB_RFD1_PROD_IDX,
-	REG_MB_RFD2_PROD_IDX,
-	REG_MB_RFD3_PROD_IDX
-};
-
-static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
-{
-	REG_RFD0_HEAD_ADDR_LO,
-	REG_RFD1_HEAD_ADDR_LO,
-	REG_RFD2_HEAD_ADDR_LO,
-	REG_RFD3_HEAD_ADDR_LO
-};
-
-static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
-{
-	REG_RRD0_HEAD_ADDR_LO,
-	REG_RRD1_HEAD_ADDR_LO,
-	REG_RRD2_HEAD_ADDR_LO,
-	REG_RRD3_HEAD_ADDR_LO
-};
 
 static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
 static void atl1c_pcie_patch(struct atl1c_hw *hw)
 {
-	u32 data;
+	u32 mst_data, data;
 
-	AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
-	data |= PCIE_PHYMISC_FORCE_RCV_DET;
-	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
+	/* pclk sel could switch to 25M */
+	AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data);
+	mst_data &= ~MASTER_CTRL_CLK_SEL_DIS;
+	AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data);
 
+	/* WoL/PCIE related settings */
+	if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
+		AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
+		data |= PCIE_PHYMISC_FORCE_RCV_DET;
+		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
+	} else { /* new dev set bit5 of MASTER */
+		if (!(mst_data & MASTER_CTRL_WAKEN_25M))
+			AT_WRITE_REG(hw, REG_MASTER_CTRL,
+				mst_data | MASTER_CTRL_WAKEN_25M);
+	}
+	/* aspm/PCIE setting only for l2cb 1.0 */
 	if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
 		AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
-
-		data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK <<
-			PCIE_PHYMISC2_SERDES_CDR_SHIFT);
-		data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
-		data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
-			PCIE_PHYMISC2_SERDES_TH_SHIFT);
-		data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
+		data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW,
+			L2CB1_PCIE_PHYMISC2_CDR_BW);
+		data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH,
+			L2CB1_PCIE_PHYMISC2_L0S_TH);
 		AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
+		/* extend L1 sync timer */
+		AT_READ_REG(hw, REG_LINK_CTRL, &data);
+		data |= LINK_CTRL_EXT_SYNC;
+		AT_WRITE_REG(hw, REG_LINK_CTRL, data);
+	}
+	/* l2cb 1.x & l1d 1.x */
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) {
+		AT_READ_REG(hw, REG_PM_CTRL, &data);
+		data |= PM_CTRL_L0S_BUFSRX_EN;
+		AT_WRITE_REG(hw, REG_PM_CTRL, data);
+		/* clear vendor msg */
+		AT_READ_REG(hw, REG_DMA_DBG, &data);
+		AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG);
 	}
 }
 
@@ -130,6 +130,7 @@
 	u32 data;
 	u32 pci_cmd;
 	struct pci_dev *pdev = hw->adapter->pdev;
+	int pos;
 
 	AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
 	pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
@@ -146,10 +147,16 @@
 	/*
 	 * Mask some pcie error bits
 	 */
-	AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data);
-	data &= ~PCIE_UC_SERVRITY_DLP;
-	data &= ~PCIE_UC_SERVRITY_FCP;
-	AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+	data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+	/* clear error status */
+	pci_write_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
+			PCI_EXP_DEVSTA_NFED |
+			PCI_EXP_DEVSTA_FED |
+			PCI_EXP_DEVSTA_CED |
+			PCI_EXP_DEVSTA_URD);
 
 	AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
 	data &= ~LTSSM_ID_EN_WRO;
@@ -207,14 +214,14 @@
  * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
  * of the idle status register until the device is actually idle
  */
-static u32 atl1c_wait_until_idle(struct atl1c_hw *hw)
+static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
 {
 	int timeout;
 	u32 data;
 
 	for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
 		AT_READ_REG(hw, REG_IDLE_STATUS, &data);
-		if ((data & IDLE_STATUS_MASK) == 0)
+		if ((data & modu_ctrl) == 0)
 			return 0;
 		msleep(1);
 	}
@@ -265,7 +272,7 @@
 		if (atl1c_stop_mac(hw) != 0)
 			if (netif_msg_hw(adapter))
 				dev_warn(&pdev->dev, "stop mac failed\n");
-		atl1c_set_aspm(hw, false);
+		atl1c_set_aspm(hw, SPEED_0);
 		netif_carrier_off(netdev);
 		netif_stop_queue(netdev);
 		atl1c_phy_reset(hw);
@@ -283,7 +290,7 @@
 		    adapter->link_duplex != duplex) {
 			adapter->link_speed  = speed;
 			adapter->link_duplex = duplex;
-			atl1c_set_aspm(hw, true);
+			atl1c_set_aspm(hw, speed);
 			atl1c_enable_tx_ctrl(hw);
 			atl1c_enable_rx_ctrl(hw);
 			atl1c_setup_mac_ctrl(adapter);
@@ -523,11 +530,16 @@
 static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
+	struct atl1c_hw *hw = &adapter->hw;
 	int old_mtu   = netdev->mtu;
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 
-	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
-			(max_frame > MAX_JUMBO_FRAME_SIZE)) {
+	/* Fast Ethernet controller doesn't support jumbo packets */
+	if (((hw->nic_type == athr_l2c ||
+	      hw->nic_type == athr_l2c_b ||
+	      hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) ||
+	      max_frame < ETH_ZLEN + ETH_FCS_LEN ||
+	      max_frame > MAX_JUMBO_FRAME_SIZE) {
 		if (netif_msg_link(adapter))
 			dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
 		return -EINVAL;
@@ -563,7 +575,7 @@
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 	u16 result;
 
-	atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
+	atl1c_read_phy_reg(&adapter->hw, reg_num, &result);
 	return result;
 }
 
@@ -572,7 +584,7 @@
 {
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 
-	atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
+	atl1c_write_phy_reg(&adapter->hw, reg_num, val);
 }
 
 /*
@@ -696,12 +708,8 @@
 
 	hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE  |
 			 ATL1C_TXQ_MODE_ENHANCE;
-	if (link_ctrl_data & LINK_CTRL_L0S_EN)
-		hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
-	if (link_ctrl_data & LINK_CTRL_L1_EN)
-		hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
-	if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
-		hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
+	hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT |
+			  ATL1C_ASPM_L1_SUPPORT;
 	hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
 
 	if (hw->nic_type == athr_l1c ||
@@ -729,9 +737,8 @@
 	device_set_wakeup_enable(&pdev->dev, false);
 	adapter->link_speed = SPEED_0;
 	adapter->link_duplex = FULL_DUPLEX;
-	adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
 	adapter->tpd_ring[0].count = 1024;
-	adapter->rfd_ring[0].count = 512;
+	adapter->rfd_ring.count = 512;
 
 	hw->vendor_id = pdev->vendor;
 	hw->device_id = pdev->device;
@@ -750,22 +757,12 @@
 	hw->phy_configured = false;
 	hw->preamble_len = 7;
 	hw->max_frame_size = adapter->netdev->mtu;
-	if (adapter->num_rx_queues < 2) {
-		hw->rss_type = atl1c_rss_disable;
-		hw->rss_mode = atl1c_rss_mode_disable;
-	} else {
-		hw->rss_type = atl1c_rss_ipv4;
-		hw->rss_mode = atl1c_rss_mul_que_mul_int;
-		hw->rss_hash_bits = 16;
-	}
 	hw->autoneg_advertised = ADVERTISED_Autoneg;
 	hw->indirect_tab = 0xE4E4E4E4;
 	hw->base_cpu = 0;
 
 	hw->ict = 50000;		/* 100ms */
 	hw->smb_timer = 200000;	  	/* 400ms */
-	hw->cmb_tpd = 4;
-	hw->cmb_tx_timer = 1;		/* 2 us  */
 	hw->rx_imt = 200;
 	hw->tx_imt = 1000;
 
@@ -773,9 +770,6 @@
 	hw->rfd_burst = 8;
 	hw->dma_order = atl1c_dma_ord_out;
 	hw->dmar_block = atl1c_dma_req_1024;
-	hw->dmaw_block = atl1c_dma_req_1024;
-	hw->dmar_dly_cnt = 15;
-	hw->dmaw_dly_cnt = 4;
 
 	if (atl1c_alloc_queues(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
@@ -851,24 +845,22 @@
  */
 static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
 {
-	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
-	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
 	struct atl1c_buffer *buffer_info;
 	struct pci_dev *pdev = adapter->pdev;
-	int i, j;
+	int j;
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		for (j = 0; j < rfd_ring[i].count; j++) {
-			buffer_info = &rfd_ring[i].buffer_info[j];
-			atl1c_clean_buffer(pdev, buffer_info, 0);
-		}
-		/* zero out the descriptor ring */
-		memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
-		rfd_ring[i].next_to_clean = 0;
-		rfd_ring[i].next_to_use = 0;
-		rrd_ring[i].next_to_use = 0;
-		rrd_ring[i].next_to_clean = 0;
+	for (j = 0; j < rfd_ring->count; j++) {
+		buffer_info = &rfd_ring->buffer_info[j];
+		atl1c_clean_buffer(pdev, buffer_info, 0);
 	}
+	/* zero out the descriptor ring */
+	memset(rfd_ring->desc, 0, rfd_ring->size);
+	rfd_ring->next_to_clean = 0;
+	rfd_ring->next_to_use = 0;
+	rrd_ring->next_to_use = 0;
+	rrd_ring->next_to_clean = 0;
 }
 
 /*
@@ -877,8 +869,8 @@
 static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
 {
 	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
-	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
-	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
 	struct atl1c_buffer *buffer_info;
 	int i, j;
 
@@ -890,15 +882,13 @@
 			ATL1C_SET_BUFFER_STATE(&buffer_info[i],
 					ATL1C_BUFFER_FREE);
 	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rfd_ring[i].next_to_use = 0;
-		rfd_ring[i].next_to_clean = 0;
-		rrd_ring[i].next_to_use = 0;
-		rrd_ring[i].next_to_clean = 0;
-		for (j = 0; j < rfd_ring[i].count; j++) {
-			buffer_info = &rfd_ring[i].buffer_info[j];
-			ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
-		}
+	rfd_ring->next_to_use = 0;
+	rfd_ring->next_to_clean = 0;
+	rrd_ring->next_to_use = 0;
+	rrd_ring->next_to_clean = 0;
+	for (j = 0; j < rfd_ring->count; j++) {
+		buffer_info = &rfd_ring->buffer_info[j];
+		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
 	}
 }
 
@@ -935,27 +925,23 @@
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
-	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
-	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
 	struct atl1c_ring_header *ring_header = &adapter->ring_header;
-	int num_rx_queues = adapter->num_rx_queues;
 	int size;
 	int i;
 	int count = 0;
 	int rx_desc_count = 0;
 	u32 offset = 0;
 
-	rrd_ring[0].count = rfd_ring[0].count;
+	rrd_ring->count = rfd_ring->count;
 	for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
 		tpd_ring[i].count = tpd_ring[0].count;
 
-	for (i = 1; i < adapter->num_rx_queues; i++)
-		rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count;
-
 	/* 2 tpd queue, one high priority queue,
 	 * another normal priority queue */
 	size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
-		rfd_ring->count * num_rx_queues);
+		rfd_ring->count);
 	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
 	if (unlikely(!tpd_ring->buffer_info)) {
 		dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
@@ -968,12 +954,11 @@
 		count += tpd_ring[i].count;
 	}
 
-	for (i = 0; i < num_rx_queues; i++) {
-		rfd_ring[i].buffer_info =
-			(struct atl1c_buffer *) (tpd_ring->buffer_info + count);
-		count += rfd_ring[i].count;
-		rx_desc_count += rfd_ring[i].count;
-	}
+	rfd_ring->buffer_info =
+		(struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+	count += rfd_ring->count;
+	rx_desc_count += rfd_ring->count;
+
 	/*
 	 * real ring DMA buffer
 	 * each ring/block may need up to 8 bytes for alignment, hence the
@@ -983,8 +968,7 @@
 		sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
 		sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
 		sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
-		sizeof(struct atl1c_hw_stats) +
-		8 * 4 + 8 * 2 * num_rx_queues;
+		8 * 4;
 
 	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
 				&ring_header->dma);
@@ -1005,25 +989,18 @@
 		offset += roundup(tpd_ring[i].size, 8);
 	}
 	/* init RFD ring */
-	for (i = 0; i < num_rx_queues; i++) {
-		rfd_ring[i].dma = ring_header->dma + offset;
-		rfd_ring[i].desc = (u8 *) ring_header->desc + offset;
-		rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) *
-				rfd_ring[i].count;
-		offset += roundup(rfd_ring[i].size, 8);
-	}
+	rfd_ring->dma = ring_header->dma + offset;
+	rfd_ring->desc = (u8 *) ring_header->desc + offset;
+	rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count;
+	offset += roundup(rfd_ring->size, 8);
 
 	/* init RRD ring */
-	for (i = 0; i < num_rx_queues; i++) {
-		rrd_ring[i].dma = ring_header->dma + offset;
-		rrd_ring[i].desc = (u8 *) ring_header->desc + offset;
-		rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) *
-				rrd_ring[i].count;
-		offset += roundup(rrd_ring[i].size, 8);
-	}
+	rrd_ring->dma = ring_header->dma + offset;
+	rrd_ring->desc = (u8 *) ring_header->desc + offset;
+	rrd_ring->size = sizeof(struct atl1c_recv_ret_status) *
+		rrd_ring->count;
+	offset += roundup(rrd_ring->size, 8);
 
-	adapter->smb.dma = ring_header->dma + offset;
-	adapter->smb.smb = (u8 *)ring_header->desc + offset;
 	return 0;
 
 err_nomem:
@@ -1034,15 +1011,10 @@
 static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
 {
 	struct atl1c_hw *hw = &adapter->hw;
-	struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *)
-				adapter->rfd_ring;
-	struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *)
-				adapter->rrd_ring;
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
 	struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
 				adapter->tpd_ring;
-	struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
-	struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
-	int i;
 	u32 data;
 
 	/* TPD */
@@ -1050,10 +1022,10 @@
 			(u32)((tpd_ring[atl1c_trans_normal].dma &
 				AT_DMA_HI_ADDR_MASK) >> 32));
 	/* just enable normal priority TX queue */
-	AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO,
+	AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO,
 			(u32)(tpd_ring[atl1c_trans_normal].dma &
 				AT_DMA_LO_ADDR_MASK));
-	AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO,
+	AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO,
 			(u32)(tpd_ring[atl1c_trans_high].dma &
 				AT_DMA_LO_ADDR_MASK));
 	AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
@@ -1062,31 +1034,21 @@
 
 	/* RFD */
 	AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
-			(u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32));
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i],
-			(u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+			(u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
+	AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO,
+			(u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK));
 
 	AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
-			rfd_ring[0].count & RFD_RING_SIZE_MASK);
+			rfd_ring->count & RFD_RING_SIZE_MASK);
 	AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
 			adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
 
 	/* RRD */
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i],
-			(u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+	AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO,
+			(u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK));
 	AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
-			(rrd_ring[0].count & RRD_RING_SIZE_MASK));
+			(rrd_ring->count & RRD_RING_SIZE_MASK));
 
-	/* CMB */
-	AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK);
-
-	/* SMB */
-	AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI,
-			(u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
-	AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
-			(u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
 	if (hw->nic_type == athr_l2c_b) {
 		AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
 		AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
@@ -1111,32 +1073,26 @@
 static void atl1c_configure_tx(struct atl1c_adapter *adapter)
 {
 	struct atl1c_hw *hw = &adapter->hw;
-	u32 dev_ctrl_data;
-	u32 max_pay_load;
+	int max_pay_load;
 	u16 tx_offload_thresh;
 	u32 txq_ctrl_data;
-	u32 max_pay_load_data;
 
-	tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
+	tx_offload_thresh = MAX_TSO_FRAME_SIZE;
 	AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
 		(tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
-	AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
-	max_pay_load  = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
-			DEVICE_CTRL_MAX_PAYLOAD_MASK;
-	hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
-	max_pay_load  = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
-			DEVICE_CTRL_MAX_RREQ_SZ_MASK;
+	max_pay_load = pcie_get_readrq(adapter->pdev) >> 8;
 	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
-
-	txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
-			TXQ_NUM_TPD_BURST_SHIFT;
-	if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
-		txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
-	max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] &
-			TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
-	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2)
-		max_pay_load_data >>= 1;
-	txq_ctrl_data |= max_pay_load_data;
+	/*
+	 * if the BIOS changed the dma-read-max-length to an invalid value,
+	 * restore it to the default value
+	 */
+	if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) {
+		pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN);
+		hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN;
+	}
+	txq_ctrl_data =
+		hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ?
+		L2CB_TXQ_CFGV : L1C_TXQ_CFGV;
 
 	AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
 }
@@ -1151,71 +1107,25 @@
 
 	if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
 		rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
-	if (hw->rss_type == atl1c_rss_ipv4)
-		rxq_ctrl_data |= RSS_HASH_IPV4;
-	if (hw->rss_type == atl1c_rss_ipv4_tcp)
-		rxq_ctrl_data |= RSS_HASH_IPV4_TCP;
-	if (hw->rss_type == atl1c_rss_ipv6)
-		rxq_ctrl_data |= RSS_HASH_IPV6;
-	if (hw->rss_type == atl1c_rss_ipv6_tcp)
-		rxq_ctrl_data |= RSS_HASH_IPV6_TCP;
-	if (hw->rss_type != atl1c_rss_disable)
-		rxq_ctrl_data |= RRS_HASH_CTRL_EN;
 
-	rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) <<
-			RSS_MODE_SHIFT;
-	rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
-			RSS_HASH_BITS_SHIFT;
-	if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
-		rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
-			ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
+	/* aspm for gigabit */
+	if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0)
+		rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT,
+			ASPM_THRUPUT_LIMIT_100M);
 
 	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
 }
 
-static void atl1c_configure_rss(struct atl1c_adapter *adapter)
-{
-	struct atl1c_hw *hw = &adapter->hw;
-
-	AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
-	AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
-}
-
 static void atl1c_configure_dma(struct atl1c_adapter *adapter)
 {
 	struct atl1c_hw *hw = &adapter->hw;
 	u32 dma_ctrl_data;
 
-	dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI;
-	if (hw->ctrl_flags & ATL1C_CMB_ENABLE)
-		dma_ctrl_data |= DMA_CTRL_CMB_EN;
-	if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
-		dma_ctrl_data |= DMA_CTRL_SMB_EN;
-	else
-		dma_ctrl_data |= MAC_CTRL_SMB_DIS;
-
-	switch (hw->dma_order) {
-	case atl1c_dma_ord_in:
-		dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER;
-		break;
-	case atl1c_dma_ord_enh:
-		dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER;
-		break;
-	case atl1c_dma_ord_out:
-		dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER;
-		break;
-	default:
-		break;
-	}
-
-	dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
-		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT;
-	dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
-		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT;
-	dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
-		<< DMA_CTRL_DMAR_DLY_CNT_SHIFT;
-	dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
-		<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;
+	dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) |
+		DMA_CTRL_RREQ_PRI_DATA |
+		FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) |
+		FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) |
+		FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF);
 
 	AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
 }
@@ -1230,21 +1140,21 @@
 	u32 data;
 
 	AT_READ_REG(hw, REG_RXQ_CTRL, &data);
-	data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN |
-		  RXQ3_CTRL_EN | RXQ_CTRL_EN);
+	data &= ~RXQ_CTRL_EN;
 	AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
 
 	AT_READ_REG(hw, REG_TXQ_CTRL, &data);
 	data &= ~TXQ_CTRL_EN;
-	AT_WRITE_REG(hw, REG_TWSI_CTRL, data);
+	AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
 
-	atl1c_wait_until_idle(hw);
+	atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY);
 
 	AT_READ_REG(hw, REG_MAC_CTRL, &data);
 	data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
 	AT_WRITE_REG(hw, REG_MAC_CTRL, data);
 
-	return (int)atl1c_wait_until_idle(hw);
+	return (int)atl1c_wait_until_idle(hw,
+		IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY);
 }
 
 static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
@@ -1252,19 +1162,6 @@
 	u32 data;
 
 	AT_READ_REG(hw, REG_RXQ_CTRL, &data);
-	switch (hw->adapter->num_rx_queues) {
-	case 4:
-		data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN);
-		break;
-	case 3:
-		data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN);
-		break;
-	case 2:
-		data |= RXQ1_CTRL_EN;
-		break;
-	default:
-		break;
-	}
 	data |= RXQ_CTRL_EN;
 	AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
 }
@@ -1300,131 +1197,104 @@
 	 * clearing, and should clear within a microsecond.
 	 */
 	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
-	master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF;
-	AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST)
-			& 0xFFFF));
+	master_ctrl_data |= MASTER_CTRL_OOB_DIS;
+	AT_WRITE_REG(hw, REG_MASTER_CTRL,
+		master_ctrl_data | MASTER_CTRL_SOFT_RST);
 
 	AT_WRITE_FLUSH(hw);
 	msleep(10);
 	/* Wait at least 10ms for All module to be Idle */
 
-	if (atl1c_wait_until_idle(hw)) {
+	if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) {
 		dev_err(&pdev->dev,
 			"MAC state machine can't be idle since"
 			" disabled for 10ms second\n");
 		return -1;
 	}
+	AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
+
 	return 0;
 }
 
 static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
 {
-	u32 pm_ctrl_data;
+	u16 ctrl_flags = hw->ctrl_flags;
 
-	AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
-	pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
-			PM_CTRL_L1_ENTRY_TIMER_SHIFT);
-	pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
-	pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-	pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-	pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK;
-	pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
-
-	pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
-	pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
-	pm_ctrl_data |=	PM_CTRL_SERDES_L1_EN;
-	AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
+	hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT);
+	atl1c_set_aspm(hw, SPEED_0);
+	hw->ctrl_flags = ctrl_flags;
 }
 
 /*
  * Set ASPM state.
  * Enable/disable L0s/L1 depend on link state.
  */
-static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
+static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
 {
 	u32 pm_ctrl_data;
-	u32 link_ctrl_data;
-	u32 link_l1_timer = 0xF;
+	u32 link_l1_timer;
 
 	AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
-	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
-
-	pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
-	pm_ctrl_data &=  ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
-			PM_CTRL_L1_ENTRY_TIMER_SHIFT);
-	pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
-			PM_CTRL_LCKDET_TIMER_SHIFT);
-	pm_ctrl_data |= AT_LCKDET_TIMER	<< PM_CTRL_LCKDET_TIMER_SHIFT;
-
-	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
-		hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
-		link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
-		if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
-			if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10)
-				link_ctrl_data |= LINK_CTRL_EXT_SYNC;
-		}
-
-		AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
-
-		pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER;
-		pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK <<
-			PM_CTRL_PM_REQ_TIMER_SHIFT);
-		pm_ctrl_data |= AT_ASPM_L1_TIMER <<
-			PM_CTRL_PM_REQ_TIMER_SHIFT;
-		pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
-		pm_ctrl_data &= ~PM_CTRL_HOTRST;
-		pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
-		pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
-	}
-	pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
-	if (linkup) {
-		pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-		pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-		if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
-			pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
-		if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
-			pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
-
-		if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
-			hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
-			if (hw->nic_type == athr_l2c_b)
-				if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
-					pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-			pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
-			pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
-			pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
-			pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
-		if (hw->adapter->link_speed == SPEED_100 ||
-				hw->adapter->link_speed == SPEED_1000) {
-				pm_ctrl_data &=  ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
-					PM_CTRL_L1_ENTRY_TIMER_SHIFT);
-				if (hw->nic_type == athr_l2c_b)
-					link_l1_timer = 7;
-				else if (hw->nic_type == athr_l2c_b2 ||
-					hw->nic_type == athr_l1d_2)
-					link_l1_timer = 4;
-				pm_ctrl_data |= link_l1_timer <<
-					PM_CTRL_L1_ENTRY_TIMER_SHIFT;
-			}
-		} else {
-			pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
-			pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
-			pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
-			pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
-			pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-			pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-
-		}
+	pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN |
+			  PM_CTRL_ASPM_L0S_EN |
+			  PM_CTRL_MAC_ASPM_CHK);
+	/* L1 timer */
+	if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
+		pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S;
+		link_l1_timer =
+			link_speed == SPEED_1000 || link_speed == SPEED_100 ?
+			L1D_PMCTRL_L1_ENTRY_TM_16US : 1;
+		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
+			L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer);
 	} else {
-		pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
-		pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-		pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
-		pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
+		link_l1_timer = hw->nic_type == athr_l2c_b ?
+			L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM;
+		if (link_speed != SPEED_1000 && link_speed != SPEED_100)
+			link_l1_timer = 1;
+		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
+			PM_CTRL_L1_ENTRY_TIMER, link_l1_timer);
+	}
 
-		if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
-			pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
-		else
-			pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+	/* L0S/L1 enable */
+	if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
+		pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK;
+	if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
+		pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK;
+
+	/* l2cb & l1d & l2cb2 & l1d2 */
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
+	    hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
+		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
+			PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF);
+		pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER |
+				PM_CTRL_SERDES_PD_EX_L1 |
+				PM_CTRL_CLK_SWH_L1;
+		pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
+				  PM_CTRL_SERDES_PLL_L1_EN |
+				  PM_CTRL_SERDES_BUFS_RX_L1_EN |
+				  PM_CTRL_SA_DLY_EN |
+				  PM_CTRL_HOTRST);
+		/* disable l0s if link down or l2cb */
+		if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b)
+			pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+	} else { /* l1c */
+		pm_ctrl_data =
+			FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0);
+		if (link_speed != SPEED_0) {
+			pm_ctrl_data |= PM_CTRL_SERDES_L1_EN |
+					PM_CTRL_SERDES_PLL_L1_EN |
+					PM_CTRL_SERDES_BUFS_RX_L1_EN;
+			pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 |
+					  PM_CTRL_CLK_SWH_L1 |
+					  PM_CTRL_ASPM_L0S_EN |
+					  PM_CTRL_ASPM_L1_EN);
+		} else { /* link down */
+			pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
+			pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
+					  PM_CTRL_SERDES_PLL_L1_EN |
+					  PM_CTRL_SERDES_BUFS_RX_L1_EN |
+					  PM_CTRL_ASPM_L0S_EN);
+		}
 	}
 	AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
 
@@ -1487,6 +1357,10 @@
 	u32 intr_modrt_data;
 	u32 data;
 
+	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
+	master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN |
+			      MASTER_CTRL_RX_ITIMER_EN |
+			      MASTER_CTRL_INT_RDCLR);
 	/* clear interrupt status */
 	AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
 	/*  Clear any WOL status */
@@ -1525,25 +1399,15 @@
 	master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
 	AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
 
-	if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
-		AT_WRITE_REG(hw, REG_CMB_TPD_THRESH,
-			hw->cmb_tpd & CMB_TPD_THRESH_MASK);
-		AT_WRITE_REG(hw, REG_CMB_TX_TIMER,
-			hw->cmb_tx_timer & CMB_TX_TIMER_MASK);
-	}
+	AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
+		hw->smb_timer & SMB_STAT_TIMER_MASK);
 
-	if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
-		AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
-			hw->smb_timer & SMB_STAT_TIMER_MASK);
 	/* set MTU */
 	AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
 			VLAN_HLEN + ETH_FCS_LEN);
-	/* HDS, disable */
-	AT_WRITE_REG(hw, REG_HDS_CTRL, 0);
 
 	atl1c_configure_tx(adapter);
 	atl1c_configure_rx(adapter);
-	atl1c_configure_rss(adapter);
 	atl1c_configure_dma(adapter);
 
 	return 0;
@@ -1635,16 +1499,11 @@
 	struct pci_dev *pdev = adapter->pdev;
 	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
 	u16 hw_next_to_clean;
-	u16 shift;
-	u32 data;
+	u16 reg;
 
-	if (type == atl1c_trans_high)
-		shift = MB_HTPD_CONS_IDX_SHIFT;
-	else
-		shift = MB_NTPD_CONS_IDX_SHIFT;
+	reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
 
-	AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data);
-	hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK;
+	AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean);
 
 	while (next_to_clean != hw_next_to_clean) {
 		buffer_info = &tpd_ring->buffer_info[next_to_clean];
@@ -1746,9 +1605,9 @@
 	skb_checksum_none_assert(skb);
 }
 
-static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid)
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
 {
-	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid];
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
 	struct pci_dev *pdev = adapter->pdev;
 	struct atl1c_buffer *buffer_info, *next_info;
 	struct sk_buff *skb;
@@ -1800,7 +1659,7 @@
 		/* TODO: update mailbox here */
 		wmb();
 		rfd_ring->next_to_use = rfd_next_to_use;
-		AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid],
+		AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX,
 			rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
 	}
 
@@ -1839,7 +1698,7 @@
 	rfd_ring->next_to_clean = rfd_index;
 }
 
-static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
+static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
 		   int *work_done, int work_to_do)
 {
 	u16 rfd_num, rfd_index;
@@ -1847,8 +1706,8 @@
 	u16 length;
 	struct pci_dev *pdev = adapter->pdev;
 	struct net_device *netdev  = adapter->netdev;
-	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que];
-	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que];
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
 	struct sk_buff *skb;
 	struct atl1c_recv_ret_status *rrs;
 	struct atl1c_buffer *buffer_info;
@@ -1914,7 +1773,7 @@
 		count++;
 	}
 	if (count)
-		atl1c_alloc_rx_buffer(adapter, que);
+		atl1c_alloc_rx_buffer(adapter);
 }
 
 /*
@@ -1931,7 +1790,7 @@
 	if (!netif_carrier_ok(adapter->netdev))
 		goto quit_polling;
 	/* just enable one RXQ */
-	atl1c_clean_rx_irq(adapter, 0, &work_done, budget);
+	atl1c_clean_rx_irq(adapter, &work_done, budget);
 
 	if (work_done < budget) {
 quit_polling:
@@ -2206,23 +2065,10 @@
 			   struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
 {
 	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
-	u32 prod_data;
+	u16 reg;
 
-	AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data);
-	switch (type) {
-	case atl1c_trans_high:
-		prod_data &= 0xFFFF0000;
-		prod_data |= tpd_ring->next_to_use & 0xFFFF;
-		break;
-	case atl1c_trans_normal:
-		prod_data &= 0x0000FFFF;
-		prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16;
-		break;
-	default:
-		break;
-	}
-	wmb();
-	AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data);
+	reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX;
+	AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use);
 }
 
 static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
@@ -2307,8 +2153,7 @@
 				"Unable to allocate MSI interrupt Error: %d\n",
 				err);
 		adapter->have_msi = false;
-	} else
-		netdev->irq = pdev->irq;
+	}
 
 	if (!adapter->have_msi)
 		flags |= IRQF_SHARED;
@@ -2333,19 +2178,16 @@
 	struct net_device *netdev = adapter->netdev;
 	int num;
 	int err;
-	int i;
 
 	netif_carrier_off(netdev);
 	atl1c_init_ring_ptrs(adapter);
 	atl1c_set_multi(netdev);
 	atl1c_restore_vlan(adapter);
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		num = atl1c_alloc_rx_buffer(adapter, i);
-		if (unlikely(num == 0)) {
-			err = -ENOMEM;
-			goto err_alloc_rx;
-		}
+	num = atl1c_alloc_rx_buffer(adapter);
+	if (unlikely(num == 0)) {
+		err = -ENOMEM;
+		goto err_alloc_rx;
 	}
 
 	if (atl1c_configure(adapter)) {
@@ -2383,6 +2225,8 @@
 	napi_disable(&adapter->napi);
 	atl1c_irq_disable(adapter);
 	atl1c_free_irq(adapter);
+	/* disable ASPM if device inactive */
+	atl1c_disable_l0s_l1(&adapter->hw);
 	/* reset MAC to disable all RX/TX */
 	atl1c_reset_mac(&adapter->hw);
 	msleep(1);
@@ -2510,9 +2354,14 @@
 			mac_ctrl_data |= MAC_CTRL_DUPLX;
 
 		/* turn on magic packet wol */
-		if (wufc & AT_WUFC_MAG)
+		if (wufc & AT_WUFC_MAG) {
 			wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
-
+			if (hw->nic_type == athr_l2c_b &&
+			    hw->revision_id == L2CB_V11) {
+				wol_ctrl_data |=
+					WOL_PATTERN_EN | WOL_PATTERN_PME_EN;
+			}
+		}
 		if (wufc & AT_WUFC_LNKC) {
 			wol_ctrl_data |=  WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
 			/* only link up can wake up */
@@ -2616,7 +2465,6 @@
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 	pci_set_drvdata(pdev, netdev);
 
-	netdev->irq  = pdev->irq;
 	netdev->netdev_ops = &atl1c_netdev_ops;
 	netdev->watchdog_timeo = AT_TX_WATCHDOG;
 	atl1c_set_ethtool_ops(netdev);
@@ -2706,7 +2554,6 @@
 		dev_err(&pdev->dev, "cannot map device registers\n");
 		goto err_ioremap;
 	}
-	netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
 
 	/* init mii data */
 	adapter->mii.dev = netdev;
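
The atl1c hunks above replace the driver's hand-rolled parsing of the PCIe Device Control register with the core pcie_get_readrq()/pcie_set_readrq() helpers and use pci_find_ext_capability() to reach the AER severity register. A rough sketch of the readrq clamp idiom only; the function name and the 512-byte floor are illustrative assumptions, not values taken from the atl1c documentation:

#include <linux/pci.h>

/* Sketch: keep the PCIe Max Read Request Size inside a sane range.
 * example_clamp_readrq() and the 512-byte floor are placeholders.
 */
static void example_clamp_readrq(struct pci_dev *pdev)
{
	int rq = pcie_get_readrq(pdev);		/* current MRRS in bytes */

	if (rq < 512)
		pcie_set_readrq(pdev, 512);	/* restore a sane default */
	else if (rq > 4096)
		pcie_set_readrq(pdev, 4096);	/* PCIe maximum is 4096 bytes */
}
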
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 93ff2b2..1220e51 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1883,27 +1883,24 @@
 	int err = 0;
 
 	adapter->have_msi = true;
-	err = pci_enable_msi(adapter->pdev);
+	err = pci_enable_msi(pdev);
 	if (err) {
-		netdev_dbg(adapter->netdev,
+		netdev_dbg(netdev,
 			   "Unable to allocate MSI interrupt Error: %d\n", err);
 		adapter->have_msi = false;
-	} else
-		netdev->irq = pdev->irq;
-
+	}
 
 	if (!adapter->have_msi)
 		flags |= IRQF_SHARED;
-	err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
-			netdev->name, netdev);
+	err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
 	if (err) {
 		netdev_dbg(adapter->netdev,
 			   "Unable to allocate interrupt Error: %d\n", err);
 		if (adapter->have_msi)
-			pci_disable_msi(adapter->pdev);
+			pci_disable_msi(pdev);
 		return err;
 	}
-	netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
+	netdev_dbg(netdev, "atl1e_request_irq OK\n");
 	return err;
 }
 
@@ -2233,7 +2230,6 @@
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 	pci_set_drvdata(pdev, netdev);
 
-	netdev->irq  = pdev->irq;
 	netdev->netdev_ops = &atl1e_netdev_ops;
 
 	netdev->watchdog_timeo = AT_TX_WATCHDOG;
@@ -2319,7 +2315,6 @@
 		netdev_err(netdev, "cannot map device registers\n");
 		goto err_ioremap;
 	}
-	netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
 
 	/* init mii data */
 	adapter->mii.dev = netdev;
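
Both atl1c and atl1e now take the vector straight from pdev->irq instead of mirroring it into netdev->irq. A hedged sketch of the MSI-with-shared-INTx-fallback request pattern those hunks follow; the handler, name string and have_msi flag are placeholders, not driver symbols:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: try MSI first, fall back to a shared legacy interrupt. */
static int example_request_irq(struct pci_dev *pdev, irq_handler_t example_intr,
			       void *example_dev, bool *have_msi)
{
	unsigned long flags = 0;
	int err;

	*have_msi = !pci_enable_msi(pdev);	/* returns 0 on success */
	if (!*have_msi)
		flags |= IRQF_SHARED;		/* legacy INTx may be shared */

	err = request_irq(pdev->irq, example_intr, flags, "example", example_dev);
	if (err && *have_msi)
		pci_disable_msi(pdev);
	return err;
}
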
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 40ac414..5d10884 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -266,7 +266,7 @@
 	 * interrupts & Clear any pending interrupt events
 	 */
 	/*
-	 * iowrite32(0, hw->hw_addr + REG_IMR);
+	 * atlx_irq_disable(adapter);
 	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
 	 */
 
@@ -1917,7 +1917,7 @@
 	return num_alloc;
 }
 
-static void atl1_intr_rx(struct atl1_adapter *adapter)
+static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
 {
 	int i, count;
 	u16 length;
@@ -1933,7 +1933,7 @@
 
 	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
 
-	while (1) {
+	while (count < budget) {
 		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
 		i = 1;
 		if (likely(rrd->xsz.valid)) {	/* packet valid */
@@ -2032,7 +2032,7 @@
 
 			__vlan_hwaccel_put_tag(skb, vlan_tag);
 		}
-		netif_rx(skb);
+		netif_receive_skb(skb);
 
 		/* let protocol layer free skb */
 		buffer_info->skb = NULL;
@@ -2065,14 +2065,17 @@
 		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
 		spin_unlock(&adapter->mb_lock);
 	}
+
+	return count;
 }
 
-static void atl1_intr_tx(struct atl1_adapter *adapter)
+static int atl1_intr_tx(struct atl1_adapter *adapter)
 {
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
 	struct atl1_buffer *buffer_info;
 	u16 sw_tpd_next_to_clean;
 	u16 cmb_tpd_next_to_clean;
+	int count = 0;
 
 	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
 	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
@@ -2092,12 +2095,16 @@
 
 		if (++sw_tpd_next_to_clean == tpd_ring->count)
 			sw_tpd_next_to_clean = 0;
+
+		count++;
 	}
 	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
 
 	if (netif_queue_stopped(adapter->netdev) &&
 	    netif_carrier_ok(adapter->netdev))
 		netif_wake_queue(adapter->netdev);
+
+	return count;
 }
 
 static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
@@ -2439,6 +2446,49 @@
 	return NETDEV_TX_OK;
 }
 
+static int atl1_rings_clean(struct napi_struct *napi, int budget)
+{
+	struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi);
+	int work_done = atl1_intr_rx(adapter, budget);
+
+	if (atl1_intr_tx(adapter))
+		work_done = budget;
+
+	/* There is more work to do; NAPI will poll again */
+	if (work_done >= budget)
+		return work_done;
+
+	napi_complete(napi);
+	/* re-enable Interrupt */
+	if (likely(adapter->int_enabled))
+		atlx_imr_set(adapter, IMR_NORMAL_MASK);
+	return work_done;
+}
+
+static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
+{
+	if (!napi_schedule_prep(&adapter->napi))
+		/* Even when the RX/TX interrupts are disabled via the IMR
+		 * register, the ISR bits may still be set (without raising an
+		 * IRQ). The NAPI helpers are therefore used to check whether
+		 * polling is already scheduled.
+		 */
+		return 0;
+
+	__napi_schedule(&adapter->napi);
+
+	/*
+	 * Disable the RX/TX interrupts via the IMR register if allowed;
+	 * the NAPI handler must re-enable them the same way.
+	 */
+	if (!adapter->int_enabled)
+		return 1;
+
+	atlx_imr_set(adapter, IMR_NORXTX_MASK);
+	return 1;
+}
+
 /*
  * atl1_intr - Interrupt Handler
  * @irq: interrupt number
@@ -2449,78 +2499,74 @@
 {
 	struct atl1_adapter *adapter = netdev_priv(data);
 	u32 status;
-	int max_ints = 10;
 
 	status = adapter->cmb.cmb->int_stats;
 	if (!status)
 		return IRQ_NONE;
 
-	do {
-		/* clear CMB interrupt status at once */
-		adapter->cmb.cmb->int_stats = 0;
+	/* Clear the CMB interrupt status right away, but leave the rx/tx
+	 * status bits set; they are dropped only once rx/tx processing has
+	 * actually been queued, otherwise an interrupt could be lost.
+	 */
+	adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX);
 
-		if (status & ISR_GPHY)	/* clear phy status */
-			atlx_clear_phy_int(adapter);
+	if (status & ISR_GPHY)	/* clear phy status */
+		atlx_clear_phy_int(adapter);
 
-		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
-		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
+	/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
+	iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
 
-		/* check if SMB intr */
-		if (status & ISR_SMB)
-			atl1_inc_smb(adapter);
+	/* check if SMB intr */
+	if (status & ISR_SMB)
+		atl1_inc_smb(adapter);
 
-		/* check if PCIE PHY Link down */
-		if (status & ISR_PHY_LINKDOWN) {
-			if (netif_msg_intr(adapter))
-				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-					"pcie phy link down %x\n", status);
-			if (netif_running(adapter->netdev)) {	/* reset MAC */
-				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-				schedule_work(&adapter->pcie_dma_to_rst_task);
-				return IRQ_HANDLED;
-			}
-		}
-
-		/* check if DMA read/write error ? */
-		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
-			if (netif_msg_intr(adapter))
-				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-					"pcie DMA r/w error (status = 0x%x)\n",
-					status);
-			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-			schedule_work(&adapter->pcie_dma_to_rst_task);
+	/* check if PCIE PHY Link down */
+	if (status & ISR_PHY_LINKDOWN) {
+		if (netif_msg_intr(adapter))
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"pcie phy link down %x\n", status);
+		if (netif_running(adapter->netdev)) {	/* reset MAC */
+			atlx_irq_disable(adapter);
+			schedule_work(&adapter->reset_dev_task);
 			return IRQ_HANDLED;
 		}
+	}
 
-		/* link event */
-		if (status & ISR_GPHY) {
-			adapter->soft_stats.tx_carrier_errors++;
-			atl1_check_for_link(adapter);
-		}
+	/* check if DMA read/write error ? */
+	if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+		if (netif_msg_intr(adapter))
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"pcie DMA r/w error (status = 0x%x)\n",
+				status);
+		atlx_irq_disable(adapter);
+		schedule_work(&adapter->reset_dev_task);
+		return IRQ_HANDLED;
+	}
 
-		/* transmit event */
-		if (status & ISR_CMB_TX)
-			atl1_intr_tx(adapter);
+	/* link event */
+	if (status & ISR_GPHY) {
+		adapter->soft_stats.tx_carrier_errors++;
+		atl1_check_for_link(adapter);
+	}
 
-		/* rx exception */
-		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
-			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
-			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
-			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
-				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
-				ISR_HOST_RRD_OV))
-				if (netif_msg_intr(adapter))
-					dev_printk(KERN_DEBUG,
-						&adapter->pdev->dev,
-						"rx exception, ISR = 0x%x\n",
-						status);
-			atl1_intr_rx(adapter);
-		}
+	/* transmit or receive event */
+	if (status & (ISR_CMB_TX | ISR_CMB_RX) &&
+	    atl1_sched_rings_clean(adapter))
+		adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats &
+					      ~(ISR_CMB_TX | ISR_CMB_RX);
 
-		if (--max_ints < 0)
-			break;
-
-	} while ((status = adapter->cmb.cmb->int_stats));
+	/* rx exception */
+	if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
+		ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+		ISR_HOST_RRD_OV))) {
+		if (netif_msg_intr(adapter))
+			dev_printk(KERN_DEBUG,
+				&adapter->pdev->dev,
+				"rx exception, ISR = 0x%x\n",
+				status);
+		atl1_sched_rings_clean(adapter);
+	}
 
 	/* re-enable Interrupt */
 	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
@@ -2599,6 +2645,7 @@
 	if (unlikely(err))
 		goto err_up;
 
+	napi_enable(&adapter->napi);
 	atlx_irq_enable(adapter);
 	atl1_check_link(adapter);
 	netif_start_queue(netdev);
@@ -2615,6 +2662,7 @@
 {
 	struct net_device *netdev = adapter->netdev;
 
+	napi_disable(&adapter->napi);
 	netif_stop_queue(netdev);
 	del_timer_sync(&adapter->phy_config_timer);
 	adapter->phy_timer_pending = false;
@@ -2633,10 +2681,10 @@
 	atl1_clean_rx_ring(adapter);
 }
 
-static void atl1_tx_timeout_task(struct work_struct *work)
+static void atl1_reset_dev_task(struct work_struct *work)
 {
 	struct atl1_adapter *adapter =
-		container_of(work, struct atl1_adapter, tx_timeout_task);
+		container_of(work, struct atl1_adapter, reset_dev_task);
 	struct net_device *netdev = adapter->netdev;
 
 	netif_device_detach(netdev);
@@ -2971,6 +3019,7 @@
 
 	netdev->netdev_ops = &atl1_netdev_ops;
 	netdev->watchdog_timeo = 5 * HZ;
+	netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
 
 	netdev->ethtool_ops = &atl1_ethtool_ops;
 	adapter->bd_number = cards_found;
@@ -3038,12 +3087,10 @@
 		    (unsigned long)adapter);
 	adapter->phy_timer_pending = false;
 
-	INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
+	INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
 
 	INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
 
-	INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
-
 	err = register_netdev(netdev);
 	if (err)
 		goto err_common;
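
The atl1 changes above move RX/TX completion work out of the hard interrupt handler and into a NAPI poll routine. A minimal sketch of that split, assuming a driver-private structure that owns the napi context; example_clean_rx(), example_mask_irqs() and example_unmask_irqs() are placeholder hooks, not atl1 functions:

#include <linux/netdevice.h>

struct example_adapter {
	struct napi_struct napi;
};

static int example_clean_rx(struct example_adapter *ad, int budget);	/* placeholder */
static void example_mask_irqs(struct example_adapter *ad);		/* placeholder */
static void example_unmask_irqs(struct example_adapter *ad);		/* placeholder */

/* Sketch: NAPI poll callback, bounded by the budget. */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *ad =
		container_of(napi, struct example_adapter, napi);
	int work_done = example_clean_rx(ad, budget);

	if (work_done < budget) {
		napi_complete(napi);		/* all work done for now */
		example_unmask_irqs(ad);	/* let the device interrupt again */
	}
	return work_done;
}

/* Sketch: called from the hard IRQ handler to defer the heavy work. */
static void example_schedule_poll(struct example_adapter *ad)
{
	if (napi_schedule_prep(&ad->napi)) {
		example_mask_irqs(ad);		/* quiet RX/TX until poll runs */
		__napi_schedule(&ad->napi);
	}
}

The poll callback would be registered with netif_napi_add() and bracketed by napi_enable()/napi_disable(), as the hunks above do for atl1.
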
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 109d6da..3bf79a5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -275,13 +275,17 @@
 #define ISR_DIS_SMB				0x20000000
 #define ISR_DIS_DMA				0x40000000
 
-/* Normal Interrupt mask  */
-#define IMR_NORMAL_MASK	(\
+/* Normal Interrupt mask without RX/TX enabled */
+#define IMR_NORXTX_MASK	(\
 	ISR_SMB		|\
 	ISR_GPHY	|\
 	ISR_PHY_LINKDOWN|\
 	ISR_DMAR_TO_RST	|\
-	ISR_DMAW_TO_RST	|\
+	ISR_DMAW_TO_RST)
+
+/* Normal Interrupt mask  */
+#define IMR_NORMAL_MASK	(\
+	IMR_NORXTX_MASK	|\
 	ISR_CMB_TX	|\
 	ISR_CMB_RX)
 
@@ -758,9 +762,9 @@
 	u16 link_speed;
 	u16 link_duplex;
 	spinlock_t lock;
-	struct work_struct tx_timeout_task;
+	struct napi_struct napi;
+	struct work_struct reset_dev_task;
 	struct work_struct link_chg_task;
-	struct work_struct pcie_dma_to_rst_task;
 
 	struct timer_list phy_config_timer;
 	bool phy_timer_pending;
@@ -782,6 +786,12 @@
 	u16 ict;		/* interrupt clear timer (2us resolution */
 	struct mii_if_info mii;	/* MII interface info */
 
+	/*
+	 * Used to check whether the NAPI handler is allowed to
+	 * re-enable interrupts
+	 */
+	bool int_enabled;
+
 	u32 bd_number;		/* board number */
 	bool pci_using_64;
 	struct atl1_hw hw;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 3cd8837..b4f3aa4 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -155,14 +155,21 @@
 	}
 }
 
+static inline void atlx_imr_set(struct atlx_adapter *adapter,
+				unsigned int imr)
+{
+	iowrite32(imr, adapter->hw.hw_addr + REG_IMR);
+	ioread32(adapter->hw.hw_addr + REG_IMR);
+}
+
 /*
  * atlx_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
  */
 static void atlx_irq_enable(struct atlx_adapter *adapter)
 {
-	iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
-	ioread32(adapter->hw.hw_addr + REG_IMR);
+	atlx_imr_set(adapter, IMR_NORMAL_MASK);
+	adapter->int_enabled = true;
 }
 
 /*
@@ -171,8 +178,8 @@
  */
 static void atlx_irq_disable(struct atlx_adapter *adapter)
 {
-	iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-	ioread32(adapter->hw.hw_addr + REG_IMR);
+	adapter->int_enabled = false;
+	atlx_imr_set(adapter, 0);
 	synchronize_irq(adapter->pdev->irq);
 }
 
@@ -194,7 +201,7 @@
 {
 	struct atlx_adapter *adapter = netdev_priv(netdev);
 	/* Do the reset outside of interrupt context */
-	schedule_work(&adapter->tx_timeout_task);
+	schedule_work(&adapter->reset_dev_task);
 }
 
 /*
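
The new atlx_imr_set() helper pairs the IMR write with a read-back so the posted MMIO write is flushed before the caller continues. A generic sketch of that idiom; the register offset and function name are placeholders:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_REG_IMR	0x48	/* placeholder register offset */

/* Sketch: write a 32-bit device register, then read it back to make
 * sure the posted write has reached the hardware.
 */
static void example_imr_set(void __iomem *hw_addr, u32 imr)
{
	iowrite32(imr, hw_addr + EXAMPLE_REG_IMR);
	ioread32(hw_addr + EXAMPLE_REG_IMR);
}
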
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8297e28..ab55979 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -7343,8 +7343,7 @@
 	{ "rx_fw_discards" },
 };
 
-#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
-			sizeof(bnx2_stats_str_arr[0]))
+#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
 
 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
 
@@ -7976,7 +7975,6 @@
 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
 	struct bnx2 *bp;
-	unsigned long mem_len;
 	int rc, i, j;
 	u32 reg;
 	u64 dma_mask, persist_dma_mask;
@@ -8036,13 +8034,8 @@
 #endif
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
-	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
-	dev->mem_end = dev->mem_start + mem_len;
-	dev->irq = pdev->irq;
-
-	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
-
+	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
+							 TX_MAX_TSS_RINGS + 1));
 	if (!bp->regview) {
 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
 		rc = -ENOMEM;
@@ -8346,10 +8339,8 @@
 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
 	}
 
-	if (bp->regview) {
-		iounmap(bp->regview);
-		bp->regview = NULL;
-	}
+	pci_iounmap(pdev, bp->regview);
+	bp->regview = NULL;
 
 err_out_release:
 	pci_release_regions(pdev);
@@ -8432,7 +8423,7 @@
 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int version_printed = 0;
-	struct net_device *dev = NULL;
+	struct net_device *dev;
 	struct bnx2 *bp;
 	int rc;
 	char str[40];
@@ -8442,15 +8433,12 @@
 
 	/* dev zeroed in init_etherdev */
 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
-
 	if (!dev)
 		return -ENOMEM;
 
 	rc = bnx2_init_board(pdev, dev);
-	if (rc < 0) {
-		free_netdev(dev);
-		return rc;
-	}
+	if (rc < 0)
+		goto err_free;
 
 	dev->netdev_ops = &bnx2_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
@@ -8480,22 +8468,21 @@
 		goto error;
 	}
 
-	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
-		    board_info[ent->driver_data].name,
+	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
+		    "node addr %pM\n", board_info[ent->driver_data].name,
 		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
 		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
-		    bnx2_bus_string(bp, str),
-		    dev->base_addr,
-		    bp->pdev->irq, dev->dev_addr);
+		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
+		    pdev->irq, dev->dev_addr);
 
 	return 0;
 
 error:
-	if (bp->regview)
-		iounmap(bp->regview);
+	iounmap(bp->regview);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
+err_free:
 	free_netdev(dev);
 	return rc;
 }
@@ -8511,8 +8498,7 @@
 	del_timer_sync(&bp->timer);
 	cancel_work_sync(&bp->reset_task);
 
-	if (bp->regview)
-		iounmap(bp->regview);
+	pci_iounmap(bp->pdev, bp->regview);
 
 	kfree(bp->temp_stats_blk);
 
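
The bnx2 hunks drop the open-coded pci_resource_start()/ioremap_nocache() sequence in favour of pci_iomap(), which keeps the BAR bookkeeping inside the PCI core. A small sketch of the probe/remove pairing, with placeholder function names; passing 0 as the length maps the whole BAR:

#include <linux/pci.h>

/* Sketch: map BAR 0 during probe; example_map_bar0() is a placeholder. */
static void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	return pci_iomap(pdev, 0, 0);	/* 0 = map the full resource */
}

/* Sketch: undo the mapping during remove or on an error path. */
static void example_unmap_bar0(struct pci_dev *pdev, void __iomem *regview)
{
	if (regview)
		pci_iounmap(pdev, regview);
}
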
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2c9ee55..e30e2a2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,13 +23,17 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.72.10-0"
-#define DRV_MODULE_RELDATE      "2012/02/20"
+#define DRV_MODULE_VERSION      "1.72.50-0"
+#define DRV_MODULE_RELDATE      "2012/04/23"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
 #define BCM_DCBNL
 #endif
+
+
+#include "bnx2x_hsi.h"
+
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
 #include "../cnic_if.h"
@@ -345,7 +349,6 @@
 #define SGE_PAGE_SIZE		PAGE_SIZE
 #define SGE_PAGE_SHIFT		PAGE_SHIFT
 #define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
-#define SGE_PAGES		(SGE_PAGE_SIZE * PAGES_PER_SGE)
 
 /* SGE ring related macros */
 #define NUM_RX_SGE_PAGES	2
@@ -815,6 +818,8 @@
 #define CHIP_NUM_57800_MF		0x16a5
 #define CHIP_NUM_57810			0x168e
 #define CHIP_NUM_57810_MF		0x16ae
+#define CHIP_NUM_57811			0x163d
+#define CHIP_NUM_57811_MF		0x163e
 #define CHIP_NUM_57840			0x168d
 #define CHIP_NUM_57840_MF		0x16ab
 #define CHIP_IS_E1(bp)			(CHIP_NUM(bp) == CHIP_NUM_57710)
@@ -826,6 +831,8 @@
 #define CHIP_IS_57800_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800_MF)
 #define CHIP_IS_57810(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810)
 #define CHIP_IS_57810_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57811(bp)		(CHIP_NUM(bp) == CHIP_NUM_57811)
+#define CHIP_IS_57811_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57811_MF)
 #define CHIP_IS_57840(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840)
 #define CHIP_IS_57840_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840_MF)
 #define CHIP_IS_E1H(bp)			(CHIP_IS_57711(bp) || \
@@ -836,6 +843,8 @@
 					 CHIP_IS_57800_MF(bp) || \
 					 CHIP_IS_57810(bp) || \
 					 CHIP_IS_57810_MF(bp) || \
+					 CHIP_IS_57811(bp) || \
+					 CHIP_IS_57811_MF(bp) || \
 					 CHIP_IS_57840(bp) || \
 					 CHIP_IS_57840_MF(bp))
 #define CHIP_IS_E1x(bp)			(CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
@@ -1053,6 +1062,13 @@
 		struct flow_control_configuration pfc_config;
 	} func_rdata;
 
+	/* afex ramrod cannot be part of the func_rdata union because these
+	 * events might arrive in parallel with other events from func_rdata.
+	 * Had they been defined in the same union, the data could get
+	 * corrupted.
+	 */
+	struct afex_vif_list_ramrod_data func_afex_rdata;
+
 	/* used by dmae command executer */
 	struct dmae_command		dmae[MAX_DMAE_C];
 
@@ -1169,6 +1185,7 @@
 enum {
 	BNX2X_SP_RTNL_SETUP_TC,
 	BNX2X_SP_RTNL_TX_TIMEOUT,
+	BNX2X_SP_RTNL_AFEX_F_UPDATE,
 	BNX2X_SP_RTNL_FAN_FAILURE,
 };
 
@@ -1222,7 +1239,6 @@
 #define ETH_MAX_JUMBO_PACKET_SIZE	9600
 /* TCP with Timestamp Option (32) + IPv6 (40) */
 #define ETH_MAX_TPA_HEADER_SIZE		72
-#define ETH_MIN_TPA_HEADER_SIZE		40
 
 	/* Max supported alignment is 256 (8 shift) */
 #define BNX2X_RX_ALIGN_SHIFT		min(8, L1_CACHE_SHIFT)
@@ -1300,6 +1316,7 @@
 #define NO_ISCSI_FLAG			(1 << 14)
 #define NO_FCOE_FLAG			(1 << 15)
 #define BC_SUPPORTS_PFC_STATS		(1 << 17)
+#define USING_SINGLE_MSIX_FLAG		(1 << 20)
 
 #define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
 #define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1329,21 +1346,20 @@
 	struct bnx2x_common	common;
 	struct bnx2x_port	port;
 
-	struct cmng_struct_per_port cmng;
-	u32			vn_weight_sum;
+	struct cmng_init	cmng;
+
 	u32			mf_config[E1HVN_MAX];
-	u32			mf2_config[E2_FUNC_MAX];
+	u32			mf_ext_config;
 	u32			path_has_ovlan; /* E3 */
 	u16			mf_ov;
 	u8			mf_mode;
 #define IS_MF(bp)		(bp->mf_mode != 0)
 #define IS_MF_SI(bp)		(bp->mf_mode == MULTI_FUNCTION_SI)
 #define IS_MF_SD(bp)		(bp->mf_mode == MULTI_FUNCTION_SD)
+#define IS_MF_AFEX(bp)		(bp->mf_mode == MULTI_FUNCTION_AFEX)
 
 	u8			wol;
 
-	bool			gro_check;
-
 	int			rx_ring_size;
 
 	u16			tx_quick_cons_trip_int;
@@ -1371,7 +1387,6 @@
 #define BNX2X_STATE_DIAG		0xe000
 #define BNX2X_STATE_ERROR		0xf000
 
-	int			multi_mode;
 #define BNX2X_MAX_PRIORITY		8
 #define BNX2X_MAX_ENTRIES_PER_PRI	16
 #define BNX2X_MAX_COS			3
@@ -1582,6 +1597,9 @@
 	struct dcbx_features			dcbx_remote_feat;
 	u32					dcbx_remote_flags;
 #endif
+	/* AFEX: store default vlan used */
+	int					afex_def_vlan_tag;
+	enum mf_cfg_afex_vlan_mode		afex_vlan_mode;
 	u32					pending_max;
 
 	/* multiple tx classes of service */
@@ -2138,9 +2156,16 @@
 #define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
 #define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
 
+#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp)  ((bp)->mf_ext_config & \
+					 MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp))
 #define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
 				(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
 				 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+#else
+#define IS_MF_FCOE_AFEX(bp)	false
 #endif
 
+
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4b05481..60d5b54 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -23,7 +23,6 @@
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
-#include <linux/firmware.h>
 #include <linux/prefetch.h>
 #include "bnx2x_cmn.h"
 #include "bnx2x_init.h"
@@ -329,16 +328,6 @@
 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
 		tpa_info->full_page =
 			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
-		/*
-		 * FW 7.2.16 BUG workaround:
-		 * if SGE size is (exactly) multiple gro_size
-		 * fw will place one less frag on SGE.
-		 * the calculation is done only for potentially
-		 * dangerous MTUs.
-		 */
-		if (unlikely(bp->gro_check))
-			if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
-				tpa_info->full_page -= gro_size;
 		tpa_info->gro_size = gro_size;
 	}
 
@@ -1212,16 +1201,15 @@
 
 void bnx2x_free_irq(struct bnx2x *bp)
 {
-	if (bp->flags & USING_MSIX_FLAG)
+	if (bp->flags & USING_MSIX_FLAG &&
+	    !(bp->flags & USING_SINGLE_MSIX_FLAG))
 		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
 				     CNIC_PRESENT + 1);
-	else if (bp->flags & USING_MSI_FLAG)
-		free_irq(bp->pdev->irq, bp->dev);
 	else
-		free_irq(bp->pdev->irq, bp->dev);
+		free_irq(bp->dev->irq, bp->dev);
 }
 
-int bnx2x_enable_msix(struct bnx2x *bp)
+int __devinit bnx2x_enable_msix(struct bnx2x *bp)
 {
 	int msix_vec = 0, i, rc, req_cnt;
 
@@ -1261,8 +1249,8 @@
 		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
 
 		if (rc) {
-			BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
-			return rc;
+			BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+			goto no_msix;
 		}
 		/*
 		 * decrease number of queues by number of unallocated entries
@@ -1270,18 +1258,34 @@
 		bp->num_queues -= diff;
 
 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
-				  bp->num_queues);
-	} else if (rc) {
-		/* fall to INTx if not enough memory */
-		if (rc == -ENOMEM)
-			bp->flags |= DISABLE_MSI_FLAG;
+			       bp->num_queues);
+	} else if (rc > 0) {
+		/* Get by with single vector */
+		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
+		if (rc) {
+			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
+				       rc);
+			goto no_msix;
+		}
+
+		BNX2X_DEV_INFO("Using single MSI-X vector\n");
+		bp->flags |= USING_SINGLE_MSIX_FLAG;
+
+	} else if (rc < 0) {
 		BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
-		return rc;
+		goto no_msix;
 	}
 
 	bp->flags |= USING_MSIX_FLAG;
 
 	return 0;
+
+no_msix:
+	/* fall to INTx if not enough memory */
+	if (rc == -ENOMEM)
+		bp->flags |= DISABLE_MSI_FLAG;
+
+	return rc;
 }
 
 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
@@ -1343,22 +1347,26 @@
 static int bnx2x_req_irq(struct bnx2x *bp)
 {
 	unsigned long flags;
-	int rc;
+	unsigned int irq;
 
-	if (bp->flags & USING_MSI_FLAG)
+	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
 		flags = 0;
 	else
 		flags = IRQF_SHARED;
 
-	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
-			 bp->dev->name, bp->dev);
-	return rc;
+	if (bp->flags & USING_MSIX_FLAG)
+		irq = bp->msix_table[0].vector;
+	else
+		irq = bp->pdev->irq;
+
+	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
 }
 
 static inline int bnx2x_setup_irqs(struct bnx2x *bp)
 {
 	int rc = 0;
-	if (bp->flags & USING_MSIX_FLAG) {
+	if (bp->flags & USING_MSIX_FLAG &&
+	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
 		rc = bnx2x_req_msix_irqs(bp);
 		if (rc)
 			return rc;
@@ -1371,8 +1379,13 @@
 		}
 		if (bp->flags & USING_MSI_FLAG) {
 			bp->dev->irq = bp->pdev->irq;
-			netdev_info(bp->dev, "using MSI  IRQ %d\n",
-			       bp->pdev->irq);
+			netdev_info(bp->dev, "using MSI IRQ %d\n",
+				    bp->dev->irq);
+		}
+		if (bp->flags & USING_MSIX_FLAG) {
+			bp->dev->irq = bp->msix_table[0].vector;
+			netdev_info(bp->dev, "using MSIX IRQ %d\n",
+				    bp->dev->irq);
 		}
 	}
 
@@ -1437,24 +1450,15 @@
 	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
 }
 
+
 void bnx2x_set_num_queues(struct bnx2x *bp)
 {
-	switch (bp->multi_mode) {
-	case ETH_RSS_MODE_DISABLED:
-		bp->num_queues = 1;
-		break;
-	case ETH_RSS_MODE_REGULAR:
-		bp->num_queues = bnx2x_calc_num_queues(bp);
-		break;
-
-	default:
-		bp->num_queues = 1;
-		break;
-	}
+	/* RSS queues */
+	bp->num_queues = bnx2x_calc_num_queues(bp);
 
 #ifdef BCM_CNIC
-	/* override in STORAGE SD mode */
-	if (IS_MF_STORAGE_SD(bp))
+	/* override in STORAGE SD modes */
+	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
 		bp->num_queues = 1;
 #endif
 	/* Add special queues */
@@ -1549,16 +1553,13 @@
 	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
 
-	/*
-	 * Prepare the inital contents fo the indirection table if RSS is
+	/* Prepare the initial contents of the indirection table if RSS is
 	 * enabled
 	 */
-	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
-		for (i = 0; i < sizeof(ind_table); i++)
-			ind_table[i] =
-				bp->fp->cl_id +
-				ethtool_rxfh_indir_default(i, num_eth_queues);
-	}
+	for (i = 0; i < sizeof(ind_table); i++)
+		ind_table[i] =
+			bp->fp->cl_id +
+			ethtool_rxfh_indir_default(i, num_eth_queues);
 
 	/*
 	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
@@ -1568,11 +1569,12 @@
 	 * For 57712 and newer on the other hand it's a per-function
 	 * configuration.
 	 */
-	return bnx2x_config_rss_pf(bp, ind_table,
-				   bp->port.pmf || !CHIP_IS_E1x(bp));
+	return bnx2x_config_rss_eth(bp, ind_table,
+				    bp->port.pmf || !CHIP_IS_E1x(bp));
 }
 
-int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
+int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+			u8 *ind_table, bool config_hash)
 {
 	struct bnx2x_config_rss_params params = {NULL};
 	int i;
@@ -1584,52 +1586,29 @@
 	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
 	 */
 
-	params.rss_obj = &bp->rss_conf_obj;
+	params.rss_obj = rss_obj;
 
 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 
-	/* RSS mode */
-	switch (bp->multi_mode) {
-	case ETH_RSS_MODE_DISABLED:
-		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
-		break;
-	case ETH_RSS_MODE_REGULAR:
-		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
-		break;
-	case ETH_RSS_MODE_VLAN_PRI:
-		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
-		break;
-	case ETH_RSS_MODE_E1HOV_PRI:
-		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
-		break;
-	case ETH_RSS_MODE_IP_DSCP:
-		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
-		break;
-	default:
-		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
-		return -EINVAL;
-	}
+	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 
-	/* If RSS is enabled */
-	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
-		/* RSS configuration */
-		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
-		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
-		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
-		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+	/* RSS configuration */
+	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
 
-		/* Hash bits */
-		params.rss_result_mask = MULTI_MASK;
+	/* Hash bits */
+	params.rss_result_mask = MULTI_MASK;
 
-		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+	memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
 
-		if (config_hash) {
-			/* RSS keys */
-			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
-				params.rss_key[i] = random32();
+	if (config_hash) {
+		/* RSS keys */
+		for (i = 0; i < sizeof(params.rss_key) / 4; i++)
+			params.rss_key[i] = random32();
 
-			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
-		}
+		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
 	}
 
 	return bnx2x_config_rss(bp, &params);
@@ -1911,8 +1890,14 @@
 			SHMEM2_WR(bp, dcc_support,
 				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
 				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+		if (SHMEM2_HAS(bp, afex_driver_support))
+			SHMEM2_WR(bp, afex_driver_support,
+				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
 	}
 
+	/* Set AFEX default VLAN tag to an invalid value */
+	bp->afex_def_vlan_tag = -1;
+
 	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
 	rc = bnx2x_func_start(bp);
 	if (rc) {
@@ -3084,7 +3069,8 @@
 	}
 
 #ifdef BCM_CNIC
-	if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) {
+	if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
+	    !is_zero_ether_addr(addr->sa_data)) {
 		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
 		return -EINVAL;
 	}
@@ -3206,7 +3192,8 @@
 	int rx_ring_size = 0;
 
 #ifdef BCM_CNIC
-	if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) {
+	if (!bp->rx_ring_size &&
+	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
 		rx_ring_size = MIN_RX_SIZE_NONTPA;
 		bp->rx_ring_size = rx_ring_size;
 	} else
@@ -3528,8 +3515,6 @@
 	 */
 	dev->mtu = new_mtu;
 
-	bp->gro_check = bnx2x_need_gro_check(new_mtu);
-
 	return bnx2x_reload_if_running(dev);
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 5c27454..cec993b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -86,13 +86,15 @@
 void bnx2x_send_unload_done(struct bnx2x *bp);
 
 /**
- * bnx2x_config_rss_pf - configure RSS parameters.
+ * bnx2x_config_rss_pf - configure RSS parameters in a PF.
  *
  * @bp:			driver handle
+ * @rss_obj:		RSS object to use
  * @ind_table:		indirection table to configure
  * @config_hash:	re-configure RSS hash keys configuration
  */
-int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
+int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+			u8 *ind_table, bool config_hash);
 
 /**
  * bnx2x__init_func_obj - init function object
@@ -485,7 +487,7 @@
  * fills msix_table, requests vectors, updates num_queues
  * according to number of available vectors.
  */
-int bnx2x_enable_msix(struct bnx2x *bp);
+int __devinit bnx2x_enable_msix(struct bnx2x *bp);
 
 /**
  * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -843,7 +845,7 @@
 {
 	if (bp->flags & USING_MSIX_FLAG) {
 		pci_disable_msix(bp->pdev);
-		bp->flags &= ~USING_MSIX_FLAG;
+		bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
 	} else if (bp->flags & USING_MSI_FLAG) {
 		pci_disable_msi(bp->pdev);
 		bp->flags &= ~USING_MSI_FLAG;
@@ -964,6 +966,19 @@
 
 /************************* Init ******************************************/
 
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+	return 2 * vn + BP_PORT(bp);
+}
+
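As a quick illustration of the mapping above: functions are interleaved across the two ports, so on port 1, VN 2 resolves to function 2 * 2 + 1 = 5.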
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
+				       bool config_hash)
+{
+	return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
+				   config_hash);
+}
+
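To make the new split concrete: bnx2x_config_rss_eth() above simply binds the PF-wide bp->rss_conf_obj, while bnx2x_config_rss_pf() can now be handed any RSS object. A hedged usage sketch, where 'other_obj' is illustrative only and not part of this patch:

	rc = bnx2x_config_rss_eth(bp, ind_table, config_hash);
	rc = bnx2x_config_rss_pf(bp, &other_obj, ind_table, config_hash);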
 /**
  * bnx2x_func_start - init function
  *
@@ -1419,15 +1434,32 @@
 }
 
 static inline void storm_memset_cmng(struct bnx2x *bp,
-				struct cmng_struct_per_port *cmng,
+				struct cmng_init *cmng,
 				u8 port)
 {
+	int vn;
 	size_t size = sizeof(struct cmng_struct_per_port);
 
 	u32 addr = BAR_XSTRORM_INTMEM +
 			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
 
-	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
+	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
+
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+		int func = func_by_vn(bp, vn);
+
+		addr = BAR_XSTRORM_INTMEM +
+		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
+		size = sizeof(struct rate_shaping_vars_per_vn);
+		__storm_memset_struct(bp, addr, size,
+				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);
+
+		addr = BAR_XSTRORM_INTMEM +
+		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
+		size = sizeof(struct fairness_vars_per_vn);
+		__storm_memset_struct(bp, addr, size,
+				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
+	}
 }
 
 /**
@@ -1512,13 +1544,6 @@
 	 */
 	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
 }
-
-static inline bool bnx2x_need_gro_check(int mtu)
-{
-	return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
-		(SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
-}
-
 /**
  * bnx2x_bz_fp - zero content of the fastpath structure.
  *
@@ -1608,11 +1633,6 @@
  */
 void bnx2x_get_iscsi_info(struct bnx2x *bp);
 #endif
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
-	return 2 * vn + BP_PORT(bp);
-}
 
 /**
  * bnx2x_link_sync_notify - send notification to other functions.
@@ -1667,7 +1687,8 @@
 	if (is_valid_ether_addr(addr))
 		return true;
 #ifdef BCM_CNIC
-	if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp))
+	if (is_zero_ether_addr(addr) &&
+	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
 		return true;
 #endif
 	return false;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 2cc0a17..faf8abd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -22,13 +22,10 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/crc32.h>
-
-
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 #include "bnx2x_dump.h"
 #include "bnx2x_init.h"
-#include "bnx2x_sp.h"
 
 /* Note: in the format strings below %s is replaced by the queue-name which is
  * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -1433,7 +1430,7 @@
 	else
 		ering->rx_pending = MAX_RX_AVAIL;
 
-	ering->tx_max_pending = MAX_TX_AVAIL;
+	ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
 	ering->tx_pending = bp->tx_ring_size;
 }
 
@@ -1451,7 +1448,7 @@
 	if ((ering->rx_pending > MAX_RX_AVAIL) ||
 	    (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
 						    MIN_RX_SIZE_TPA)) ||
-	    (ering->tx_pending > MAX_TX_AVAIL) ||
+	    (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
 	    (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
 		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
 		return -EINVAL;
@@ -2396,10 +2393,7 @@
 
 static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
 {
-	struct bnx2x *bp = netdev_priv(dev);
-
-	return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
-		0 : T_ETH_INDIRECTION_TABLE_SIZE);
+	return T_ETH_INDIRECTION_TABLE_SIZE;
 }
 
 static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
@@ -2445,7 +2439,7 @@
 		ind_table[i] = indir[i] + bp->fp->cl_id;
 	}
 
-	return bnx2x_config_rss_pf(bp, ind_table, false);
+	return bnx2x_config_rss_eth(bp, ind_table, false);
 }
 
 static const struct ethtool_ops bnx2x_ethtool_ops = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index b9b2633..426f77a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -387,7 +387,7 @@
 
 #define STATS_QUERY_CMD_COUNT 16
 
-#define NIV_LIST_TABLE_SIZE 4096
+#define AFEX_LIST_TABLE_SIZE 4096
 
 #define INVALID_VNIC_ID	0xFF
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index dbff591..a440a8b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -833,6 +833,7 @@
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF      0x00000100
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4          0x00000200
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT  0x00000300
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE      0x00000400
 
 	/* The interval in seconds between sending LLDP packets. Set to zero
 	   to disable the feature */
@@ -1235,6 +1236,8 @@
 	#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL     0x00050006
 	#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL  0xa1000000
 	#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL  0x00050234
+	#define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED        0xa2000000
+	#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED        0x00070002
 	#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
 	#define REQ_BC_VER_4_PFC_STATS_SUPPORTED        0x00070201
 
@@ -1242,6 +1245,13 @@
 	#define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
 
 	#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+
+	#define DRV_MSG_CODE_AFEX_DRIVER_SETMAC         0xd0000000
+	#define DRV_MSG_CODE_AFEX_LISTGET_ACK           0xd1000000
+	#define DRV_MSG_CODE_AFEX_LISTSET_ACK           0xd2000000
+	#define DRV_MSG_CODE_AFEX_STATSGET_ACK          0xd3000000
+	#define DRV_MSG_CODE_AFEX_VIFSET_ACK            0xd4000000
+
 	#define DRV_MSG_CODE_DRV_INFO_ACK               0xd8000000
 	#define DRV_MSG_CODE_DRV_INFO_NACK              0xd9000000
 
@@ -1299,6 +1309,14 @@
 	#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG      0xa0200000
 	#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED     0xa0300000
 	#define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+	#define FW_MSG_CODE_HW_SET_INVALID_IMAGE        0xb0100000
+
+	#define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE     0xd0100000
+	#define FW_MSG_CODE_AFEX_LISTGET_ACK            0xd1100000
+	#define FW_MSG_CODE_AFEX_LISTSET_ACK            0xd2100000
+	#define FW_MSG_CODE_AFEX_STATSGET_ACK           0xd3100000
+	#define FW_MSG_CODE_AFEX_VIFSET_ACK             0xd4100000
+
 	#define FW_MSG_CODE_DRV_INFO_ACK                0xd8100000
 	#define FW_MSG_CODE_DRV_INFO_NACK               0xd9100000
 
@@ -1357,6 +1375,12 @@
 
 	#define DRV_STATUS_DCBX_EVENT_MASK              0x000f0000
 	#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS     0x00010000
+	#define DRV_STATUS_AFEX_EVENT_MASK              0x03f00000
+	#define DRV_STATUS_AFEX_LISTGET_REQ             0x00100000
+	#define DRV_STATUS_AFEX_LISTSET_REQ             0x00200000
+	#define DRV_STATUS_AFEX_STATSGET_REQ            0x00400000
+	#define DRV_STATUS_AFEX_VIFSET_REQ              0x00800000
+
 	#define DRV_STATUS_DRV_INFO_REQ                 0x04000000
 
 	u32 virt_mac_upper;
@@ -1448,7 +1472,26 @@
 	#define FUNC_MF_CFG_E1HOV_TAG_SHIFT             0
 	#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT         FUNC_MF_CFG_E1HOV_TAG_MASK
 
-	u32 reserved[2];
+	/* afex default VLAN ID - 12 bits */
+	#define FUNC_MF_CFG_AFEX_VLAN_MASK              0x0fff0000
+	#define FUNC_MF_CFG_AFEX_VLAN_SHIFT             16
+
+	u32 afex_config;
+	#define FUNC_MF_CFG_AFEX_COS_FILTER_MASK                     0x000000ff
+	#define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT                    0
+	#define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK                    0x0000ff00
+	#define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT                   8
+	#define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL                     0x00000100
+	#define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK                      0x000f0000
+	#define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT                     16
+
+	u32 reserved;
+};
+
+enum mf_cfg_afex_vlan_mode {
+	FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
+	FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
+	FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
 };
 
 /* This structure is not applicable and should not be accessed on 57711 */
@@ -1945,18 +1988,29 @@
 
 	u32 nvm_retain_bitmap_addr;			/* 0x0070 */
 
-	u32	reserved1;	/* 0x0074 */
+	/* afex support of that driver */
+	u32 afex_driver_support;			/* 0x0074 */
+	#define SHMEM_AFEX_VERSION_MASK                  0x100f
+	#define SHMEM_AFEX_SUPPORTED_VERSION_ONE         0x1001
+	#define SHMEM_AFEX_REDUCED_DRV_LOADED            0x8000
 
-	u32	reserved2[E2_FUNC_MAX];
+	/* driver receives addr in scratchpad to which it should respond */
+	u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX];
 
-	u32	reserved3[E2_FUNC_MAX];/* 0x0088 */
-	u32	reserved4[E2_FUNC_MAX];/* 0x0098 */
+	/* generic params from MCP to driver (value depends on the msg sent
+	 * to driver)
+	 */
+	u32 afex_param1_to_driver[E2_FUNC_MAX];		/* 0x0088 */
+	u32 afex_param2_to_driver[E2_FUNC_MAX];		/* 0x0098 */
 
 	u32 swim_base_addr;				/* 0x0108 */
 	u32 swim_funcs;
 	u32 swim_main_cb;
 
-	u32	reserved5[2];
+	/* bitmap notifying which VIF profiles stored in nvram are enabled by
+	 * switch
+	 */
+	u32 afex_profiles_enabled[2];
 
 	/* generic flags controlled by the driver */
 	u32 drv_flags;
@@ -2696,10 +2750,51 @@
 	struct fcoe_stats_info	fcoe_stat;
 	struct iscsi_stats_info	iscsi_stat;
 };
+
+/* stats collected for afex.
+ * NOTE: structure is exactly as expected to be received by the switch.
+ *       order must remain exactly as is unless protocol changes !
+ */
+struct afex_stats {
+	u32 tx_unicast_frames_hi;
+	u32 tx_unicast_frames_lo;
+	u32 tx_unicast_bytes_hi;
+	u32 tx_unicast_bytes_lo;
+	u32 tx_multicast_frames_hi;
+	u32 tx_multicast_frames_lo;
+	u32 tx_multicast_bytes_hi;
+	u32 tx_multicast_bytes_lo;
+	u32 tx_broadcast_frames_hi;
+	u32 tx_broadcast_frames_lo;
+	u32 tx_broadcast_bytes_hi;
+	u32 tx_broadcast_bytes_lo;
+	u32 tx_frames_discarded_hi;
+	u32 tx_frames_discarded_lo;
+	u32 tx_frames_dropped_hi;
+	u32 tx_frames_dropped_lo;
+
+	u32 rx_unicast_frames_hi;
+	u32 rx_unicast_frames_lo;
+	u32 rx_unicast_bytes_hi;
+	u32 rx_unicast_bytes_lo;
+	u32 rx_multicast_frames_hi;
+	u32 rx_multicast_frames_lo;
+	u32 rx_multicast_bytes_hi;
+	u32 rx_multicast_bytes_lo;
+	u32 rx_broadcast_frames_hi;
+	u32 rx_broadcast_frames_lo;
+	u32 rx_broadcast_bytes_hi;
+	u32 rx_broadcast_bytes_lo;
+	u32 rx_frames_discarded_hi;
+	u32 rx_frames_discarded_lo;
+	u32 rx_frames_dropped_hi;
+	u32 rx_frames_dropped_lo;
+};
+
 #define BCM_5710_FW_MAJOR_VERSION			7
 #define BCM_5710_FW_MINOR_VERSION			2
-#define BCM_5710_FW_REVISION_VERSION		16
-#define BCM_5710_FW_ENGINEERING_VERSION		0
+#define BCM_5710_FW_REVISION_VERSION			51
+#define BCM_5710_FW_ENGINEERING_VERSION			0
 #define BCM_5710_FW_COMPILE_FLAGS			1
 
 
@@ -3389,7 +3484,7 @@
 #define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
 #define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
 	u8 default_vlan_flg;
-	u8 reserved2;
+	u8 force_default_pri_flg;
 	__le32 reserved3;
 };
 
@@ -4375,8 +4470,21 @@
 
 
 /*
+ * The data afex vif list ramrod need
+ */
+struct afex_vif_list_ramrod_data {
+	u8 afex_vif_list_command;
+	u8 func_bit_map;
+	__le16 vif_list_index;
+	u8 func_to_clear;
+	u8 echo;
+	__le16 reserved1;
+};
+
+
+/*
  * cfc delete event data
-*/
+ */
 struct cfc_del_event_data {
 	u32 cid;
 	u32 reserved0;
@@ -4448,6 +4556,65 @@
 	struct cmng_flags_per_port flags;
 };
 
+/*
+ * a single rate shaping counter. can be used as protocol or vnic counter
+ */
+struct rate_shaping_counter {
+	u32 quota;
+#if defined(__BIG_ENDIAN)
+	u16 __reserved0;
+	u16 rate;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rate;
+	u16 __reserved0;
+#endif
+};
+
+/*
+ * per-vnic rate shaping variables
+ */
+struct rate_shaping_vars_per_vn {
+	struct rate_shaping_counter vn_counter;
+};
+
+/*
+ * per-vnic fairness variables
+ */
+struct fairness_vars_per_vn {
+	u32 cos_credit_delta[MAX_COS_NUMBER];
+	u32 vn_credit_delta;
+	u32 __reserved0;
+};
+
+/*
+ * cmng per-vnic init state (rate shaping and fairness)
+ */
+struct cmng_vnic {
+	struct rate_shaping_vars_per_vn vnic_max_rate[4];
+	struct fairness_vars_per_vn vnic_min_rate[4];
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_init {
+	struct cmng_struct_per_port port;
+	struct cmng_vnic vnic;
+};
+
+
+/*
+ * driver parameters for congestion management init, all rates are in Mbps
+ */
+struct cmng_init_input {
+	u32 port_rate;
+	u16 vnic_min_rate[4];
+	u16 vnic_max_rate[4];
+	u16 cos_min_rate[MAX_COS_NUMBER];
+	u16 cos_to_pause_mask[MAX_COS_NUMBER];
+	struct cmng_flags_per_port flags;
+};
+
 
 /*
  * Protocol-common command ID for slow path elements
@@ -4462,7 +4629,7 @@
 	RAMROD_CMD_ID_COMMON_STAT_QUERY,
 	RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
 	RAMROD_CMD_ID_COMMON_START_TRAFFIC,
-	RAMROD_CMD_ID_COMMON_RESERVED1,
+	RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
 	MAX_COMMON_SPQE_CMD_ID
 };
 
@@ -4670,6 +4837,17 @@
 };
 
 /*
+ * vif list event data
+ */
+struct vif_list_event_data {
+	u8 func_bit_map;
+	u8 echo;
+	__le16 reserved0;
+	__le32 reserved1;
+	__le32 reserved2;
+};
+
+/*
  * union for all event ring message types
  */
 union event_data {
@@ -4678,6 +4856,7 @@
 	struct cfc_del_event_data cfc_del_event;
 	struct vf_flr_event_data vf_flr_event;
 	struct malicious_vf_event_data malicious_vf_event;
+	struct vif_list_event_data vif_list_event;
 };
 
 
@@ -4743,7 +4922,7 @@
 	EVENT_RING_OPCODE_FORWARD_SETUP,
 	EVENT_RING_OPCODE_RSS_UPDATE_RULES,
 	EVENT_RING_OPCODE_FUNCTION_UPDATE,
-	EVENT_RING_OPCODE_RESERVED1,
+	EVENT_RING_OPCODE_AFEX_VIF_LISTS,
 	EVENT_RING_OPCODE_SET_MAC,
 	EVENT_RING_OPCODE_CLASSIFICATION_RULES,
 	EVENT_RING_OPCODE_FILTERS_RULES,
@@ -4763,16 +4942,6 @@
 
 
 /*
- * per-vnic fairness variables
- */
-struct fairness_vars_per_vn {
-	u32 cos_credit_delta[MAX_COS_NUMBER];
-	u32 vn_credit_delta;
-	u32 __reserved0;
-};
-
-
-/*
  * Priority and cos
  */
 struct priority_cos {
@@ -4800,12 +4969,27 @@
 struct function_start_data {
 	__le16 function_mode;
 	__le16 sd_vlan_tag;
-	u16 reserved;
+	__le16 vif_id;
 	u8 path_id;
 	u8 network_cos_mode;
 };
 
 
+struct function_update_data {
+	u8 vif_id_change_flg;
+	u8 afex_default_vlan_change_flg;
+	u8 allowed_priorities_change_flg;
+	u8 network_cos_mode_change_flg;
+	__le16 vif_id;
+	__le16 afex_default_vlan;
+	u8 allowed_priorities;
+	u8 network_cos_mode;
+	u8 lb_mode_en;
+	u8 reserved0;
+	__le32 reserved1;
+};
+
+
 /*
  * FW version stored in the Xstorm RAM
  */
@@ -5003,7 +5187,7 @@
 	SINGLE_FUNCTION,
 	MULTI_FUNCTION_SD,
 	MULTI_FUNCTION_SI,
-	MULTI_FUNCTION_RESERVED,
+	MULTI_FUNCTION_AFEX,
 	MAX_MF_MODE
 };
 
@@ -5128,6 +5312,7 @@
 	u8 protocol_data[8];
 	struct regpair phy_address;
 	struct regpair mac_config_addr;
+	struct afex_vif_list_ramrod_data afex_vif_list_data;
 };
 
 /*
@@ -5140,29 +5325,6 @@
 
 
 /*
- * a single rate shaping counter. can be used as protocol or vnic counter
- */
-struct rate_shaping_counter {
-	u32 quota;
-#if defined(__BIG_ENDIAN)
-	u16 __reserved0;
-	u16 rate;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rate;
-	u16 __reserved0;
-#endif
-};
-
-
-/*
- * per-vnic rate shaping variables
- */
-struct rate_shaping_vars_per_vn {
-	struct rate_shaping_counter vn_counter;
-};
-
-
-/*
  * The send queue element
  */
 struct slow_path_element {
@@ -5330,6 +5492,18 @@
 
 
 /*
+ * vif_list_rule_kind
+ */
+enum vif_list_rule_kind {
+	VIF_LIST_RULE_SET,
+	VIF_LIST_RULE_GET,
+	VIF_LIST_RULE_CLEAR_ALL,
+	VIF_LIST_RULE_CLEAR_FUNC,
+	MAX_VIF_LIST_RULE_KIND
+};
+
+
+/*
  * zone A per-queue data
  */
 struct xstorm_queue_zone_data {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 29f5c3c..559c396 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -125,7 +125,7 @@
 	MODE_MF                        = 0x00000100,
 	MODE_MF_SD                     = 0x00000200,
 	MODE_MF_SI                     = 0x00000400,
-	MODE_MF_NIV                    = 0x00000800,
+	MODE_MF_AFEX                   = 0x00000800,
 	MODE_E3_A0                     = 0x00001000,
 	MODE_E3_B0                     = 0x00002000,
 	MODE_COS3                      = 0x00004000,
@@ -241,7 +241,8 @@
 			REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
 
 			/* set/clear queue bit in command-queue bit map
-			(E2/E3A0 only, valid COS values are 0/1) */
+			 * (E2/E3A0 only, valid COS values are 0/1)
+			 */
 			if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
 				reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
 				reg_bit_map = REG_RD(bp, reg_addr);
@@ -277,7 +278,215 @@
 }
 
 
-/* Returns the index of start or end of a specific block stage in ops array*/
+/* congestion management port init api description
+ * the api works as follows:
+ * the driver should pass the cmng_init_input struct, the port_init function
+ * will prepare the required internal ram structure which will be passed back
+ * to the driver (cmng_init) that will write it into the internal ram.
+ *
+ * IMPORTANT REMARKS:
+ * 1. the cmng_init struct does not represent the contiguous internal ram
+ *    structure. the driver should use the XSTORM_CMNG_PER_PORT_VARS_OFFSET
+ *    offset in order to write the port sub struct and the
+ *    PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
+ *    words - don't use memcpy!).
+ * 2. although the cmng_init struct is filled for the maximal vnic number
+ *    possible, the driver should only write the valid vnics into the internal
+ *    ram according to the appropriate port mode.
+ */
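A sketch of the calling sequence the comment above describes, using the helpers added elsewhere in this patch (bnx2x_init_cmng() below and the reworked storm_memset_cmng() in bnx2x_cmn.h); 'input' and 'port' are assumed to have been prepared by the caller:

	struct cmng_init ram_data;

	bnx2x_init_cmng(&input, &ram_data);     /* fill port + per-vnic sub-structs */
	storm_memset_cmng(bp, &ram_data, port); /* write each sub-struct at its own
						 * internal RAM offset - no memcpy
						 * of the whole struct
						 */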
+#define BITS_TO_BYTES(x) ((x)/8)
+
+/* CMNG constants, as derived from system spec calculations */
+
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer
+ */
+#define QM_ARB_BYTES 160000
+
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+
+/* how many bytes above threshold for
+ * the minimal credit of Min algorithm
+ */
+#define MIN_ABOVE_THRESH 32768
+
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair
+ */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+#define SAFC_TIMEOUT_USEC 52
+
+#define SDM_TICKS 4
+
+
+static inline void bnx2x_init_max(const struct cmng_init_input *input_data,
+				  u32 r_param, struct cmng_init *ram_data)
+{
+	u32 vnic;
+	struct cmng_vnic *vdata = &ram_data->vnic;
+	struct cmng_struct_per_port *pdata = &ram_data->port;
+	/* rate shaping per-port variables
+	 * 100 micro seconds in SDM ticks = 25
+	 * since each tick is 4 microSeconds
+	 */
+
+	pdata->rs_vars.rs_periodic_timeout =
+	RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
+
+	/* this is the threshold below which no timer arming will occur.
+	 * 1.25 coefficient is for the threshold to be a little bigger
+	 * than the real time to compensate for timer inaccuracy
+	 */
+	pdata->rs_vars.rs_threshold =
+	(5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
+
+	/* rate shaping per-vnic variables */
+	for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+		/* global vnic counter */
+		vdata->vnic_max_rate[vnic].vn_counter.rate =
+		input_data->vnic_max_rate[vnic];
+		/* maximal Mbps for this vnic
+		 * the quota in each timer period - number of bytes
+		 * transmitted in this period
+		 */
+		vdata->vnic_max_rate[vnic].vn_counter.quota =
+			RS_PERIODIC_TIMEOUT_USEC *
+			(u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
+	}
+
+}
+
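Worked unit check for the quota above (rates are given in Mbps, i.e. bits per microsecond): with vnic_max_rate = 10000, the per-window budget is 400 * 10000 / 8 = 500000 bytes per rate-shaping period.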
+static inline void bnx2x_init_min(const struct cmng_init_input *input_data,
+				  u32 r_param, struct cmng_init *ram_data)
+{
+	u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
+	struct cmng_vnic *vdata = &ram_data->vnic;
+	struct cmng_struct_per_port *pdata = &ram_data->port;
+
+	/* this is the resolution of the fairness timer */
+	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+
+	/* fairness per-port variables
+	 * for 10G it is 1000usec. for 1G it is 10000usec.
+	 */
+	tFair = T_FAIR_COEF / input_data->port_rate;
+
+	/* this is the threshold below which we won't arm the timer anymore */
+	pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
+
+	/* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
+	 * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
+	 */
+	pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
+
+	/* since each tick is 4 microSeconds */
+	pdata->fair_vars.fairness_timeout =
+				fair_periodic_timeout_usec / SDM_TICKS;
+
+	/* calculate sum of weights */
+	vnicWeightSum = 0;
+
+	for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++)
+		vnicWeightSum += input_data->vnic_min_rate[vnic];
+
+	/* global vnic counter */
+	if (vnicWeightSum > 0) {
+		/* fairness per-vnic variables */
+		for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+			/* this is the credit for each period of the fairness
+			 * algorithm - number of bytes in T_FAIR (this vnic
+			 * share of the port rate)
+			 */
+			vdata->vnic_min_rate[vnic].vn_credit_delta =
+				(u32)input_data->vnic_min_rate[vnic] * 100 *
+				(T_FAIR_COEF / (8 * 100 * vnicWeightSum));
+			if (vdata->vnic_min_rate[vnic].vn_credit_delta <
+			    pdata->fair_vars.fair_threshold +
+			    MIN_ABOVE_THRESH) {
+				vdata->vnic_min_rate[vnic].vn_credit_delta =
+					pdata->fair_vars.fair_threshold +
+					MIN_ABOVE_THRESH;
+			}
+		}
+	}
+}
+
+static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
+				     u32 r_param, struct cmng_init *ram_data)
+{
+	u32 vnic, cos;
+	u32 cosWeightSum = 0;
+	struct cmng_vnic *vdata = &ram_data->vnic;
+	struct cmng_struct_per_port *pdata = &ram_data->port;
+
+	for (cos = 0; cos < MAX_COS_NUMBER; cos++)
+		cosWeightSum += input_data->cos_min_rate[cos];
+
+	if (cosWeightSum > 0) {
+
+		for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+			/* Since cos and vnic shouldn't work together the rate
+			 * to divide between the coses is the port rate.
+			 */
+			u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
+			for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
+				/* this is the credit for each period of
+				 * the fairness algorithm - number of bytes
+				 * in T_FAIR (this cos share of the vnic rate)
+				 */
+				ccd[cos] =
+				    (u32)input_data->cos_min_rate[cos] * 100 *
+				    (T_FAIR_COEF / (8 * 100 * cosWeightSum));
+				 if (ccd[cos] < pdata->fair_vars.fair_threshold
+						+ MIN_ABOVE_THRESH) {
+					ccd[cos] =
+					    pdata->fair_vars.fair_threshold +
+					    MIN_ABOVE_THRESH;
+				}
+			}
+		}
+	}
+}
+
+static inline void bnx2x_init_safc(const struct cmng_init_input *input_data,
+				   struct cmng_init *ram_data)
+{
+	/* in microSeconds */
+	ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
+}
+
+/* Congestion management port init */
+static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data,
+				   struct cmng_init *ram_data)
+{
+	u32 r_param;
+	memset(ram_data, 0, sizeof(struct cmng_init));
+
+	ram_data->port.flags = input_data->flags;
+
+	/* number of bytes transmitted at a rate of 10Gbps
+	 * in one usec = 1.25KB.
+	 */
+	r_param = BITS_TO_BYTES(input_data->port_rate);
+	bnx2x_init_max(input_data, r_param, ram_data);
+	bnx2x_init_min(input_data, r_param, ram_data);
+	bnx2x_init_fw_wrr(input_data, r_param, ram_data);
+	bnx2x_init_safc(input_data, ram_data);
+}
+
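Unit check for r_param: port_rate is in Mbps (bits per microsecond), so at 10Gbps BITS_TO_BYTES(10000) = 1250 bytes per usec - the 1.25KB the comment above refers to.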
+
+
+/* Returns the index of start or end of a specific block stage in ops array */
 #define BLOCK_OPS_IDX(block, stage, end) \
 			(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
 
@@ -499,9 +708,7 @@
 	bnx2x_set_mcp_parity(bp, false);
 }
 
-/**
- * Clear the parity error status registers.
- */
+/* Clear the parity error status registers. */
 static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
 {
 	int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index ad95324..a3fb721 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -138,7 +138,6 @@
 
 
 
-/* */
 #define SFP_EEPROM_CON_TYPE_ADDR		0x2
 	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
 	#define SFP_EEPROM_CON_TYPE_VAL_COPPER	0x21
@@ -404,8 +403,7 @@
 
 	DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
 
-	/*
-	 * mapping between entry  priority to client number (0,1,2 -debug and
+	/* mapping between entry priority and client number (0,1,2 - debug and
 	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
 	 * 3bits client num.
 	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
@@ -413,8 +411,7 @@
 	 */
 
 	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
-	/*
-	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
 	 * COS0 entry, 4 - COS1 entry.
 	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -425,13 +422,11 @@
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
 	/* defines which entries (clients) are subjected to WFQ arbitration */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-	/*
-	 * For strict priority entries defines the number of consecutive
+	/* For strict priority entries defines the number of consecutive
 	 * slots for the highest priority.
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/*
-	 * mapping between the CREDIT_WEIGHT registers and actual client
+	/* mapping between the CREDIT_WEIGHT registers and actual client
 	 * numbers
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
@@ -443,8 +438,7 @@
 	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
 	/* ETS mode disable */
 	REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
-	/*
-	 * If ETS mode is enabled (there is no strict priority) defines a WFQ
+	/* If ETS mode is enabled (there is no strict priority) defines a WFQ
 	 * weight for COS0/COS1.
 	 */
 	REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
@@ -471,10 +465,9 @@
 			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
 	} else
 		min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
-	/**
-	 *  If the link isn't up (static configuration for example ) The
-	 *  link will be according to 20GBPS.
-	*/
+	/* If the link isn't up (static configuration for example), the
+	 * link will be treated as 20GBPS.
+	 */
 	return min_w_val;
 }
 /******************************************************************************
@@ -538,8 +531,7 @@
 	struct bnx2x *bp = params->bp;
 	const u8 port = params->port;
 	const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
-	/**
-	 * mapping between entry  priority to client number (0,1,2 -debug and
+	/* Mapping between entry priority and client number (0,1,2 - debug and
 	 * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
 	 * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by
 	 * reset value or init tool
@@ -551,18 +543,14 @@
 		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
 		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
 	}
-	/**
-	* For strict priority entries defines the number of consecutive
-	* slots for the highest priority.
-	*/
-	/* TODO_ETS - Should be done by reset value or init tool */
+	/* For strict priority entries defines the number of consecutive
+	 * slots for the highest priority.
+	 */
 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
 		   NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/**
-	 * mapping between the CREDIT_WEIGHT registers and actual client
+	/* Mapping between the CREDIT_WEIGHT registers and actual client
 	 * numbers
 	 */
-	/* TODO_ETS - Should be done by reset value or init tool */
 	if (port) {
 		/*Port 1 has 6 COS*/
 		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
@@ -574,8 +562,7 @@
 		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
 	}
 
-	/**
-	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
 	 * COS0 entry, 4 - COS1 entry.
 	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -590,13 +577,12 @@
 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
 		   NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
 
-	/**
-	* Please notice the register address are note continuous and a
-	* for here is note appropriate.In 2 port mode port0 only COS0-5
-	* can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4
-	* port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT
-	* are never used for WFQ
-	*/
+	/* Please notice the register addresses are not contiguous, so a
+	 * for loop here is not appropriate. In 2 port mode port0 only COS0-5
+	 * can be used; DEBUG0, DEBUG1, MGMT are never used for WFQ. In 4
+	 * port mode port1 only COS0-2 can be used; DEBUG0, DEBUG1, MGMT
+	 * are never used for WFQ.
+	 */
 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
 		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
@@ -633,10 +619,9 @@
 	u32 base_upper_bound = 0;
 	u8 max_cos = 0;
 	u8 i = 0;
-	/**
-	* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
-	* port mode port1 has COS0-2 that can be used for WFQ.
-	*/
+	/* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
+	 * port mode port1 has COS0-2 that can be used for WFQ.
+	 */
 	if (!port) {
 		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
 		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -666,8 +651,7 @@
 	u32 base_weight = 0;
 	u8 max_cos = 0;
 
-	/**
-	 * mapping between entry  priority to client number 0 - COS0
+	/* Mapping between entry priority and client number 0 - COS0
 	 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
 	 * TODO_ETS - Should be done by reset value or init tool
 	 */
@@ -695,10 +679,9 @@
 
 	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
 		   PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
-	/**
-	* In 2 port mode port0 has COS0-5 that can be used for WFQ.
-	* In 4 port mode port1 has COS0-2 that can be used for WFQ.
-	*/
+	/* In 2 port mode port0 has COS0-5 that can be used for WFQ.
+	 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+	 */
 	if (!port) {
 		base_weight = PBF_REG_COS0_WEIGHT_P0;
 		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -738,7 +721,7 @@
 /******************************************************************************
 * Description:
 *	Disable will return basicly the values to init values.
-*.
+*
 ******************************************************************************/
 int bnx2x_ets_disabled(struct link_params *params,
 		      struct link_vars *vars)
@@ -867,7 +850,7 @@
 /******************************************************************************
 * Description:
 *	Calculate the total BW.A value of 0 isn't legal.
-*.
+*
 ******************************************************************************/
 static int bnx2x_ets_e3b0_get_total_bw(
 	const struct link_params *params,
@@ -879,7 +862,6 @@
 	u8 is_bw_cos_exist = 0;
 
 	*total_bw = 0 ;
-
 	/* Calculate total BW requested */
 	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
 		if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
@@ -887,10 +869,9 @@
 			if (!ets_params->cos[cos_idx].params.bw_params.bw) {
 				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
 						   "was set to 0\n");
-				/*
-				 * This is to prevent a state when ramrods
+				/* This is to prevent a state when ramrods
 				 * can't be sent
-				*/
+				 */
 				ets_params->cos[cos_idx].params.bw_params.bw
 					 = 1;
 			}
@@ -908,8 +889,7 @@
 		}
 		DP(NETIF_MSG_LINK,
 		   "bnx2x_ets_E3B0_config total BW should be 100\n");
-		/*
-		 * We can handle a case whre the BW isn't 100 this can happen
+		/* We can handle a case where the BW isn't 100; this can happen
 		 * if the TC are joined.
 		 */
 	}
@@ -919,7 +899,7 @@
 /******************************************************************************
 * Description:
 *	Invalidate all the sp_pri_to_cos.
-*.
+*
 ******************************************************************************/
 static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
 {
@@ -931,7 +911,7 @@
 * Description:
 *	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
 *	according to sp_pri_to_cos.
-*.
+*
 ******************************************************************************/
 static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
 					    u8 *sp_pri_to_cos, const u8 pri,
@@ -942,6 +922,12 @@
 	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
 		DCBX_E3B0_MAX_NUM_COS_PORT0;
 
+	if (pri >= max_num_of_cos) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+		   "parameter Illegal strict priority\n");
+	    return -EINVAL;
+	}
+
 	if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
 		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
 				   "parameter There can't be two COS's with "
@@ -949,12 +935,6 @@
 		return -EINVAL;
 	}
 
-	if (pri > max_num_of_cos) {
-		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
-		   "parameter Illegal strict priority\n");
-	    return -EINVAL;
-	}
-
 	sp_pri_to_cos[pri] = cos_entry;
 	return 0;
 
@@ -964,7 +944,7 @@
 * Description:
 *	Returns the correct value according to COS and priority in
 *	the sp_pri_cli register.
-*.
+*
 ******************************************************************************/
 static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
 					 const u8 pri_set,
@@ -981,7 +961,7 @@
 * Description:
 *	Returns the correct value according to COS and priority in the
 *	sp_pri_cli register for NIG.
-*.
+*
 ******************************************************************************/
 static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
 {
@@ -997,7 +977,7 @@
 * Description:
 *	Returns the correct value according to COS and priority in the
 *	sp_pri_cli register for PBF.
-*.
+*
 ******************************************************************************/
 static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
 {
@@ -1013,7 +993,7 @@
 * Description:
 *	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
 *	according to sp_pri_to_cos.(which COS has higher priority)
-*.
+*
 ******************************************************************************/
 static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
 					     u8 *sp_pri_to_cos)
@@ -1149,8 +1129,7 @@
 		return -EINVAL;
 	}
 
-	/*
-	 * Upper bound is set according to current link speed (min_w_val
+	/* Upper bound is set according to current link speed (min_w_val
 	 * should be the same for upper bound and COS credit val).
 	 */
 	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
@@ -1160,8 +1139,7 @@
 	for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
 		if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
 			cos_bw_bitmap |= (1 << cos_entry);
-			/*
-			 * The function also sets the BW in HW(not the mappin
+			/* The function also sets the BW in HW (not the mapping
 			 * yet)
 			 */
 			bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
@@ -1217,14 +1195,12 @@
 	/* ETS disabled configuration */
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
-	/*
-	 * defines which entries (clients) are subjected to WFQ arbitration
+	/* Defines which entries (clients) are subjected to WFQ arbitration
 	 * COS0 0x8
 	 * COS1 0x10
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
-	/*
-	 * mapping between the ARB_CREDIT_WEIGHT registers and actual
+	/* Mapping between the ARB_CREDIT_WEIGHT registers and actual
 	 * client numbers (WEIGHT_0 does not actually have to represent
 	 * client 0)
 	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
@@ -1242,8 +1218,7 @@
 
 	/* Defines the number of consecutive slots for the strict priority */
 	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-	/*
-	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
 	 * entry, 4 - COS1 entry.
 	 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1298,8 +1273,7 @@
 	u32 val	= 0;
 
 	DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
-	/*
-	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries,
 	 * 3 - COS0 entry, 4 - COS1 entry.
 	 *  COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1307,8 +1281,7 @@
 	 * MCP and debug are strict
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
-	/*
-	 * For strict priority entries defines the number of consecutive slots
+	/* For strict priority entries defines the number of consecutive slots
 	 * for the highest priority.
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
@@ -1320,8 +1293,7 @@
 	/* Defines the number of consecutive slots for the strict priority */
 	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
 
-	/*
-	 * mapping between entry  priority to client number (0,1,2 -debug and
+	/* Mapping between entry priority and client number (0,1,2 - debug and
 	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
 	 * 3bits client num.
 	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
@@ -1356,15 +1328,12 @@
 	if (!(params->feature_config_flags &
 	      FEATURE_CONFIG_PFC_ENABLED)) {
 
-		/*
-		 * RX flow control - Process pause frame in receive direction
+		/* RX flow control - Process pause frame in receive direction
 		 */
 		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
 			pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
 
-		/*
-		 * TX flow control - Send pause packet when buffer is full
-		 */
+		/* TX flow control - Send pause packet when buffer is full */
 		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
 			pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
 	} else {/* PFC support */
@@ -1457,8 +1426,7 @@
 static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
 {
 	u32 mode, emac_base;
-	/**
-	 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+	/* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
 	 * (a value of 49==0x31) and make sure that the AUTO poll is off
 	 */
 
@@ -1578,15 +1546,6 @@
 
 	DP(NETIF_MSG_LINK, "enabling UMAC\n");
 
-	/**
-	 * This register determines on which events the MAC will assert
-	 * error on the i/f to the NIG along w/ EOP.
-	 */
-
-	/**
-	 * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
-	 * params->port*0x14,      0xfffff.
-	 */
 	/* This register opens the gate for the UMAC despite its name */
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
 
@@ -1649,8 +1608,7 @@
 		val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
 	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
 
-	/*
-	 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+	/* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
 	 * length used by the MAC receive logic to check frames.
 	 */
 	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -1666,8 +1624,7 @@
 	struct bnx2x *bp = params->bp;
 	u32 is_port4mode = bnx2x_is_4_port_mode(bp);
 
-	/*
-	 * In 4-port mode, need to set the mode only once, so if XMAC is
+	/* In 4-port mode, need to set the mode only once, so if XMAC is
 	 * already out of reset, it means the mode has already been set,
 	 * and it must not* reset the XMAC again, since it controls both
 	 * ports of the path
@@ -1691,13 +1648,13 @@
 	if (is_port4mode) {
 		DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
 
-		/*  Set the number of ports on the system side to up to 2 */
+		/* Set the number of ports on the system side to up to 2 */
 		REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
 
 		/* Set the number of ports on the Warp Core to 10G */
 		REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
 	} else {
-		/*  Set the number of ports on the system side to 1 */
+		/* Set the number of ports on the system side to 1 */
 		REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
 		if (max_speed == SPEED_10000) {
 			DP(NETIF_MSG_LINK,
@@ -1729,8 +1686,7 @@
 
 	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
 	    MISC_REGISTERS_RESET_REG_2_XMAC) {
-		/*
-		 * Send an indication to change the state in the NIG back to XON
+		/* Send an indication to change the state in the NIG back to XON
 		 * Clearing this bit enables the next set of this bit to get
 		 * rising edge
 		 */
@@ -1755,13 +1711,11 @@
 
 	bnx2x_xmac_init(params, vars->line_speed);
 
-	/*
-	 * This register determines on which events the MAC will assert
+	/* This register determines on which events the MAC will assert
 	 * error on the i/f to the NIG along w/ EOP.
 	 */
 
-	/*
-	 * This register tells the NIG whether to send traffic to UMAC
+	/* This register tells the NIG whether to send traffic to UMAC
 	 * or XMAC
 	 */
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
@@ -1863,8 +1817,7 @@
 	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
 	val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
 
-	/*
-	 * Setting this bit causes MAC control frames (except for pause
+	/* Setting this bit causes MAC control frames (except for pause
 	 * frames) to be passed on for processing. This setting has no
 	 * affect on the operation of the pause frames. This bit effects
 	 * all packets regardless of RX Parser packet sorting logic.
@@ -1963,8 +1916,7 @@
 				   struct link_vars *vars,
 				   u8 is_lb)
 {
-	/*
-	 * Set rx control: Strip CRC and enable BigMAC to relay
+	/* Set rx control: Strip CRC and enable BigMAC to relay
 	 * control packets to the system as well
 	 */
 	u32 wb_data[2];
@@ -2016,8 +1968,7 @@
 
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
 
-	/*
-	 * Set Time (based unit is 512 bit time) between automatic
+	/* Set Time (based unit is 512 bit time) between automatic
 	 * re-sending of PP packets amd enable automatic re-send of
 	 * Per-Priroity Packet as long as pp_gen is asserted and
 	 * pp_disable is low.
@@ -2086,7 +2037,7 @@
 	config_val->default_class1.full_xon = 0;
 
 	if (CHIP_IS_E2(bp)) {
-		/*  class0 defaults */
+		/* Class0 defaults */
 		config_val->default_class0.pause_xoff =
 			DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
 		config_val->default_class0.pause_xon =
@@ -2095,7 +2046,7 @@
 			DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
 		config_val->default_class0.full_xon =
 			DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
-		/*  pause able*/
+		/* Pauseable */
 		config_val->pauseable_th.pause_xoff =
 			PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 		config_val->pauseable_th.pause_xon =
@@ -2114,7 +2065,7 @@
 		config_val->non_pauseable_th.full_xon =
 			PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
 	} else if (CHIP_IS_E3A0(bp)) {
-		/*  class0 defaults */
+		/* Class0 defaults */
 		config_val->default_class0.pause_xoff =
 			DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
 		config_val->default_class0.pause_xon =
@@ -2123,7 +2074,7 @@
 			DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
 		config_val->default_class0.full_xon =
 			DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
-		/*  pause able */
+		/* Pauseable */
 		config_val->pauseable_th.pause_xoff =
 			PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 		config_val->pauseable_th.pause_xon =
@@ -2142,7 +2093,7 @@
 		config_val->non_pauseable_th.full_xon =
 			PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
 	} else if (CHIP_IS_E3B0(bp)) {
-		/*  class0 defaults */
+		/* Class0 defaults */
 		config_val->default_class0.pause_xoff =
 			DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
 		config_val->default_class0.pause_xon =
@@ -2305,27 +2256,23 @@
 			reg_th_config = &config_val.non_pauseable_th;
 	} else
 		reg_th_config = &config_val.default_class0;
-	/*
-	 * The number of free blocks below which the pause signal to class 0
+	/* The number of free blocks below which the pause signal to class 0
 	 * of MAC #n is asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
 	       BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
 	       reg_th_config->pause_xoff);
-	/*
-	 * The number of free blocks above which the pause signal to class 0
+	/* The number of free blocks above which the pause signal to class 0
 	 * of MAC #n is de-asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
 	       BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
-	/*
-	 * The number of free blocks below which the full signal to class 0
+	/* The number of free blocks below which the full signal to class 0
 	 * of MAC #n is asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
 	       BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
-	/*
-	 * The number of free blocks above which the full signal to class 0
+	/* The number of free blocks above which the full signal to class 0
 	 * of MAC #n is de-asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
@@ -2339,30 +2286,26 @@
 			reg_th_config = &config_val.non_pauseable_th;
 	} else
 		reg_th_config = &config_val.default_class1;
-	/*
-	 * The number of free blocks below which the pause signal to
+	/* The number of free blocks below which the pause signal to
 	 * class 1 of MAC #n is asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
 	       BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
 	       reg_th_config->pause_xoff);
 
-	/*
-	 * The number of free blocks above which the pause signal to
+	/* The number of free blocks above which the pause signal to
 	 * class 1 of MAC #n is de-asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
 	       BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
 	       reg_th_config->pause_xon);
-	/*
-	 * The number of free blocks below which the full signal to
+	/* The number of free blocks below which the full signal to
 	 * class 1 of MAC #n is asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
 	       BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
 	       reg_th_config->full_xoff);
-	/*
-	 * The number of free blocks above which the full signal to
+	/* The number of free blocks above which the full signal to
 	 * class 1 of MAC #n is de-asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
@@ -2379,49 +2322,41 @@
 		REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
 			   e3b0_val.per_class_guaranty_mode);
 
-		/*
-		 * The hysteresis on the guarantied buffer space for the Lb
+		/* The hysteresis on the guarantied buffer space for the Lb
 		 * port before signaling XON.
 		 */
 		REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
 			   e3b0_val.lb_guarantied_hyst);
 
-		/*
-		 * The number of free blocks below which the full signal to the
+		/* The number of free blocks below which the full signal to the
 		 * LB port is asserted.
 		 */
 		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
 		       e3b0_val.full_lb_xoff_th);
-		/*
-		 * The number of free blocks above which the full signal to the
+		/* The number of free blocks above which the full signal to the
 		 * LB port is de-asserted.
 		 */
 		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
 		       e3b0_val.full_lb_xon_threshold);
-		/*
-		 * The number of blocks guarantied for the MAC #n port. n=0,1
+		/* The number of blocks guarantied for the MAC #n port. n=0,1
 		 */
 
-		/* The number of blocks guarantied for the LB port.*/
+		/* The number of blocks guarantied for the LB port. */
 		REG_WR(bp, BRB1_REG_LB_GUARANTIED,
 		       e3b0_val.lb_guarantied);
 
-		/*
-		 * The number of blocks guarantied for the MAC #n port.
-		 */
+		/* The number of blocks guarantied for the MAC #n port. */
 		REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
 		       2 * e3b0_val.mac_0_class_t_guarantied);
 		REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
 		       2 * e3b0_val.mac_1_class_t_guarantied);
-		/*
-		 * The number of blocks guarantied for class #t in MAC0. t=0,1
+		/* The number of blocks guarantied for class #t in MAC0. t=0,1
 		 */
 		REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
 		       e3b0_val.mac_0_class_t_guarantied);
 		REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
 		       e3b0_val.mac_0_class_t_guarantied);
-		/*
-		 * The hysteresis on the guarantied buffer space for class in
+		/* The hysteresis on the guarantied buffer space for class in
 		 * MAC0.  t=0,1
 		 */
 		REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
@@ -2429,15 +2364,13 @@
 		REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
 		       e3b0_val.mac_0_class_t_guarantied_hyst);
 
-		/*
-		 * The number of blocks guarantied for class #t in MAC1.t=0,1
+		/* The number of blocks guarantied for class #t in MAC1.t=0,1
 		 */
 		REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
 		       e3b0_val.mac_1_class_t_guarantied);
 		REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
 		       e3b0_val.mac_1_class_t_guarantied);
-		/*
-		 * The hysteresis on the guarantied buffer space for class #t
+		/* The hysteresis on the guarantied buffer space for class #t
 		 * in MAC1.  t=0,1
 		 */
 		REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
@@ -2520,15 +2453,13 @@
 		FEATURE_CONFIG_PFC_ENABLED;
 	DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
 
-	/*
-	 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+	/* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
 	 * MAC control frames (that are not pause packets)
 	 * will be forwarded to the XCM.
 	 */
 	xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
 			  NIG_REG_LLH0_XCM_MASK);
-	/*
-	 * nig params will override non PFC params, since it's possible to
+	/* NIG params will override non PFC params, since it's possible to
 	 * do transition from PFC to SAFC
 	 */
 	if (set_pfc) {
@@ -2548,7 +2479,7 @@
 			llfc_out_en = nig_params->llfc_out_en;
 			llfc_enable = nig_params->llfc_enable;
 			pause_enable = nig_params->pause_enable;
-		} else  /*defaul non PFC mode - PAUSE */
+		} else  /* Default non PFC mode - PAUSE */
 			pause_enable = 1;
 
 		xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
@@ -2608,8 +2539,7 @@
 		      struct link_vars *vars,
 		      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
 {
-	/*
-	 * The PFC and pause are orthogonal to one another, meaning when
+	/* The PFC and pause are orthogonal to one another, meaning when
 	 * PFC is enabled, the pause are disabled, and when PFC is
 	 * disabled, pause are set according to the pause result.
 	 */
@@ -3148,7 +3078,6 @@
 			      EMAC_MDIO_STATUS_10MB);
 
 	/* address */
-
 	tmp = ((phy->addr << 21) | (devad << 16) | reg |
 	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
 	       EMAC_MDIO_COMM_START_BUSY);
@@ -3337,8 +3266,7 @@
 		   u8 devad, u16 reg, u16 *ret_val)
 {
 	u8 phy_index;
-	/*
-	 * Probe for the phy according to the given phy_addr, and execute
+	/* Probe for the phy according to the given phy_addr, and execute
 	 * the read request on it
 	 */
 	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3355,8 +3283,7 @@
 		    u8 devad, u16 reg, u16 val)
 {
 	u8 phy_index;
-	/*
-	 * Probe for the phy according to the given phy_addr, and execute
+	/* Probe for the phy according to the given phy_addr, and execute
 	 * the write request on it
 	 */
 	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3382,7 +3309,7 @@
 	if (bnx2x_is_4_port_mode(bp)) {
 		u32 port_swap, port_swap_ovr;
 
-		/*figure out path swap value */
+		/* Figure out path swap value */
 		path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
 		if (path_swap_ovr & 0x1)
 			path_swap = (path_swap_ovr & 0x2);
@@ -3392,7 +3319,7 @@
 		if (path_swap)
 			path = path ^ 1;
 
-		/*figure out port swap value */
+		/* Figure out port swap value */
 		port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
 		if (port_swap_ovr & 0x1)
 			port_swap = (port_swap_ovr & 0x2);
@@ -3405,7 +3332,7 @@
 		lane = (port<<1) + path;
 	} else { /* two port mode - no port swap */
 
-		/*figure out path swap value */
+		/* Figure out path swap value */
 		path_swap_ovr =
 			REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
 		if (path_swap_ovr & 0x1) {
@@ -3437,8 +3364,7 @@
 
 	if (USES_WARPCORE(bp)) {
 		aer_val = bnx2x_get_warpcore_lane(phy, params);
-		/*
-		 * In Dual-lane mode, two lanes are joined together,
+		/* In Dual-lane mode, two lanes are joined together,
 		 * so in order to configure them, the AER broadcast method is
 		 * used here.
 		 * 0x200 is the broadcast address for lanes 0,1
@@ -3518,8 +3444,7 @@
 {
 	struct bnx2x *bp = params->bp;
 	*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
-	/**
-	 * resolve pause mode and advertisement Please refer to Table
+	/* Resolve pause mode and advertisement. Please refer to Table
 	 * 28B-3 of the 802.3ab-1999 spec
 	 */
 
@@ -3642,6 +3567,7 @@
 		vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
 	if (pause_result & (1<<1))
 		vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+
 }
 
 static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
@@ -3698,6 +3624,7 @@
 	bnx2x_pause_resolve(vars, pause_result);
 
 }
+
 static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
 				   struct link_params *params,
 				   struct link_vars *vars)
@@ -3819,9 +3746,7 @@
 
 	/* Advertise pause */
 	bnx2x_ext_phy_set_pause(params, phy, vars);
-
-	/*
-	 * Set KR Autoneg Work-Around flag for Warpcore version older than D108
+	/* Set KR Autoneg Work-Around flag for Warpcore version older than D108
 	 */
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 			MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
@@ -3829,7 +3754,6 @@
 		DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
 		vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
 	}
-
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 			MDIO_WC_REG_DIGITAL5_MISC7, &val16);
 
@@ -3903,7 +3827,7 @@
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
 			 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
 
-	/*Enable encoded forced speed */
+	/* Enable encoded forced speed */
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
 
@@ -4265,8 +4189,7 @@
 				PORT_HW_CFG_E3_MOD_ABS_MASK) >>
 				PORT_HW_CFG_E3_MOD_ABS_SHIFT;
 
-		/*
-		 * Should not happen. This function called upon interrupt
+		/* Should not happen. This function is called upon interrupt
 		 * triggered by GPIO ( since EPIO can only generate interrupts
 		 * to MCP).
 		 * So if this function was called and none of the GPIOs was set,
@@ -4366,7 +4289,7 @@
 					"link up, rx_tx_asic_rst 0x%x\n",
 					vars->rx_tx_asic_rst);
 			} else {
-				/*reset the lane to see if link comes up.*/
+				/* Reset the lane to see if link comes up. */
 				bnx2x_warpcore_reset_lane(bp, phy, 1);
 				bnx2x_warpcore_reset_lane(bp, phy, 0);
 
@@ -4387,7 +4310,6 @@
 	} /*params->rx_tx_asic_rst*/
 
 }
-
 static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
 				       struct link_params *params,
 				       struct link_vars *vars)
@@ -4545,7 +4467,7 @@
 	/* Update those 1-copy registers */
 	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 			  MDIO_AER_BLOCK_AER_REG, 0);
-		/* Enable 1G MDIO (1-copy) */
+	/* Enable 1G MDIO (1-copy) */
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 			MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
 			&val16);
@@ -4624,43 +4546,43 @@
 		vars->duplex = DUPLEX_FULL;
 		switch (vars->link_status &
 			LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
-			case LINK_10THD:
-				vars->duplex = DUPLEX_HALF;
-				/* fall thru */
-			case LINK_10TFD:
-				vars->line_speed = SPEED_10;
-				break;
+		case LINK_10THD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_10TFD:
+			vars->line_speed = SPEED_10;
+			break;
 
-			case LINK_100TXHD:
-				vars->duplex = DUPLEX_HALF;
-				/* fall thru */
-			case LINK_100T4:
-			case LINK_100TXFD:
-				vars->line_speed = SPEED_100;
-				break;
+		case LINK_100TXHD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_100T4:
+		case LINK_100TXFD:
+			vars->line_speed = SPEED_100;
+			break;
 
-			case LINK_1000THD:
-				vars->duplex = DUPLEX_HALF;
-				/* fall thru */
-			case LINK_1000TFD:
-				vars->line_speed = SPEED_1000;
-				break;
+		case LINK_1000THD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_1000TFD:
+			vars->line_speed = SPEED_1000;
+			break;
 
-			case LINK_2500THD:
-				vars->duplex = DUPLEX_HALF;
-				/* fall thru */
-			case LINK_2500TFD:
-				vars->line_speed = SPEED_2500;
-				break;
+		case LINK_2500THD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_2500TFD:
+			vars->line_speed = SPEED_2500;
+			break;
 
-			case LINK_10GTFD:
-				vars->line_speed = SPEED_10000;
-				break;
-			case LINK_20GTFD:
-				vars->line_speed = SPEED_20000;
-				break;
-			default:
-				break;
+		case LINK_10GTFD:
+			vars->line_speed = SPEED_10000;
+			break;
+		case LINK_20GTFD:
+			vars->line_speed = SPEED_20000;
+			break;
+		default:
+			break;
 		}
 		vars->flow_ctrl = 0;
 		if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
@@ -4835,9 +4757,8 @@
 				 struct bnx2x_phy *phy)
 {
 	struct bnx2x *bp = params->bp;
-	/*
-	 *  Each two bits represents a lane number:
-	 *  No swap is 0123 => 0x1b no need to enable the swap
+	/* Each pair of bits represents a lane number:
+	 * No swap is 0123 => 0x1b, no need to enable the swap
 	 */
 	u16 rx_lane_swap, tx_lane_swap;
 
@@ -5051,8 +4972,7 @@
 			  MDIO_REG_BANK_COMBO_IEEE0,
 			  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
 
-	/*
-	 * program speed
+	/* Program speed
 	 *  - needed only if the speed is greater than 1G (2.5G or 10G)
 	 */
 	CL22_RD_OVER_CL45(bp, phy,
@@ -5087,8 +5007,6 @@
 	struct bnx2x *bp = params->bp;
 	u16 val = 0;
 
-	/* configure the 48 bits for BAM AN */
-
 	/* set extended capabilities */
 	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
 		val |= MDIO_OVER_1G_UP1_2_5G;
@@ -5234,11 +5152,8 @@
 	}
 }
 
-
-/*
- * link management
+/* Link management
  */
-
 static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
 					     struct link_params *params)
 {
@@ -5383,8 +5298,7 @@
 			     "ustat_val(0x8371) = 0x%x\n", ustat_val);
 		return;
 	}
-	/*
-	 * Step 3: Check CL37 Message Pages received to indicate LP
+	/* Step 3: Check CL37 Message Pages received to indicate LP
 	 * supports only CL37
 	 */
 	CL22_RD_OVER_CL45(bp, phy,
@@ -5401,8 +5315,7 @@
 			 cl37_fsm_received);
 		return;
 	}
-	/*
-	 * The combined cl37/cl73 fsm state information indicating that
+	/* The combined cl37/cl73 fsm state information indicating that
 	 * we are connected to a device which does not support cl73, but
 	 * does support cl37 BAM. In this case we disable cl73 and
 	 * restart cl37 auto-neg
@@ -5973,8 +5886,7 @@
 {
 	u32 latch_status = 0;
 
-	/*
-	 * Disable the MI INT ( external phy int ) by writing 1 to the
+	/* Disable the MI INT (external phy int) by writing 1 to the
 	 * status register. Link down indication is high-active-signal,
 	 * so in this case we need to write the status to clear the XOR
 	 */
@@ -6009,8 +5921,7 @@
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
 	u32 mask;
-	/*
-	 * First reset all status we assume only one line will be
+	/* First reset all status; we assume only one line will
 	 * change at a time
 	 */
 	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -6024,8 +5935,7 @@
 			if (is_10g_plus)
 				mask = NIG_STATUS_XGXS0_LINK10G;
 			else if (params->switch_cfg == SWITCH_CFG_10G) {
-				/*
-				 * Disable the link interrupt by writing 1 to
+				/* Disable the link interrupt by writing 1 to
 				 * the relevant lane in the status register
 				 */
 				u32 ser_lane =
@@ -6227,8 +6137,7 @@
 		break;
 
 	case LED_MODE_OPER:
-		/*
-		 * For all other phys, OPER mode is same as ON, so in case
+		/* For all other phys, OPER mode is the same as ON, so in case
 		 * link is down, do nothing
 		 */
 		if (!vars->link_up)
@@ -6239,9 +6148,7 @@
 			 (params->phy[EXT_PHY1].type ==
 			  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
 		    CHIP_IS_E2(bp) && params->num_phys == 2) {
-			/*
-			 * This is a work-around for E2+8727 Configurations
-			 */
+			/* This is a work-around for E2+8727 Configurations */
 			if (mode == LED_MODE_ON ||
 				speed == SPEED_10000){
 				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -6250,8 +6157,7 @@
 				tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
 				EMAC_WR(bp, EMAC_REG_EMAC_LED,
 					(tmp | EMAC_LED_OVERRIDE));
-				/*
-				 * return here without enabling traffic
+				/* Return here without enabling traffic
 				 * LED blink and setting rate in ON mode.
 				 * In oper mode, enabling LED blink
 				 * and setting rate is needed.
@@ -6260,8 +6166,7 @@
 					return rc;
 			}
 		} else if (SINGLE_MEDIA_DIRECT(params)) {
-			/*
-			 * This is a work-around for HW issue found when link
+			/* This is a work-around for a HW issue found when link
 			 * is up in CL73
 			 */
 			if ((!CHIP_IS_E3(bp)) ||
@@ -6310,10 +6215,7 @@
 		     (speed == SPEED_1000) ||
 		     (speed == SPEED_100) ||
 		     (speed == SPEED_10))) {
-			/*
-			 * On Everest 1 Ax chip versions for speeds less than
-			 * 10G LED scheme is different
-			 */
+			/* For speeds less than 10G the LED scheme is different */
 			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
 			       + port*4, 1);
 			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
@@ -6333,8 +6235,7 @@
 
 }
 
-/*
- * This function comes to reflect the actual link state read DIRECTLY from the
+/* This function reflects the actual link state, read DIRECTLY from the
  * HW
  */
 int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
@@ -6422,16 +6323,14 @@
 	int rc = 0;
 	u8 phy_index, non_ext_phy;
 	struct bnx2x *bp = params->bp;
-	/*
-	 * In case of external phy existence, the line speed would be the
+	/* In case of external phy existence, the line speed would be the
 	 * line speed linked up by the external phy. In case it is direct
 	 * only, then the line_speed during initialization will be
 	 * equal to the req_line_speed
 	 */
 	vars->line_speed = params->phy[INT_PHY].req_line_speed;
 
-	/*
-	 * Initialize the internal phy in case this is a direct board
+	/* Initialize the internal phy in case this is a direct board
 	 * (no external phys), or this board has external phy which requires
 	 * to first.
 	 */
@@ -6463,8 +6362,7 @@
 	} else {
 		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 		      phy_index++) {
-			/*
-			 * No need to initialize second phy in case of first
+			/* No need to initialize second phy in case of first
 			 * phy only selection. In case of second phy, we do
 			 * need to initialize the first phy, since they are
 			 * connected.
@@ -6492,7 +6390,6 @@
 			NIG_STATUS_XGXS0_LINK_STATUS |
 			NIG_STATUS_SERDES0_LINK_STATUS |
 			NIG_MASK_MI_INT));
-	bnx2x_update_mng(params, vars->link_status);
 	return rc;
 }
 
@@ -6577,7 +6474,7 @@
 				u8 link_10g)
 {
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
+	u8 phy_idx, port = params->port;
 	int rc = 0;
 
 	vars->link_status |= (LINK_STATUS_LINK_UP |
@@ -6641,11 +6538,18 @@
 
 	/* update shared memory */
 	bnx2x_update_mng(params, vars->link_status);
+
+	/* Check remote fault */
+	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+		if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+			bnx2x_check_half_open_conn(params, vars, 0);
+			break;
+		}
+	}
 	msleep(20);
 	return rc;
 }
-/*
- * The bnx2x_link_update function should be called upon link
+/* The bnx2x_link_update function should be called upon link
  * interrupt.
  * Link is considered up as follows:
  * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
@@ -6702,8 +6606,7 @@
 	if (!CHIP_IS_E3(bp))
 		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 
-	/*
-	 * Step 1:
+	/* Step 1:
 	 * Check external link change only for external phys, and apply
 	 * priority selection between them in case the link on both phys
 	 * is up. Note that instead of the common vars, a temporary
@@ -6734,23 +6637,20 @@
 			switch (bnx2x_phy_selection(params)) {
 			case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
 			case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
-			/*
-			 * In this option, the first PHY makes sure to pass the
+			/* In this option, the first PHY makes sure to pass the
 			 * traffic through itself only.
 			 * Its not clear how to reset the link on the second phy
 			 */
 				active_external_phy = EXT_PHY1;
 				break;
 			case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
-			/*
-			 * In this option, the first PHY makes sure to pass the
+			/* In this option, the first PHY makes sure to pass the
 			 * traffic through the second PHY.
 			 */
 				active_external_phy = EXT_PHY2;
 				break;
 			default:
-			/*
-			 * Link indication on both PHYs with the following cases
+			/* Link indication on both PHYs with the following cases
 			 * is invalid:
 			 * - FIRST_PHY means that second phy wasn't initialized,
 			 * hence its link is expected to be down
@@ -6767,8 +6667,7 @@
 		}
 	}
 	prev_line_speed = vars->line_speed;
-	/*
-	 * Step 2:
+	/* Step 2:
 	 * Read the status of the internal phy. In case of
 	 * DIRECT_SINGLE_MEDIA board, this link is the external link,
 	 * otherwise this is the link between the 577xx and the first
@@ -6778,8 +6677,7 @@
 		params->phy[INT_PHY].read_status(
 			&params->phy[INT_PHY],
 			params, vars);
-	/*
-	 * The INT_PHY flow control reside in the vars. This include the
+	/* The INT_PHY flow control resides in the vars. This includes the
 	 * case where the speed or flow control are not set to AUTO.
 	 * Otherwise, the active external phy flow control result is set
 	 * to the vars. The ext_phy_line_speed is needed to check if the
@@ -6788,14 +6686,12 @@
 	 */
 	if (active_external_phy > INT_PHY) {
 		vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
-		/*
-		 * Link speed is taken from the XGXS. AN and FC result from
+		/* Link speed is taken from the XGXS. AN and FC result from
 		 * the external phy.
 		 */
 		vars->link_status |= phy_vars[active_external_phy].link_status;
 
-		/*
-		 * if active_external_phy is first PHY and link is up - disable
+		/* If active_external_phy is first PHY and link is up -
 		 * disable TX on second external PHY
 		 */
 		if (active_external_phy == EXT_PHY1) {
@@ -6832,8 +6728,7 @@
 	DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
 		   " ext_phy_line_speed = %d\n", vars->flow_ctrl,
 		   vars->link_status, ext_phy_line_speed);
-	/*
-	 * Upon link speed change set the NIG into drain mode. Comes to
+	/* Upon link speed change set the NIG into drain mode. This
 	 * deals with possible FIFO glitch due to clk change when speed
 	 * is decreased without link down indicator
 	 */
@@ -6858,8 +6753,7 @@
 
 	bnx2x_link_int_ack(params, vars, link_10g_plus);
 
-	/*
-	 * In case external phy link is up, and internal link is down
+	/* In case external phy link is up, and internal link is down
 	 * (not initialized yet probably after link initialization, it
 	 * needs to be initialized.
 	 * Note that after link down-up as result of cable plug, the xgxs
@@ -6887,8 +6781,7 @@
 						vars);
 		}
 	}
-	/*
-	 * Link is up only if both local phy and external phy (in case of
+	/* Link is up only if both local phy and external phy (in case of
 	 * non-direct board) are up and no fault detected on active PHY.
 	 */
 	vars->link_up = (vars->phy_link_up &&
@@ -6907,6 +6800,10 @@
 	else
 		rc = bnx2x_update_link_down(params, vars);
 
+	/* Notify MCP that the link status was changed */
+	if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
+		bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
+
 	return rc;
 }
 
@@ -7120,8 +7017,7 @@
 	}
 	/* XAUI workaround in 8073 A0: */
 
-	/*
-	 * After loading the boot ROM and restarting Autoneg, poll
+	/* After loading the boot ROM and restarting Autoneg, poll
 	 * Dev1, Reg $C820:
 	 */
 
@@ -7130,8 +7026,7 @@
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
 				&val);
-		  /*
-		   * If bit [14] = 0 or bit [13] = 0, continue on with
+		  /* If bit [14] = 0 or bit [13] = 0, continue on with
 		   * system initialization (XAUI work-around not required, as
 		   * these bits indicate 2.5G or 1G link up).
 		   */
@@ -7140,8 +7035,7 @@
 			return 0;
 		} else if (!(val & (1<<15))) {
 			DP(NETIF_MSG_LINK, "bit 15 went off\n");
-			/*
-			 * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
+			/* If bit 15 is 0, then poll Dev1, Reg $C841 until its
 			 * MSB (bit15) goes to 1 (indicating that the XAUI
 			 * workaround has completed), then continue on with
 			 * system initialization.
@@ -7291,8 +7185,7 @@
 			val = (1<<7);
 		} else if (phy->req_line_speed ==  SPEED_2500) {
 			val = (1<<5);
-			/*
-			 * Note that 2.5G works only when used with 1G
+			/* Note that 2.5G works only when used with 1G
 			 * advertisement
 			 */
 		} else
@@ -7343,8 +7236,7 @@
 	/* Add support for CL37 (passive mode) III */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 
-	/*
-	 * The SNR will improve about 2db by changing BW and FEE main
+	/* The SNR will improve about 2db by changing BW and FEE main
 	 * tap. Rest commands are executed after link is up
 	 * Change FFE main cursor to 5 in EDC register
 	 */
@@ -7431,8 +7323,7 @@
 
 	link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
 	if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
-		/*
-		 * The SNR will improve about 2dbby changing the BW and FEE main
+		/* The SNR will improve about 2db by changing the BW and FEE main
 		 * tap. The 1st write to change FFE main tap is set before
 		 * restart AN. Change PLL Bandwidth in EDC register
 		 */
@@ -7479,8 +7370,7 @@
 			bnx2x_cl45_read(bp, phy,
 					MDIO_XS_DEVAD,
 					MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
-			/*
-			 * Set bit 3 to invert Rx in 1G mode and clear this bit
+			/* Set bit 3 to invert Rx in 1G mode and clear this bit
 			 * when it`s in 10G mode.
 			 */
 			if (vars->line_speed == SPEED_1000) {
@@ -7602,8 +7492,7 @@
 					   u8 pmd_dis)
 {
 	struct bnx2x *bp = params->bp;
-	/*
-	 * Disable transmitter only for bootcodes which can enable it afterwards
+	/* Disable transmitter only for bootcodes which can enable it afterwards
 	 * (for D3 link)
 	 */
 	if (pmd_dis) {
@@ -7780,9 +7669,6 @@
 	u32 data_array[4];
 	u16 addr32;
 	struct bnx2x *bp = params->bp;
-	/*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
-					" addr %d, cnt %d\n",
-					addr, byte_cnt);*/
 	if (byte_cnt > 16) {
 		DP(NETIF_MSG_LINK,
 		   "Reading from eeprom is limited to 16 bytes\n");
@@ -7847,8 +7733,7 @@
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 			 0x8002);
-	/*
-	 * Wait appropriate time for two-wire command to finish before
+	/* Wait appropriate time for two-wire command to finish before
 	 * polling the status register
 	 */
 	msleep(1);
@@ -7941,8 +7826,7 @@
 	{
 		u8 copper_module_type;
 		phy->media_type = ETH_PHY_DA_TWINAX;
-		/*
-		 * Check if its active cable (includes SFP+ module)
+		/* Check if it's an active cable (includes SFP+ module)
 		 * or passive cable
 		 */
 		if (bnx2x_read_sfp_module_eeprom(phy,
@@ -8019,8 +7903,7 @@
 	DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
 	return 0;
 }
-/*
- * This function read the relevant field from the module (SFP+), and verify it
+/* This function reads the relevant field from the module (SFP+), and verifies it
  * is compliant with this board
  */
 static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
@@ -8102,8 +7985,7 @@
 	u8 val;
 	struct bnx2x *bp = params->bp;
 	u16 timeout;
-	/*
-	 * Initialization time after hot-plug may take up to 300ms for
+	/* Initialization time after hot-plug may take up to 300ms for
 	 * some phys type ( e.g. JDSU )
 	 */
 
@@ -8125,8 +8007,7 @@
 				    u8 is_power_up) {
 	/* Make sure GPIOs are not using for LED mode */
 	u16 val;
-	/*
-	 * In the GPIO register, bit 4 is use to determine if the GPIOs are
+	/* In the GPIO register, bit 4 is used to determine if the GPIOs are
 	 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
 	 * output
 	 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
@@ -8142,8 +8023,7 @@
 	if (is_power_up)
 		val = (1<<4);
 	else
-		/*
-		 * Set GPIO control to OUTPUT, and set the power bit
+		/* Set GPIO control to OUTPUT, and set the power bit
 		 * to according to the is_power_up
 		 */
 		val = (1<<1);
@@ -8177,8 +8057,7 @@
 
 		DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
 
-		/*
-		 * Changing to LRM mode takes quite few seconds. So do it only
+		/* Changing to LRM mode takes quite few seconds. So do it only
 		 * if current mode is limiting (default is LRM)
 		 */
 		if (cur_limiting_mode != EDC_MODE_LIMITING)
@@ -8313,8 +8192,7 @@
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
 	if (CHIP_IS_E3(bp)) {
-		/*
-		 * Low ==> if SFP+ module is supported otherwise
+		/* Low ==> if SFP+ module is supported otherwise
 		 * High ==> if SFP+ module is not on the approved vendor list
 		 */
 		bnx2x_set_e3_module_fault_led(params, gpio_mode);
@@ -8339,8 +8217,7 @@
 		return;
 	DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
 		       power, pin_cfg);
-	/*
-	 * Low ==> corresponding SFP+ module is powered
+	/* Low ==> corresponding SFP+ module is powered
 	 * high ==> the SFP+ module is powered down
 	 */
 	bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
@@ -8474,14 +8351,12 @@
 		bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
 	}
 
-	/*
-	 * Check and set limiting mode / LRM mode on 8726. On 8727 it
+	/* Check and set limiting mode / LRM mode on 8726. On 8727 it
 	 * is done automatically
 	 */
 	bnx2x_set_limiting_mode(params, phy, edc_mode);
 
-	/*
-	 * Enable transmit for this module if the module is approved, or
+	/* Enable transmit for this module if the module is approved, or
 	 * if unapproved modules should also enable the Tx laser
 	 */
 	if (rc == 0 ||
@@ -8536,8 +8411,7 @@
 		bnx2x_set_gpio_int(bp, gpio_num,
 				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
 				   gpio_port);
-		/*
-		 * Module was plugged out.
+		/* Module was plugged out.
 		 * Disable transmit for this module
 		 */
 		phy->media_type = ETH_PHY_NOT_PRESENT;
@@ -8607,8 +8481,7 @@
 
 	DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
 			" link_status 0x%x\n", rx_sd, pcs_status, val2);
-	/*
-	 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+	/* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
 	 * are set, or if the autoneg bit 1 is set
 	 */
 	link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
@@ -8722,8 +8595,7 @@
 	}
 	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
 
-	/*
-	 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+	/* If TX Laser is controlled by GPIO_0, do not let PHY go into low
 	 * power mode, if TX Laser is disabled
 	 */
 
@@ -8833,8 +8705,7 @@
 
 	bnx2x_8726_external_rom_boot(phy, params);
 
-	/*
-	 * Need to call module detected on initialization since the module
+	/* Need to call module detection on initialization since the module
 	 * detection triggered by actual module insertion might occur before
 	 * driver is loaded, and when driver is loaded, it reset all
 	 * registers, including the transmitter
@@ -8871,8 +8742,7 @@
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 		bnx2x_cl45_write(bp, phy,
 				MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
-		/*
-		 * Enable RX-ALARM control to receive interrupt for 1G speed
+		/* Enable RX-ALARM control to receive interrupt for 1G speed
 		 * change
 		 */
 		bnx2x_cl45_write(bp, phy,
@@ -8973,8 +8843,7 @@
 				struct link_params *params) {
 	u32 swap_val, swap_override;
 	u8 port;
-	/*
-	 * The PHY reset is controlled by GPIO 1. Fake the port number
+	/* The PHY reset is controlled by GPIO 1. Fake the port number
 	 * to cancel the swap done in set_gpio()
 	 */
 	struct bnx2x *bp = params->bp;
@@ -9012,14 +8881,12 @@
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
 
-	/*
-	 * Initially configure MOD_ABS to interrupt when module is
+	/* Initially configure MOD_ABS to interrupt when module is
 	 * presence( bit 8)
 	 */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-	/*
-	 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+	/* Set EDC off by setting OPTXLOS signal input to low (bit 9).
 	 * When the EDC is off it locks onto a reference clock and avoids
 	 * becoming 'lost'
 	 */
@@ -9040,8 +8907,7 @@
 	if (phy->flags & FLAGS_NOC)
 		val |= (3<<5);
 
-	/*
-	 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+	/* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
 	 * status which reflect SFP+ module over-current
 	 */
 	if (!(phy->flags & FLAGS_NOC))
@@ -9067,8 +8933,7 @@
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
 		DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-		/*
-		 * Power down the XAUI until link is up in case of dual-media
+		/* Power down the XAUI until link is up in case of dual-media
 		 * and 1G
 		 */
 		if (DUAL_MEDIA(params)) {
@@ -9093,8 +8958,7 @@
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
 	} else {
-		/*
-		 * Since the 8727 has only single reset pin, need to set the 10G
+		/* Since the 8727 has only single reset pin, need to set the 10G
 		 * registers although it is default
 		 */
 		bnx2x_cl45_write(bp, phy,
@@ -9109,8 +8973,7 @@
 				 0x0008);
 	}
 
-	/*
-	 * Set 2-wire transfer rate of SFP+ module EEPROM
+	/* Set 2-wire transfer rate of SFP+ module EEPROM
 	 * to 100Khz since some DACs(direct attached cables) do
 	 * not work at 400Khz.
 	 */
@@ -9133,8 +8996,7 @@
 				 phy->tx_preemphasis[1]);
 	}
 
-	/*
-	 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+	/* If TX Laser is controlled by GPIO_0, do not let PHY go into low
 	 * power mode, if TX Laser is disabled
 	 */
 	tx_en_mode = REG_RD(bp, params->shmem_base +
@@ -9180,8 +9042,7 @@
 		DP(NETIF_MSG_LINK,
 		   "MOD_ABS indication show module is absent\n");
 		phy->media_type = ETH_PHY_NOT_PRESENT;
-		/*
-		 * 1. Set mod_abs to detect next module
+		/* 1. Set mod_abs to detect next module
 		 *    presence event
 		 * 2. Set EDC off by setting OPTXLOS signal input to low
 		 *    (bit 9).
@@ -9195,8 +9056,7 @@
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-		/*
-		 * Clear RX alarm since it stays up as long as
+		/* Clear RX alarm since it stays up as long as
 		 * the mod_abs wasn't changed
 		 */
 		bnx2x_cl45_read(bp, phy,
@@ -9207,8 +9067,7 @@
 		/* Module is present */
 		DP(NETIF_MSG_LINK,
 		   "MOD_ABS indication show module is present\n");
-		/*
-		 * First disable transmitter, and if the module is ok, the
+		/* First disable transmitter, and if the module is ok, the
 		 * module_detection will enable it
 		 * 1. Set mod_abs to detect next module absent event ( bit 8)
 		 * 2. Restore the default polarity of the OPRXLOS signal and
@@ -9222,8 +9081,7 @@
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-		/*
-		 * Clear RX alarm since it stays up as long as the mod_abs
+		/* Clear RX alarm since it stays up as long as the mod_abs
 		 * wasn't changed. This is need to be done before calling the
 		 * module detection, otherwise it will clear* the link update
 		 * alarm
@@ -9284,8 +9142,7 @@
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 
-	/*
-	 * If a module is present and there is need to check
+	/* If a module is present and there is a need to check
 	 * for over current
 	 */
 	if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
@@ -9350,8 +9207,7 @@
 			MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
 
-	/*
-	 * Bits 0..2 --> speed detected,
+	/* Bits 0..2 --> speed detected,
 	 * Bits 13..15--> link is down
 	 */
 	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
@@ -9394,8 +9250,7 @@
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_8727_PCS_GP, &val1);
-		/*
-		 * In case of dual-media board and 1G, power up the XAUI side,
+		/* In case of dual-media board and 1G, power up the XAUI side,
 		 * otherwise power it down. For 10G it is done automatically
 		 */
 		if (link_up)
@@ -9561,8 +9416,7 @@
 		/* Save spirom version */
 		bnx2x_save_848xx_spirom_version(phy, bp, params->port);
 	}
-	/*
-	 * This phy uses the NIG latch mechanism since link indication
+	/* This phy uses the NIG latch mechanism since link indication
 	 * arrives through its LED4 and not via its LASI signal, so we
 	 * get steady signal instead of clear on read
 	 */
@@ -9667,8 +9521,7 @@
 	if (phy->req_duplex == DUPLEX_FULL)
 		autoneg_val |= (1<<8);
 
-	/*
-	 * Always write this if this is not 84833.
+	/* Always write this if this is not 84833.
 	 * For 84833, write it only when it's a forced speed.
 	 */
 	if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
@@ -9916,8 +9769,7 @@
 	/* Wait for GPHY to come out of reset */
 	msleep(50);
 	if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
-		/*
-		 * BCM84823 requires that XGXS links up first @ 10G for normal
+		/* BCM84823 requires that XGXS links up first @ 10G for normal
 		 * behavior.
 		 */
 		u16 temp;
@@ -10393,8 +10245,7 @@
 		break;
 	}
 
-	/*
-	 * This is a workaround for E3+84833 until autoneg
+	/* This is a workaround for E3+84833 until autoneg
 	 * restart is fixed in f/w
 	 */
 	if (CHIP_IS_E3(bp)) {
@@ -10418,8 +10269,7 @@
 	DP(NETIF_MSG_LINK, "54618SE cfg init\n");
 	usleep_range(1000, 1000);
 
-	/*
-	 * This works with E3 only, no need to check the chip
+	/* This works with E3 only, no need to check the chip
 	 * before determining the port.
 	 */
 	port = params->port;
@@ -10441,7 +10291,7 @@
 			 MDIO_PMA_REG_CTRL, 0x8000);
 	bnx2x_wait_reset_complete(bp, phy, params);
 
-	/*wait for GPHY to reset */
+	/* Wait for GPHY to reset */
 	msleep(50);
 
 	/* Configure LED4: set to INTR (0x6). */
@@ -10647,13 +10497,11 @@
 	u32 cfg_pin;
 	u8 port;
 
-	/*
-	 * In case of no EPIO routed to reset the GPHY, put it
+	/* In case of no EPIO routed to reset the GPHY, put it
 	 * in low power mode.
 	 */
 	bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
-	/*
-	 * This works with E3 only, no need to check the chip
+	/* This works with E3 only, no need to check the chip
 	 * before determining the port.
 	 */
 	port = params->port;
@@ -10762,7 +10610,7 @@
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
 
 		if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
-			/* report LP advertised speeds */
+			/* Report LP advertised speeds */
 			bnx2x_cl22_read(bp, phy, 0x5, &val);
 
 			if (val & (1<<5))
@@ -10827,8 +10675,7 @@
 	/* This register opens the gate for the UMAC despite its name */
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
 
-	/*
-	 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+	/* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
 	 * length used by the MAC receive logic to check frames.
 	 */
 	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -11101,22 +10948,23 @@
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= FLAGS_HW_LOCK_REQUIRED,
+	.flags		= (FLAGS_HW_LOCK_REQUIRED |
+			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
 	.supported	= (SUPPORTED_10baseT_Half |
-			     SUPPORTED_10baseT_Full |
-			     SUPPORTED_100baseT_Half |
-			     SUPPORTED_100baseT_Full |
-			     SUPPORTED_1000baseT_Full |
-			     SUPPORTED_10000baseT_Full |
-			     SUPPORTED_20000baseKR2_Full |
-			     SUPPORTED_20000baseMLD2_Full |
-			     SUPPORTED_FIBRE |
-			     SUPPORTED_Autoneg |
-			     SUPPORTED_Pause |
-			     SUPPORTED_Asym_Pause),
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_20000baseKR2_Full |
+			   SUPPORTED_20000baseMLD2_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
 	.media_type	= ETH_PHY_UNSPECIFIED,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
@@ -11258,7 +11106,8 @@
 	.addr		= 0xff,
 	.def_md_devad	= 0,
 	.flags		= (FLAGS_HW_LOCK_REQUIRED |
-			   FLAGS_INIT_XGXS_FIRST),
+			   FLAGS_INIT_XGXS_FIRST |
+			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -11289,7 +11138,8 @@
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= FLAGS_FAN_FAILURE_DET_REQ,
+	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
+			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -11354,8 +11204,9 @@
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
-			  FLAGS_REARM_LATCH_SIGNAL,
+	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
+			   FLAGS_REARM_LATCH_SIGNAL |
+			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -11390,8 +11241,9 @@
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
-			    FLAGS_REARM_LATCH_SIGNAL,
+	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
+			   FLAGS_REARM_LATCH_SIGNAL |
+			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -11466,9 +11318,8 @@
 	/* Get the 4 lanes xgxs config rx and tx */
 	u32 rx = 0, tx = 0, i;
 	for (i = 0; i < 2; i++) {
-		/*
-		 * INT_PHY and EXT_PHY1 share the same value location in the
-		 * shmem. When num_phys is greater than 1, than this value
+		/* INT_PHY and EXT_PHY1 share the same value location in
+		 * the shmem. When num_phys is greater than 1, then this value
 		 * applies only to EXT_PHY1
 		 */
 		if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
@@ -11546,8 +11397,7 @@
 					offsetof(struct shmem_region, dev_info.
 					port_hw_config[port].default_cfg)) &
 				 PORT_HW_CFG_NET_SERDES_IF_MASK);
-		/*
-		 * Set the appropriate supported and flags indications per
+		/* Set the appropriate supported and flags indications per
 		 * interface type of the chip
 		 */
 		switch (serdes_net_if) {
@@ -11605,8 +11455,7 @@
 			break;
 		}
 
-		/*
-		 * Enable MDC/MDIO work-around for E3 A0 since free running MDC
+		/* Enable MDC/MDIO work-around for E3 A0 since free running MDC
 		 * was not set as expected. For B0, ECO will be enabled so there
 		 * won't be an issue there
 		 */
@@ -11719,8 +11568,7 @@
 	phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
 	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
 
-	/*
-	 * The shmem address of the phy version is located on different
+	/* The shmem address of the phy version is located on different
 	 * structures. In case this structure is too old, do not set
 	 * the address
 	 */
@@ -11754,8 +11602,7 @@
 
 	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
 	    (phy->ver_addr)) {
-		/*
-		 * Remove 100Mb link supported for BCM84833 when phy fw
+		/* Remove 100Mb link support for BCM84833 when phy fw
 		 * version lower than or equal to 1.39
 		 */
 		u32 raw_ver = REG_RD(bp, phy->ver_addr);
@@ -11765,8 +11612,7 @@
 					    SUPPORTED_100baseT_Full);
 	}
 
-	/*
-	 * In case mdc/mdio_access of the external phy is different than the
+	/* In case mdc/mdio_access of the external phy is different than the
 	 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
 	 * to prevent one port interfere with another port's CL45 operations.
 	 */
@@ -11936,13 +11782,16 @@
 		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
 			break;
 
+		if (params->feature_config_flags &
+		    FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
+			phy->flags &= ~FLAGS_TX_ERROR_CHECK;
+
 		sync_offset = params->shmem_base +
 			offsetof(struct shmem_region,
 			dev_info.port_hw_config[params->port].media_type);
 		media_types = REG_RD(bp, sync_offset);
 
-		/*
-		 * Update media type for non-PMF sync only for the first time
+		/* Update media type for non-PMF sync only for the first time
 		 * In case the media type changes afterwards, it will be updated
 		 * using the update_status function
 		 */
@@ -12016,8 +11865,7 @@
 	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 	vars->mac_type = MAC_TYPE_XMAC;
 	vars->phy_flags = PHY_XGXS_FLAG;
-	/*
-	 * Set WC to loopback mode since link is required to provide clock
+	/* Set WC to loopback mode since link is required to provide clock
 	 * to the XMAC in 20G mode
 	 */
 	bnx2x_set_aer_mmd(params, &params->phy[0]);
@@ -12162,6 +12010,7 @@
 		bnx2x_link_int_enable(params);
 		break;
 	}
+	bnx2x_update_mng(params, vars->link_status);
 	return 0;
 }
 
@@ -12302,7 +12151,8 @@
 				NIG_MASK_MI_INT));
 
 		/* Need to take the phy out of low power mode in order
-			to write to access its registers */
+		 * to access its registers
+		 */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 			       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 			       port);
@@ -12350,8 +12200,7 @@
 				 (val | 1<<10));
 	}
 
-	/*
-	 * Toggle Transmitter: Power down and then up with 600ms delay
+	/* Toggle Transmitter: Power down and then up with 600ms delay
 	 * between
 	 */
 	msleep(600);
@@ -12494,8 +12343,7 @@
 	reset_gpio = MISC_REGISTERS_GPIO_1;
 	port = 1;
 
-	/*
-	 * Retrieve the reset gpio/port which control the reset.
+	/* Retrieve the reset gpio/port which controls the reset.
 	 * Default is GPIO1, PORT1
 	 */
 	bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
@@ -12670,8 +12518,7 @@
 		break;
 
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-		/*
-		 * GPIO1 affects both ports, so there's need to pull
+		/* GPIO1 affects both ports, so there's need to pull
 		 * it for single port alone
 		 */
 		rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
@@ -12679,8 +12526,7 @@
 						phy_index, chip_id);
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
-		/*
-		 * GPIO3's are linked, and so both need to be toggled
+		/* GPIO3's are linked, and so both need to be toggled
 		 * to obtain required 2us pulse.
 		 */
 		rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
@@ -12779,7 +12625,8 @@
 }
 
 static void bnx2x_analyze_link_error(struct link_params *params,
-				     struct link_vars *vars, u32 lss_status)
+				     struct link_vars *vars, u32 lss_status,
+				     u8 notify)
 {
 	struct bnx2x *bp = params->bp;
 	/* Compare new value with previous value */
@@ -12793,8 +12640,7 @@
 	DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
 		       half_open_conn, lss_status);
 
-	/*
-	 * a. Update shmem->link_status accordingly
+	/* a. Update shmem->link_status accordingly
 	 * b. Update link_vars->link_up
 	 */
 	if (lss_status) {
@@ -12802,8 +12648,10 @@
 		vars->link_status &= ~LINK_STATUS_LINK_UP;
 		vars->link_up = 0;
 		vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
-		/*
-		 * Set LED mode to off since the PHY doesn't know about these
+
+		/* activate nig drain */
+		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+		/* Set LED mode to off since the PHY doesn't know about these
 		 * errors
 		 */
 		led_mode = LED_MODE_OFF;
@@ -12813,7 +12661,11 @@
 		vars->link_up = 1;
 		vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
 		led_mode = LED_MODE_OPER;
+
+		/* Clear nig drain */
+		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 	}
+	bnx2x_sync_link(params, vars);
 	/* Update the LED according to the link state */
 	bnx2x_set_led(params, vars, led_mode, SPEED_10000);
 
@@ -12822,7 +12674,8 @@
 
 	/* C. Trigger General Attention */
 	vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
-	bnx2x_notify_link_changed(bp);
+	if (notify)
+		bnx2x_notify_link_changed(bp);
 }
 
 /******************************************************************************
@@ -12834,22 +12687,23 @@
 *	a fault, for example, due to break in the TX side of fiber.
 *
 ******************************************************************************/
-static void bnx2x_check_half_open_conn(struct link_params *params,
-				       struct link_vars *vars)
+int bnx2x_check_half_open_conn(struct link_params *params,
+				struct link_vars *vars,
+				u8 notify)
 {
 	struct bnx2x *bp = params->bp;
 	u32 lss_status = 0;
 	u32 mac_base;
 	/* In case link status is physically up @ 10G do */
-	if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
-		return;
+	if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) ||
+	    (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4)))
+		return 0;
 
 	if (CHIP_IS_E3(bp) &&
 	    (REG_RD(bp, MISC_REG_RESET_REG_2) &
 	      (MISC_REGISTERS_RESET_REG_2_XMAC))) {
 		/* Check E3 XMAC */
-		/*
-		 * Note that link speed cannot be queried here, since it may be
+		/* Note that link speed cannot be queried here, since it may be
 		 * zero while link is down. In case UMAC is active, LSS will
 		 * simply not be set
 		 */
@@ -12863,7 +12717,7 @@
 		if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
 			lss_status = 1;
 
-		bnx2x_analyze_link_error(params, vars, lss_status);
+		bnx2x_analyze_link_error(params, vars, lss_status, notify);
 	} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
 		   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
 		/* Check E1X / E2 BMAC */
@@ -12880,18 +12734,21 @@
 		REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
 		lss_status = (wb_data[0] > 0);
 
-		bnx2x_analyze_link_error(params, vars, lss_status);
+		bnx2x_analyze_link_error(params, vars, lss_status, notify);
 	}
+	return 0;
 }
 
 void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
 {
-	struct bnx2x *bp = params->bp;
 	u16 phy_idx;
+	struct bnx2x *bp = params->bp;
 	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
 		if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
 			bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
-			bnx2x_check_half_open_conn(params, vars);
+			if (bnx2x_check_half_open_conn(params, vars, 1) !=
+			    0)
+				DP(NETIF_MSG_LINK, "Fault detection failed\n");
 			break;
 		}
 	}
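
For reference, a minimal sketch (not part of the patch) of the calling convention the change above establishes for bnx2x_check_half_open_conn(): the link-up path polls once with notify = 0 so the fault is folded into the current link update without raising another link-changed attention, while the periodic worker passes notify = 1. The wrapper name poll_remote_fault() is hypothetical; everything else assumes the driver's own types and macros (struct link_params, struct link_vars, FLAGS_TX_ERROR_CHECK, DP).

/* Hypothetical wrapper -- mirrors the two call sites added above. */
static void poll_remote_fault(struct link_params *params,
			      struct link_vars *vars, u8 notify)
{
	u16 phy_idx;

	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
		/* Only phys flagged for TX error checking are polled */
		if (!(params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK))
			continue;
		/* notify = 0: fold the result into the current link update;
		 * notify = 1: also raise the link-changed attention.
		 */
		if (bnx2x_check_half_open_conn(params, vars, notify) != 0)
			DP(NETIF_MSG_LINK, "Fault detection failed\n");
		break;
	}
}
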
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 763535e..ea4371f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -254,8 +254,10 @@
 #define FEATURE_CONFIG_PFC_ENABLED			(1<<1)
 #define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY		(1<<2)
 #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY	(1<<3)
+#define FEATURE_CONFIG_BC_SUPPORTS_AFEX			(1<<8)
 #define FEATURE_CONFIG_AUTOGREEEN_ENABLED			(1<<9)
 #define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED		(1<<10)
+#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET		(1<<11)
 	/* Will be populated during common init */
 	struct bnx2x_phy phy[MAX_PHYS];
 
@@ -495,4 +497,6 @@
 
 void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
 
+int bnx2x_check_half_open_conn(struct link_params *params,
+			       struct link_vars *vars, u8 notify);
 #endif /* BNX2X_LINK_H */
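
A short sketch (again not part of the patch) of how the two new feature_config_flags bits declared here are consumed in bnx2x_link.c, per the hunks above: FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET strips FLAGS_TX_ERROR_CHECK from a populated phy so it never enters the remote-fault poll, and FEATURE_CONFIG_BC_SUPPORTS_AFEX gates the extra LINK_STATUS_CHANGED message to the MCP. The helper name and its link_changed argument are illustrative only.

/* Illustrative helper -- condenses the two uses added in bnx2x_link.c. */
static void apply_link_feature_flags(struct link_params *params,
				     struct bnx2x_phy *phy, u8 link_changed)
{
	/* Remote fault (half-open connection) detection can be disabled;
	 * the phy is then skipped by the periodic poll loop.
	 */
	if (params->feature_config_flags &
	    FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
		phy->flags &= ~FLAGS_TX_ERROR_CHECK;

	/* Only bootcodes that advertise AFEX support expect the extra
	 * link-status-changed notification.
	 */
	if (link_changed &&
	    (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX))
		bnx2x_fw_command(params->bp, DRV_MSG_CODE_LINK_STATUS_CHANGED,
				 0);
}
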
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e077d25..0708cb8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -39,7 +39,6 @@
 #include <linux/time.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
-#include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -93,15 +92,11 @@
 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 MODULE_FIRMWARE(FW_FILE_NAME_E2);
 
-static int multi_mode = 1;
-module_param(multi_mode, int, 0);
-MODULE_PARM_DESC(multi_mode, " Multi queue mode "
-			     "(0 Disable; 1 Enable (default))");
 
 int num_queues;
 module_param(num_queues, int, 0);
-MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
-				" (default is as a number of CPUs)");
+MODULE_PARM_DESC(num_queues,
+		 " Set number of queues (default is the number of CPUs)");
 
 static int disable_tpa;
 module_param(disable_tpa, int, 0);
@@ -141,7 +136,9 @@
 	BCM57810,
 	BCM57810_MF,
 	BCM57840,
-	BCM57840_MF
+	BCM57840_MF,
+	BCM57811,
+	BCM57811_MF
 };
 
 /* indexed by board_type, above */
@@ -158,8 +155,9 @@
 	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
 	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
 	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
-	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
-						"Ethernet Multi Function"}
+	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
+	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
+	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
 };
 
 #ifndef PCI_DEVICE_ID_NX2_57710
@@ -195,6 +193,12 @@
 #ifndef PCI_DEVICE_ID_NX2_57840_MF
 #define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
 #endif
+#ifndef PCI_DEVICE_ID_NX2_57811
+#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_MF
+#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
+#endif
 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
@@ -207,6 +211,8 @@
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
 	{ 0 }
 };
 
@@ -309,67 +315,6 @@
 #define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
 #define DMAE_DP_DST_NONE	"dst_addr [none]"
 
-static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
-			  int msglvl)
-{
-	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
-
-	switch (dmae->opcode & DMAE_COMMAND_DST) {
-	case DMAE_CMD_DST_PCI:
-		if (src_type == DMAE_CMD_SRC_PCI)
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
-			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
-			   dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		else
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
-			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_lo >> 2,
-			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
-			   dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		break;
-	case DMAE_CMD_DST_GRC:
-		if (src_type == DMAE_CMD_SRC_PCI)
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
-			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-			   dmae->len, dmae->dst_addr_lo >> 2,
-			   dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		else
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src [%08x], len [%d*4], dst [%08x]\n"
-			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_lo >> 2,
-			   dmae->len, dmae->dst_addr_lo >> 2,
-			   dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		break;
-	default:
-		if (src_type == DMAE_CMD_SRC_PCI)
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
-			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		else
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
-			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_lo >> 2,
-			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		break;
-	}
-
-}
 
 /* copy command into DMAE command memory and set DMAE command go */
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -506,8 +451,6 @@
 	dmae.dst_addr_hi = 0;
 	dmae.len = len32;
 
-	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
-
 	/* issue the command and wait for completion */
 	bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
@@ -540,8 +483,6 @@
 	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
 	dmae.len = len32;
 
-	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
-
 	/* issue the command and wait for completion */
 	bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
@@ -562,27 +503,6 @@
 	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
 }
 
-/* used only for slowpath so not inlined */
-static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
-{
-	u32 wb_write[2];
-
-	wb_write[0] = val_hi;
-	wb_write[1] = val_lo;
-	REG_WR_DMAE(bp, reg, wb_write, 2);
-}
-
-#ifdef USE_WB_RD
-static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
-{
-	u32 wb_data[2];
-
-	REG_RD_DMAE(bp, reg, wb_data, 2);
-
-	return HILO_U64(wb_data[0], wb_data[1]);
-}
-#endif
-
 static int bnx2x_mc_assert(struct bnx2x *bp)
 {
 	char last_idx;
@@ -1425,8 +1345,9 @@
 static void bnx2x_igu_int_enable(struct bnx2x *bp)
 {
 	u32 val;
-	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
-	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
+	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
+	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
 
 	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 
@@ -1436,6 +1357,9 @@
 		val |= (IGU_PF_CONF_FUNC_EN |
 			IGU_PF_CONF_MSI_MSIX_EN |
 			IGU_PF_CONF_ATTN_BIT_EN);
+
+		if (single_msix)
+			val |= IGU_PF_CONF_SINGLE_ISR_EN;
 	} else if (msi) {
 		val &= ~IGU_PF_CONF_INT_LINE_EN;
 		val |= (IGU_PF_CONF_FUNC_EN |
@@ -1455,6 +1379,9 @@
 
 	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 
+	if (val & IGU_PF_CONF_INT_LINE_EN)
+		pci_intx(bp->pdev, true);
+
 	barrier();
 
 	/* init leading/trailing edge */
@@ -1719,6 +1646,27 @@
 
 	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 
+	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
+	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
+		/* If the Q update ramrod is completed for the last Q in the
+		 * AFEX vif set flow, then ACK MCP at the end.
+		 *
+		 * Mark the pending ACK to MCP bit first to prevent a case
+		 * where both bits are cleared. At the end of load/unload the
+		 * driver checks that sp_state is cleared, and this order
+		 * prevents races.
+		 */
+		smp_mb__before_clear_bit();
+		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
+		wmb();
+		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+		smp_mb__after_clear_bit();
+
+		/* schedule workqueue to send ack to MCP */
+		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+	}
+
 	return;
 }
 
@@ -2229,40 +2177,6 @@
 	return rc;
 }
 
-static void bnx2x_init_port_minmax(struct bnx2x *bp)
-{
-	u32 r_param = bp->link_vars.line_speed / 8;
-	u32 fair_periodic_timeout_usec;
-	u32 t_fair;
-
-	memset(&(bp->cmng.rs_vars), 0,
-	       sizeof(struct rate_shaping_vars_per_port));
-	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
-
-	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
-	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
-
-	/* this is the threshold below which no timer arming will occur
-	   1.25 coefficient is for the threshold to be a little bigger
-	   than the real time, to compensate for timer in-accuracy */
-	bp->cmng.rs_vars.rs_threshold =
-				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
-
-	/* resolution of fairness timer */
-	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
-	/* for 10G it is 1000usec. for 1G it is 10000usec. */
-	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
-
-	/* this is the threshold below which we won't arm the timer anymore */
-	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
-
-	/* we multiply by 1e3/8 to get bytes/msec.
-	   We don't want the credits to pass a credit
-	   of the t_fair*FAIR_MEM (algorithm resolution) */
-	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
-	/* since each tick is 4 usec */
-	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
-}
 
 /* Calculates the sum of vn_min_rates.
    It's needed for further normalizing of the min_rates.
@@ -2273,12 +2187,12 @@
      In the latter case the fairness algorithm should be deactivated.
      If not all min_rates are zero then those that are zeroes will be set to 1.
  */
-static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
+static void bnx2x_calc_vn_min(struct bnx2x *bp,
+				      struct cmng_init_input *input)
 {
 	int all_zero = 1;
 	int vn;
 
-	bp->vn_weight_sum = 0;
 	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 		u32 vn_cfg = bp->mf_config[vn];
 		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
@@ -2286,106 +2200,56 @@
 
 		/* Skip hidden vns */
 		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
-			continue;
-
+			vn_min_rate = 0;
 		/* If min rate is zero - set it to 1 */
-		if (!vn_min_rate)
+		else if (!vn_min_rate)
 			vn_min_rate = DEF_MIN_RATE;
 		else
 			all_zero = 0;
 
-		bp->vn_weight_sum += vn_min_rate;
+		input->vnic_min_rate[vn] = vn_min_rate;
 	}
 
 	/* if ETS or all min rates are zeros - disable fairness */
 	if (BNX2X_IS_ETS_ENABLED(bp)) {
-		bp->cmng.flags.cmng_enables &=
+		input->flags.cmng_enables &=
 					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
 	} else if (all_zero) {
-		bp->cmng.flags.cmng_enables &=
+		input->flags.cmng_enables &=
 					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
-		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-		   "  fairness will be disabled\n");
+		DP(NETIF_MSG_IFUP,
+		   "All MIN values are zeroes, fairness will be disabled\n");
 	} else
-		bp->cmng.flags.cmng_enables |=
+		input->flags.cmng_enables |=
 					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
-static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
+static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
+				    struct cmng_init_input *input)
 {
-	struct rate_shaping_vars_per_vn m_rs_vn;
-	struct fairness_vars_per_vn m_fair_vn;
+	u16 vn_max_rate;
 	u32 vn_cfg = bp->mf_config[vn];
-	int func = func_by_vn(bp, vn);
-	u16 vn_min_rate, vn_max_rate;
-	int i;
 
-	/* If function is hidden - set min and max to zeroes */
-	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
-		vn_min_rate = 0;
+	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
 		vn_max_rate = 0;
-
-	} else {
+	else {
 		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
 
-		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
-				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-		/* If fairness is enabled (not all min rates are zeroes) and
-		   if current min rate is zero - set it to 1.
-		   This is a requirement of the algorithm. */
-		if (bp->vn_weight_sum && (vn_min_rate == 0))
-			vn_min_rate = DEF_MIN_RATE;
-
-		if (IS_MF_SI(bp))
+		if (IS_MF_SI(bp)) {
 			/* maxCfg in percents of linkspeed */
 			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
-		else
+		} else /* SD modes */
 			/* maxCfg is absolute in 100Mb units */
 			vn_max_rate = maxCfg * 100;
 	}
 
-	DP(NETIF_MSG_IFUP,
-	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
-	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
+	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
 
-	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
-	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
-
-	/* global vn counter - maximal Mbps for this vn */
-	m_rs_vn.vn_counter.rate = vn_max_rate;
-
-	/* quota - number of bytes transmitted in this period */
-	m_rs_vn.vn_counter.quota =
-				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
-
-	if (bp->vn_weight_sum) {
-		/* credit for each period of the fairness algorithm:
-		   number of bytes in T_FAIR (the vn share the port rate).
-		   vn_weight_sum should not be larger than 10000, thus
-		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
-		   than zero */
-		m_fair_vn.vn_credit_delta =
-			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
-						   (8 * bp->vn_weight_sum))),
-			      (bp->cmng.fair_vars.fair_threshold +
-							MIN_ABOVE_THRESH));
-		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
-		   m_fair_vn.vn_credit_delta);
-	}
-
-	/* Store it to internal memory */
-	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
-		REG_WR(bp, BAR_XSTRORM_INTMEM +
-		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
-		       ((u32 *)(&m_rs_vn))[i]);
-
-	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
-		REG_WR(bp, BAR_XSTRORM_INTMEM +
-		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
-		       ((u32 *)(&m_fair_vn))[i]);
+	input->vnic_max_rate[vn] = vn_max_rate;
 }
 
+
 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
 {
 	if (CHIP_REV_IS_SLOW(bp))
@@ -2423,38 +2287,42 @@
 		bp->mf_config[vn] =
 			MF_CFG_RD(bp, func_mf_config[func].config);
 	}
+	if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
+		bp->flags |= MF_FUNC_DIS;
+	} else {
+		DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+		bp->flags &= ~MF_FUNC_DIS;
+	}
 }
 
 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 {
+	struct cmng_init_input input;
+	memset(&input, 0, sizeof(struct cmng_init_input));
+
+	input.port_rate = bp->link_vars.line_speed;
 
 	if (cmng_type == CMNG_FNS_MINMAX) {
 		int vn;
 
-		/* clear cmng_enables */
-		bp->cmng.flags.cmng_enables = 0;
-
 		/* read mf conf from shmem */
 		if (read_cfg)
 			bnx2x_read_mf_cfg(bp);
 
-		/* Init rate shaping and fairness contexts */
-		bnx2x_init_port_minmax(bp);
-
 		/* vn_weight_sum and enable fairness if not 0 */
-		bnx2x_calc_vn_weight_sum(bp);
+		bnx2x_calc_vn_min(bp, &input);
 
 		/* calculate and set min-max rate for each vn */
 		if (bp->port.pmf)
 			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
-				bnx2x_init_vn_minmax(bp, vn);
+				bnx2x_calc_vn_max(bp, vn, &input);
 
 		/* always enable rate shaping and fairness */
-		bp->cmng.flags.cmng_enables |=
+		input.flags.cmng_enables |=
 					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
-		if (!bp->vn_weight_sum)
-			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-				   "  fairness will be disabled\n");
+
+		bnx2x_init_cmng(&input, &bp->cmng);
 		return;
 	}
 
@@ -2535,6 +2403,190 @@
 	bnx2x_link_report(bp);
 }
 
+static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
+				  u16 vlan_val, u8 allowed_prio)
+{
+	struct bnx2x_func_state_params func_params = {0};
+	struct bnx2x_func_afex_update_params *f_update_params =
+		&func_params.params.afex_update;
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
+
+	/* no need to wait for RAMROD completion, so don't
+	 * set RAMROD_COMP_WAIT flag
+	 */
+
+	f_update_params->vif_id = vifid;
+	f_update_params->afex_default_vlan = vlan_val;
+	f_update_params->allowed_priorities = allowed_prio;
+
+	/* if the ramrod cannot be sent, respond to the MCP immediately */
+	if (bnx2x_func_state_change(bp, &func_params) < 0)
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+
+	return 0;
+}
+
+static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
+					  u16 vif_index, u8 func_bit_map)
+{
+	struct bnx2x_func_state_params func_params = {0};
+	struct bnx2x_func_afex_viflists_params *update_params =
+		&func_params.params.afex_viflists;
+	int rc;
+	u32 drv_msg_code;
+
+	/* validate only LIST_SET and LIST_GET are received from switch */
+	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
+		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
+			  cmd_type);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
+
+	/* set parameters according to cmd_type */
+	update_params->afex_vif_list_command = cmd_type;
+	update_params->vif_list_index = cpu_to_le16(vif_index);
+	update_params->func_bit_map =
+		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
+	update_params->func_to_clear = 0;
+	drv_msg_code =
+		(cmd_type == VIF_LIST_RULE_GET) ?
+		DRV_MSG_CODE_AFEX_LISTGET_ACK :
+		DRV_MSG_CODE_AFEX_LISTSET_ACK;
+
+	/* if the ramrod cannot be sent, respond to the MCP immediately for
+	 * SET and GET requests (others are not triggered from the MCP)
+	 */
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc < 0)
+		bnx2x_fw_command(bp, drv_msg_code, 0);
+
+	return 0;
+}
+
+static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
+{
+	struct afex_stats afex_stats;
+	u32 func = BP_ABS_FUNC(bp);
+	u32 mf_config;
+	u16 vlan_val;
+	u32 vlan_prio;
+	u16 vif_id;
+	u8 allowed_prio;
+	u8 vlan_mode;
+	u32 addr_to_write, vifid, addrs, stats_type, i;
+
+	if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
+		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
+		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
+	}
+
+	if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
+		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+		addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
+		   vifid, addrs);
+		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
+					       addrs);
+	}
+
+	if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
+		addr_to_write = SHMEM2_RD(bp,
+			afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
+		stats_type = SHMEM2_RD(bp,
+			afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
+		   addr_to_write);
+
+		bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
+
+		/* write response to scratchpad, for MCP */
+		for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
+			REG_WR(bp, addr_to_write + i*sizeof(u32),
+			       *(((u32 *)(&afex_stats))+i));
+
+		/* send ack message to MCP */
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
+	}
+
+	if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
+		mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
+		bp->mf_config[BP_VN(bp)] = mf_config;
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
+		   mf_config);
+
+		/* if VIF_SET is "enabled" */
+		if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
+			/* set rate limit directly to internal RAM */
+			struct cmng_init_input cmng_input;
+			struct rate_shaping_vars_per_vn m_rs_vn;
+			size_t size = sizeof(struct rate_shaping_vars_per_vn);
+			u32 addr = BAR_XSTRORM_INTMEM +
+			    XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
+
+			bp->mf_config[BP_VN(bp)] = mf_config;
+
+			bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
+			m_rs_vn.vn_counter.rate =
+				cmng_input.vnic_max_rate[BP_VN(bp)];
+			m_rs_vn.vn_counter.quota =
+				(m_rs_vn.vn_counter.rate *
+				 RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+			__storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
+
+			/* read relevant values from mf_cfg struct in shmem */
+			vif_id =
+				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+				 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
+				FUNC_MF_CFG_E1HOV_TAG_SHIFT;
+			vlan_val =
+				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+				 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
+				FUNC_MF_CFG_AFEX_VLAN_SHIFT;
+			vlan_prio = (mf_config &
+				     FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
+				    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
+			vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
+			vlan_mode =
+				(MF_CFG_RD(bp,
+					   func_mf_config[func].afex_config) &
+				 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
+				FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
+			allowed_prio =
+				(MF_CFG_RD(bp,
+					   func_mf_config[func].afex_config) &
+				 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
+				FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
+
+			/* send ramrod to FW, return in case of failure */
+			if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
+						   allowed_prio))
+				return;
+
+			bp->afex_def_vlan_tag = vlan_val;
+			bp->afex_vlan_mode = vlan_mode;
+		} else {
+			/* notify link down because the function is disabled */
+			bnx2x_link_report(bp);
+
+			/* send INVALID VIF ramrod to FW */
+			bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
+
+			/* Reset the default afex VLAN */
+			bp->afex_def_vlan_tag = -1;
+		}
+	}
+}
+
 static void bnx2x_pmf_update(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
@@ -2680,8 +2732,11 @@
 	if (IS_MF_SD(bp))
 		__set_bit(BNX2X_Q_FLG_OV, &flags);
 
-	if (IS_FCOE_FP(fp))
+	if (IS_FCOE_FP(fp)) {
 		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
+		/* For FCoE - force usage of default priority (for afex) */
+		__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
+	}
 
 	if (!fp->disable_tpa) {
 		__set_bit(BNX2X_Q_FLG_TPA, &flags);
@@ -2698,6 +2753,10 @@
 	/* Always set HW VLAN stripping */
 	__set_bit(BNX2X_Q_FLG_VLAN, &flags);
 
+	/* configure silent vlan removal */
+	if (IS_MF_AFEX(bp))
+		__set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
+
 
 	return flags | bnx2x_get_common_flags(bp, fp, true);
 }
@@ -2800,6 +2859,13 @@
 		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
 	else
 		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+	/* configure silent vlan removal
+	 * if the multi-function mode is afex, then mask the default vlan
+	 */
+	if (IS_MF_AFEX(bp)) {
+		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
+		rxq_init->silent_removal_mask = VLAN_VID_MASK;
+	}
 }
 
 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
@@ -3606,6 +3672,7 @@
 			int func = BP_FUNC(bp);
 
 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+			bnx2x_read_mf_cfg(bp);
 			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
 					func_mf_config[BP_ABS_FUNC(bp)].config);
 			val = SHMEM_RD(bp,
@@ -3628,6 +3695,9 @@
 				/* start dcbx state machine */
 				bnx2x_dcbx_set_params(bp,
 					BNX2X_DCBX_STATE_NEG_RECEIVED);
+			if (val & DRV_STATUS_AFEX_EVENT_MASK)
+				bnx2x_handle_afex_cmd(bp,
+					val & DRV_STATUS_AFEX_EVENT_MASK);
 			if (bp->link_vars.periodic_flags &
 			    PERIODIC_FLAGS_LINK_EVENT) {
 				/*  sync with link */
@@ -4555,6 +4625,93 @@
 	netif_addr_unlock_bh(bp->dev);
 }
 
+static inline void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
+					      union event_ring_elem *elem)
+{
+	if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
+		DP(BNX2X_MSG_SP,
+		   "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
+		   elem->message.data.vif_list_event.func_bit_map);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
+			elem->message.data.vif_list_event.func_bit_map);
+	} else if (elem->message.data.vif_list_event.echo ==
+		   VIF_LIST_RULE_SET) {
+		DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
+	}
+}
+
+/* called with rtnl_lock */
+static inline void bnx2x_after_function_update(struct bnx2x *bp)
+{
+	int q, rc;
+	struct bnx2x_fastpath *fp;
+	struct bnx2x_queue_state_params queue_params = {NULL};
+	struct bnx2x_queue_update_params *q_update_params =
+		&queue_params.params.update;
+
+	/* Send Q update command with afex vlan removal values for all Qs */
+	queue_params.cmd = BNX2X_Q_CMD_UPDATE;
+
+	/* set silent vlan removal values according to vlan mode */
+	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+		  &q_update_params->update_flags);
+	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+		  &q_update_params->update_flags);
+	__set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+	/* in access mode, mask and value are 0 to strip all vlans */
+	if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
+		q_update_params->silent_removal_value = 0;
+		q_update_params->silent_removal_mask = 0;
+	} else {
+		q_update_params->silent_removal_value =
+			(bp->afex_def_vlan_tag & VLAN_VID_MASK);
+		q_update_params->silent_removal_mask = VLAN_VID_MASK;
+	}
+
+	for_each_eth_queue(bp, q) {
+		/* Set the appropriate Queue object */
+		fp = &bp->fp[q];
+		queue_params.q_obj = &fp->q_obj;
+
+		/* send the ramrod */
+		rc = bnx2x_queue_state_change(bp, &queue_params);
+		if (rc < 0)
+			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+				  q);
+	}
+
+#ifdef BCM_CNIC
+	if (!NO_FCOE(bp)) {
+		fp = &bp->fp[FCOE_IDX];
+		queue_params.q_obj = &fp->q_obj;
+
+		/* clear pending completion bit */
+		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+		/* mark latest Q bit */
+		smp_mb__before_clear_bit();
+		set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+		smp_mb__after_clear_bit();
+
+		/* send Q update ramrod for FCoE Q */
+		rc = bnx2x_queue_state_change(bp, &queue_params);
+		if (rc < 0)
+			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+				  q);
+	} else {
+		/* If no FCoE ring - ACK MCP now */
+		bnx2x_link_report(bp);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+	}
+#else
+	/* If no FCoE ring - ACK MCP now */
+	bnx2x_link_report(bp);
+	bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+#endif /* BCM_CNIC */
+}
+
 static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
 	struct bnx2x *bp, u32 cid)
 {
@@ -4653,6 +4810,28 @@
 				break;
 			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
 			goto next_spqe;
+		case EVENT_RING_OPCODE_FUNCTION_UPDATE:
+			DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
+			   "AFEX: ramrod completed FUNCTION_UPDATE\n");
+			f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
+
+			/* We will perform the Queues update from sp_rtnl task
+			 * as all Queue SP operations should run under
+			 * rtnl_lock.
+			 */
+			smp_mb__before_clear_bit();
+			set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
+				&bp->sp_rtnl_state);
+			smp_mb__after_clear_bit();
+
+			schedule_delayed_work(&bp->sp_rtnl_task, 0);
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
+			f_obj->complete_cmd(bp, f_obj,
+					    BNX2X_F_CMD_AFEX_VIFLISTS);
+			bnx2x_after_afex_vif_lists(bp, elem);
+			goto next_spqe;
 		case EVENT_RING_OPCODE_FUNCTION_START:
 			DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
 			   "got FUNC_START ramrod\n");
@@ -4784,6 +4963,13 @@
 
 	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
 	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+
+	/* afex - poll to check if VIFSET_ACK should be sent to MFW */
+	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
+			       &bp->sp_state)) {
+		bnx2x_link_report(bp);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+	}
 }
 
 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -6255,12 +6441,24 @@
 	if (!CHIP_IS_E1(bp))
 		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
 
-	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
-		/* Bit-map indicating which L2 hdrs may appear
-		 * after the basic Ethernet header
-		 */
-		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
-		       bp->path_has_ovlan ? 7 : 6);
+	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
+		if (IS_MF_AFEX(bp)) {
+			/* configure that VNTag and VLAN headers must be
+			 * received in afex mode
+			 */
+			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
+			REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
+			REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
+			REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
+			REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
+		} else {
+			/* Bit-map indicating which L2 hdrs may appear
+			 * after the basic Ethernet header
+			 */
+			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+			       bp->path_has_ovlan ? 7 : 6);
+		}
+	}
 
 	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
 	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
@@ -6294,9 +6492,21 @@
 	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
 	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
 
-	if (!CHIP_IS_E1x(bp))
-		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
-		       bp->path_has_ovlan ? 7 : 6);
+	if (!CHIP_IS_E1x(bp)) {
+		if (IS_MF_AFEX(bp)) {
+			/* configure that VNTag and VLAN headers must be
+			 * sent in afex mode
+			 */
+			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
+			REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
+			REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
+			REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
+			REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
+		} else {
+			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+			       bp->path_has_ovlan ? 7 : 6);
+		}
+	}
 
 	REG_WR(bp, SRC_REG_SOFT_RST, 1);
 
@@ -6514,15 +6724,29 @@
 
 
 	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
-	if (CHIP_IS_E3B0(bp))
-		/* Ovlan exists only if we are in multi-function +
-		 * switch-dependent mode, in switch-independent there
-		 * is no ovlan headers
-		 */
-		REG_WR(bp, BP_PORT(bp) ?
-		       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
-		       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
-		       (bp->path_has_ovlan ? 7 : 6));
+	if (CHIP_IS_E3B0(bp)) {
+		if (IS_MF_AFEX(bp)) {
+			/* configure headers for AFEX mode */
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
+			       PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_MUST_HAVE_HDRS_PORT_1 :
+			       PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
+		} else {
+			/* Ovlan exists only if we are in multi-function +
+			 * switch-dependent mode, in switch-independent there
+			 * are no ovlan headers
+			 */
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+			       (bp->path_has_ovlan ? 7 : 6));
+		}
+	}
 
 	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
 	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
@@ -6584,10 +6808,15 @@
 		/* Bit-map indicating which L2 hdrs may appear after the
 		 * basic Ethernet header
 		 */
-		REG_WR(bp, BP_PORT(bp) ?
-			   NIG_REG_P1_HDRS_AFTER_BASIC :
-			   NIG_REG_P0_HDRS_AFTER_BASIC,
-			   IS_MF_SD(bp) ? 7 : 6);
+		if (IS_MF_AFEX(bp))
+			REG_WR(bp, BP_PORT(bp) ?
+			       NIG_REG_P1_HDRS_AFTER_BASIC :
+			       NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
+		else
+			REG_WR(bp, BP_PORT(bp) ?
+			       NIG_REG_P1_HDRS_AFTER_BASIC :
+			       NIG_REG_P0_HDRS_AFTER_BASIC,
+			       IS_MF_SD(bp) ? 7 : 6);
 
 		if (CHIP_IS_E3(bp))
 			REG_WR(bp, BP_PORT(bp) ?
@@ -6609,6 +6838,7 @@
 				val = 1;
 				break;
 			case MULTI_FUNCTION_SI:
+			case MULTI_FUNCTION_AFEX:
 				val = 2;
 				break;
 			}
@@ -6640,13 +6870,16 @@
 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
 {
 	int reg;
+	u32 wb_write[2];
 
 	if (CHIP_IS_E1(bp))
 		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
 	else
 		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
 
-	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
+	wb_write[0] = ONCHIP_ADDR1(addr);
+	wb_write[1] = ONCHIP_ADDR2(addr);
+	REG_WR_DMAE(bp, reg, wb_write, 2);
 }
 
 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
@@ -7192,7 +7425,8 @@
 	unsigned long ramrod_flags = 0;
 
 #ifdef BCM_CNIC
-	if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) {
+	if (is_zero_ether_addr(bp->dev->dev_addr) &&
+	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
 		DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
 		   "Ignoring Zero MAC for STORAGE SD mode\n");
 		return 0;
@@ -7230,7 +7464,7 @@
 		BNX2X_DEV_INFO("set number of queues to 1\n");
 		break;
 	default:
-		/* Set number of queues according to bp->multi_mode value */
+		/* Set number of queues for MSI-X mode */
 		bnx2x_set_num_queues(bp);
 
 		BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
@@ -7239,15 +7473,17 @@
 		 * so try to enable MSI-X with the requested number of fp's
 		 * and fallback to MSI or legacy INTx with one fp
 		 */
-		if (bnx2x_enable_msix(bp)) {
-			/* failed to enable MSI-X */
-			BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n",
+		if (bnx2x_enable_msix(bp) ||
+		    bp->flags & USING_SINGLE_MSIX_FLAG) {
+			/* failed to enable multiple MSI-X */
+			BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
 				       bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
 
 			bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
 
 			/* Try to enable MSI */
-			if (!(bp->flags & DISABLE_MSI_FLAG))
+			if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
+			    !(bp->flags & DISABLE_MSI_FLAG))
 				bnx2x_enable_msi(bp);
 		}
 		break;
@@ -8727,7 +8963,8 @@
 #endif
 	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
 		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
-
+	if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
+		bnx2x_after_function_update(bp);
 	/*
 	 * in case of fan failure we need to reset it if the "stop on error"
 	 * debug flag is set, since we are trying to prevent permanent overheating
@@ -9201,6 +9438,17 @@
 	id |= (val & 0xf);
 	bp->common.chip_id = id;
 
+	/* force 57811 according to MISC register */
+	if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
+		if (CHIP_IS_57810(bp))
+			bp->common.chip_id = (CHIP_NUM_57811 << 16) |
+				(bp->common.chip_id & 0x0000FFFF);
+		else if (CHIP_IS_57810_MF(bp))
+			bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
+				(bp->common.chip_id & 0x0000FFFF);
+		bp->common.chip_id |= 0x1;
+	}
+
 	/* Set doorbell size */
 	bp->db_size = (1 << BNX2X_DB_SHIFT);
 
@@ -9293,7 +9541,9 @@
 	bp->link_params.feature_config_flags |=
 		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
 		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
-
+	bp->link_params.feature_config_flags |=
+		(val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
+		FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
 	bp->link_params.feature_config_flags |=
 		(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
 		FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
@@ -9925,6 +10175,9 @@
 
 			} else
 				bp->flags |= NO_FCOE_FLAG;
+
+			bp->mf_ext_config = cfg;
+
 		} else { /* SD MODE */
 			if (IS_MF_STORAGE_SD(bp)) {
 				if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
@@ -9946,6 +10199,11 @@
 				memset(bp->dev->dev_addr, 0, ETH_ALEN);
 			}
 		}
+
+		if (IS_MF_FCOE_AFEX(bp))
+			/* use FIP MAC as primary MAC */
+			memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+
 #endif
 	} else {
 		/* in SF read MACs from port configuration */
@@ -10118,6 +10376,19 @@
 				} else
 					BNX2X_DEV_INFO("illegal MAC address for SI\n");
 				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
+				if ((!CHIP_IS_E1x(bp)) &&
+				    (MF_CFG_RD(bp, func_mf_config[func].
+					       mac_upper) != 0xffff) &&
+				    (SHMEM2_HAS(bp,
+						afex_driver_support))) {
+					bp->mf_mode = MULTI_FUNCTION_AFEX;
+					bp->mf_config[vn] = MF_CFG_RD(bp,
+						func_mf_config[func].config);
+				} else {
+					BNX2X_DEV_INFO("can not configure afex mode\n");
+				}
+				break;
 			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
 				/* get OV configuration */
 				val = MF_CFG_RD(bp,
@@ -10158,6 +10429,9 @@
 				return -EPERM;
 			}
 			break;
+		case MULTI_FUNCTION_AFEX:
+			BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
+			break;
 		case MULTI_FUNCTION_SI:
 			BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
 				       func);
@@ -10325,6 +10599,9 @@
 		case MULTI_FUNCTION_SI:
 			SET_FLAGS(flags, MODE_MF_SI);
 			break;
+		case MULTI_FUNCTION_AFEX:
+			SET_FLAGS(flags, MODE_MF_AFEX);
+			break;
 		}
 	} else
 		SET_FLAGS(flags, MODE_SF);
@@ -10384,12 +10661,10 @@
 	if (BP_NOMCP(bp) && (func == 0))
 		dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
 
-	bp->multi_mode = multi_mode;
-
 	bp->disable_tpa = disable_tpa;
 
 #ifdef BCM_CNIC
-	bp->disable_tpa |= IS_MF_STORAGE_SD(bp);
+	bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
 #endif
 
 	/* Set TPA flags */
@@ -10408,7 +10683,7 @@
 
 	bp->mrrs = mrrs;
 
-	bp->tx_ring_size = MAX_TX_AVAIL;
+	bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
 
 	/* make sure that the numbers are in the right granularity */
 	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -10439,8 +10714,6 @@
 	if (CHIP_IS_E3B0(bp))
 		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
 
-	bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu);
-
 	return rc;
 }
 
@@ -11244,6 +11517,8 @@
 	bnx2x_init_func_obj(bp, &bp->func_obj,
 			    bnx2x_sp(bp, func_rdata),
 			    bnx2x_sp_mapping(bp, func_rdata),
+			    bnx2x_sp(bp, func_afex_rdata),
+			    bnx2x_sp_mapping(bp, func_afex_rdata),
 			    &bnx2x_func_sp_drv);
 }
 
@@ -11325,6 +11600,8 @@
 	case BCM57810_MF:
 	case BCM57840:
 	case BCM57840_MF:
+	case BCM57811:
+	case BCM57811_MF:
 		max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
 		break;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index c25803b..bbd3874 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1483,6 +1483,11 @@
    starts at 0x0 for the A0 tape-out and increments by one for each
    all-layer tape-out. */
 #define MISC_REG_CHIP_REV					 0xa40c
+/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11-
+ * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72];
+ * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
+#define MISC_REG_CHIP_TYPE					 0xac60
+#define MISC_REG_CHIP_TYPE_57811_MASK				 (1<<1)
 /* [RW 32] The following driver registers(1...16) represent 16 drivers and
    32 clients. Each client can be controlled by one driver only. One in each
    bit represent that this driver control the appropriate client (Ex: bit 5
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 5135733..6c14b4a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -633,14 +633,17 @@
 }
 
 
-static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
-				 bool add, unsigned char *dev_addr, int index)
+void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+			  bool add, unsigned char *dev_addr, int index)
 {
 	u32 wb_data[2];
 	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
 			 NIG_REG_LLH0_FUNC_MEM;
 
-	if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE)
+	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
+		return;
+
+	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
 		return;
 
 	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
@@ -4090,12 +4093,6 @@
 		rss_mode = ETH_RSS_MODE_DISABLED;
 	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
 		rss_mode = ETH_RSS_MODE_REGULAR;
-	else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
-		rss_mode = ETH_RSS_MODE_VLAN_PRI;
-	else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
-		rss_mode = ETH_RSS_MODE_E1HOV_PRI;
-	else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
-		rss_mode = ETH_RSS_MODE_IP_DSCP;
 
 	data->rss_mode = rss_mode;
 
@@ -4404,6 +4401,9 @@
 		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
 	tx_data->anti_spoofing_flg =
 		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
+	tx_data->force_default_pri_flg =
+		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+
 	tx_data->tx_status_block_id = params->fw_sb_id;
 	tx_data->tx_sb_index_number = params->sb_cq_index;
 	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5331,6 +5331,17 @@
 	case BNX2X_F_STATE_STARTED:
 		if (cmd == BNX2X_F_CMD_STOP)
 			next_state = BNX2X_F_STATE_INITIALIZED;
+		/* afex ramrods can be sent only in started mode, and only
+		 * if a function_stop ramrod completion is not pending;
+		 * for these events the next state remains STARTED.
+		 */
+		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
+
+		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
 		else if (cmd == BNX2X_F_CMD_TX_STOP)
 			next_state = BNX2X_F_STATE_TX_STOPPED;
 
@@ -5618,6 +5629,83 @@
 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
 }
 
+static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
+					 struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct function_update_data *rdata =
+		(struct function_update_data *)o->afex_rdata;
+	dma_addr_t data_mapping = o->afex_rdata_mapping;
+	struct bnx2x_func_afex_update_params *afex_update_params =
+		&params->params.afex_update;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->vif_id_change_flg = 1;
+	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
+	rdata->afex_default_vlan_change_flg = 1;
+	rdata->afex_default_vlan =
+		cpu_to_le16(afex_update_params->afex_default_vlan);
+	rdata->allowed_priorities_change_flg = 1;
+	rdata->allowed_priorities = afex_update_params->allowed_priorities;
+
+	/*  No need for an explicit memory barrier here as long as we would
+	 *  need to ensure the ordering of writing to the SPQ element
+	 *  and updating of the SPQ producer which involves a memory
+	 *  read and we will have to put a full memory barrier there
+	 *  (inside bnx2x_sp_post()).
+	 */
+	DP(BNX2X_MSG_SP,
+	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
+	   rdata->vif_id,
+	   rdata->afex_default_vlan, rdata->allowed_priorities);
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static
+inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
+					 struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct afex_vif_list_ramrod_data *rdata =
+		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
+	struct bnx2x_func_afex_viflists_params *afex_viflist_params =
+		&params->params.afex_viflists;
+	u64 *p_rdata = (u64 *)rdata;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->vif_list_index = afex_viflist_params->vif_list_index;
+	rdata->func_bit_map = afex_viflist_params->func_bit_map;
+	rdata->afex_vif_list_command =
+		afex_viflist_params->afex_vif_list_command;
+	rdata->func_to_clear = afex_viflist_params->func_to_clear;
+
+	/* send the sub-command type in the echo field */
+	rdata->echo = afex_viflist_params->afex_vif_list_command;
+
+	/*  No need for an explicit memory barrier here as long as we would
+	 *  need to ensure the ordering of writing to the SPQ element
+	 *  and updating of the SPQ producer which involves a memory
+	 *  read and we will have to put a full memory barrier there
+	 *  (inside bnx2x_sp_post()).
+	 */
+
+	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
+	   rdata->afex_vif_list_command, rdata->vif_list_index,
+	   rdata->func_bit_map, rdata->func_to_clear);
+
+	/* this ramrod sends data directly and not through DMA mapping */
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
+			     U64_HI(*p_rdata), U64_LO(*p_rdata),
+			     NONE_CONNECTION_TYPE);
+}
+
 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
 				       struct bnx2x_func_state_params *params)
 {
@@ -5669,6 +5757,10 @@
 		return bnx2x_func_send_stop(bp, params);
 	case BNX2X_F_CMD_HW_RESET:
 		return bnx2x_func_hw_reset(bp, params);
+	case BNX2X_F_CMD_AFEX_UPDATE:
+		return bnx2x_func_send_afex_update(bp, params);
+	case BNX2X_F_CMD_AFEX_VIFLISTS:
+		return bnx2x_func_send_afex_viflists(bp, params);
 	case BNX2X_F_CMD_TX_STOP:
 		return bnx2x_func_send_tx_stop(bp, params);
 	case BNX2X_F_CMD_TX_START:
@@ -5682,6 +5774,7 @@
 void bnx2x_init_func_obj(struct bnx2x *bp,
 			 struct bnx2x_func_sp_obj *obj,
 			 void *rdata, dma_addr_t rdata_mapping,
+			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
 			 struct bnx2x_func_sp_drv_ops *drv_iface)
 {
 	memset(obj, 0, sizeof(*obj));
@@ -5690,7 +5783,8 @@
 
 	obj->rdata = rdata;
 	obj->rdata_mapping = rdata_mapping;
-
+	obj->afex_rdata = afex_rdata;
+	obj->afex_rdata_mapping = afex_rdata_mapping;
 	obj->send_cmd = bnx2x_func_send_cmd;
 	obj->check_transition = bnx2x_func_chk_transition;
 	obj->complete_cmd = bnx2x_func_comp_cmd;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 61a7670..efd80bd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -62,6 +62,8 @@
 	BNX2X_FILTER_MCAST_PENDING,
 	BNX2X_FILTER_MCAST_SCHED,
 	BNX2X_FILTER_RSS_CONF_PENDING,
+	BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
+	BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
 };
 
 struct bnx2x_raw_obj {
@@ -432,6 +434,8 @@
 	BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
 };
 
+void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+			  bool add, unsigned char *dev_addr, int index);
 
 /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 
@@ -685,9 +689,6 @@
 	/* RSS_MODE bits are mutually exclusive */
 	BNX2X_RSS_MODE_DISABLED,
 	BNX2X_RSS_MODE_REGULAR,
-	BNX2X_RSS_MODE_VLAN_PRI,
-	BNX2X_RSS_MODE_E1HOV_PRI,
-	BNX2X_RSS_MODE_IP_DSCP,
 
 	BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
 
@@ -801,7 +802,8 @@
 	BNX2X_Q_FLG_TX_SWITCH,
 	BNX2X_Q_FLG_TX_SEC,
 	BNX2X_Q_FLG_ANTI_SPOOF,
-	BNX2X_Q_FLG_SILENT_VLAN_REM
+	BNX2X_Q_FLG_SILENT_VLAN_REM,
+	BNX2X_Q_FLG_FORCE_DEFAULT_PRI
 };
 
 /* Queue type options: queue type may be a combination of the below. */
@@ -963,6 +965,11 @@
 	} params;
 };
 
+struct bnx2x_viflist_params {
+	u8 echo_res;
+	u8 func_bit_map_res;
+};
+
 struct bnx2x_queue_sp_obj {
 	u32		cids[BNX2X_MULTI_TX_COS];
 	u8		cl_id;
@@ -1045,6 +1052,8 @@
 	BNX2X_F_CMD_START,
 	BNX2X_F_CMD_STOP,
 	BNX2X_F_CMD_HW_RESET,
+	BNX2X_F_CMD_AFEX_UPDATE,
+	BNX2X_F_CMD_AFEX_VIFLISTS,
 	BNX2X_F_CMD_TX_STOP,
 	BNX2X_F_CMD_TX_START,
 	BNX2X_F_CMD_MAX,
@@ -1089,6 +1098,18 @@
 	u8 network_cos_mode;
 };
 
+struct bnx2x_func_afex_update_params {
+	u16 vif_id;
+	u16 afex_default_vlan;
+	u8 allowed_priorities;
+};
+
+struct bnx2x_func_afex_viflists_params {
+	u16 vif_list_index;
+	u8 func_bit_map;
+	u8 afex_vif_list_command;
+	u8 func_to_clear;
+};
 struct bnx2x_func_tx_start_params {
 	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
 	u8 dcb_enabled;
@@ -1110,6 +1131,8 @@
 		struct bnx2x_func_hw_init_params hw_init;
 		struct bnx2x_func_hw_reset_params hw_reset;
 		struct bnx2x_func_start_params start;
+		struct bnx2x_func_afex_update_params afex_update;
+		struct bnx2x_func_afex_viflists_params afex_viflists;
 		struct bnx2x_func_tx_start_params tx_start;
 	} params;
 };
@@ -1154,6 +1177,13 @@
 	void			*rdata;
 	dma_addr_t		rdata_mapping;
 
+	/* Buffer to use as afex ramrod data and its mapping.
+	 * This can't be the same rdata as above because afex ramrod requests
+	 * can arrive at the object in parallel with other ramrod requests.
+	 */
+	void			*afex_rdata;
+	dma_addr_t		afex_rdata_mapping;
+
 	/* this mutex validates that when the pending flag is taken, the next
 	 * ramrod to be sent will be the one that set the pending bit
 	 */
@@ -1197,6 +1227,7 @@
 void bnx2x_init_func_obj(struct bnx2x *bp,
 			 struct bnx2x_func_sp_obj *obj,
 			 void *rdata, dma_addr_t rdata_mapping,
+			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
 			 struct bnx2x_func_sp_drv_ops *drv_iface);
 
 int bnx2x_func_state_change(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index e1c9310..7366e92 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1561,3 +1561,274 @@
 		UPDATE_FW_STAT_OLD(mac_discard);
 	}
 }
+
+void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
+			      u32 stats_type)
+{
+	int i;
+	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct per_queue_stats *fcoe_q_stats =
+		&bp->fw_stats_data->queue_stats[FCOE_IDX];
+
+	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+		&fcoe_q_stats->tstorm_queue_statistics;
+
+	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
+		&fcoe_q_stats->ustorm_queue_statistics;
+
+	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+		&fcoe_q_stats->xstorm_queue_statistics;
+
+	struct fcoe_statistics_params *fw_fcoe_stat =
+		&bp->fw_stats_data->fcoe;
+
+	memset(afex_stats, 0, sizeof(struct afex_stats));
+
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+		ADD_64(afex_stats->rx_unicast_bytes_hi,
+		       qstats->total_unicast_bytes_received_hi,
+		       afex_stats->rx_unicast_bytes_lo,
+		       qstats->total_unicast_bytes_received_lo);
+
+		ADD_64(afex_stats->rx_broadcast_bytes_hi,
+		       qstats->total_broadcast_bytes_received_hi,
+		       afex_stats->rx_broadcast_bytes_lo,
+		       qstats->total_broadcast_bytes_received_lo);
+
+		ADD_64(afex_stats->rx_multicast_bytes_hi,
+		       qstats->total_multicast_bytes_received_hi,
+		       afex_stats->rx_multicast_bytes_lo,
+		       qstats->total_multicast_bytes_received_lo);
+
+		ADD_64(afex_stats->rx_unicast_frames_hi,
+		       qstats->total_unicast_packets_received_hi,
+		       afex_stats->rx_unicast_frames_lo,
+		       qstats->total_unicast_packets_received_lo);
+
+		ADD_64(afex_stats->rx_broadcast_frames_hi,
+		       qstats->total_broadcast_packets_received_hi,
+		       afex_stats->rx_broadcast_frames_lo,
+		       qstats->total_broadcast_packets_received_lo);
+
+		ADD_64(afex_stats->rx_multicast_frames_hi,
+		       qstats->total_multicast_packets_received_hi,
+		       afex_stats->rx_multicast_frames_lo,
+		       qstats->total_multicast_packets_received_lo);
+
+		/* sum into rx_frames_discarded all packets discarded
+		 * due to size, ttl0 and checksum
+		 */
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       qstats->total_packets_received_checksum_discarded_hi,
+		       afex_stats->rx_frames_discarded_lo,
+		       qstats->total_packets_received_checksum_discarded_lo);
+
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       qstats->total_packets_received_ttl0_discarded_hi,
+		       afex_stats->rx_frames_discarded_lo,
+		       qstats->total_packets_received_ttl0_discarded_lo);
+
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       qstats->etherstatsoverrsizepkts_hi,
+		       afex_stats->rx_frames_discarded_lo,
+		       qstats->etherstatsoverrsizepkts_lo);
+
+		ADD_64(afex_stats->rx_frames_dropped_hi,
+		       qstats->no_buff_discard_hi,
+		       afex_stats->rx_frames_dropped_lo,
+		       qstats->no_buff_discard_lo);
+
+		ADD_64(afex_stats->tx_unicast_bytes_hi,
+		       qstats->total_unicast_bytes_transmitted_hi,
+		       afex_stats->tx_unicast_bytes_lo,
+		       qstats->total_unicast_bytes_transmitted_lo);
+
+		ADD_64(afex_stats->tx_broadcast_bytes_hi,
+		       qstats->total_broadcast_bytes_transmitted_hi,
+		       afex_stats->tx_broadcast_bytes_lo,
+		       qstats->total_broadcast_bytes_transmitted_lo);
+
+		ADD_64(afex_stats->tx_multicast_bytes_hi,
+		       qstats->total_multicast_bytes_transmitted_hi,
+		       afex_stats->tx_multicast_bytes_lo,
+		       qstats->total_multicast_bytes_transmitted_lo);
+
+		ADD_64(afex_stats->tx_unicast_frames_hi,
+		       qstats->total_unicast_packets_transmitted_hi,
+		       afex_stats->tx_unicast_frames_lo,
+		       qstats->total_unicast_packets_transmitted_lo);
+
+		ADD_64(afex_stats->tx_broadcast_frames_hi,
+		       qstats->total_broadcast_packets_transmitted_hi,
+		       afex_stats->tx_broadcast_frames_lo,
+		       qstats->total_broadcast_packets_transmitted_lo);
+
+		ADD_64(afex_stats->tx_multicast_frames_hi,
+		       qstats->total_multicast_packets_transmitted_hi,
+		       afex_stats->tx_multicast_frames_lo,
+		       qstats->total_multicast_packets_transmitted_lo);
+
+		ADD_64(afex_stats->tx_frames_dropped_hi,
+		       qstats->total_transmitted_dropped_packets_error_hi,
+		       afex_stats->tx_frames_dropped_lo,
+		       qstats->total_transmitted_dropped_packets_error_lo);
+	}
+
+	/* now add FCoE statistics which are collected separately
+	 * (both offloaded and non-offloaded)
+	 */
+	if (!NO_FCOE(bp)) {
+		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
+			  LE32_0,
+			  afex_stats->rx_unicast_bytes_lo,
+			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+			  afex_stats->rx_unicast_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+			  afex_stats->rx_broadcast_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+			  afex_stats->rx_multicast_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_unicast_frames_lo,
+			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_unicast_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_broadcast_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_multicast_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_discarded_lo,
+			  fcoe_q_tstorm_stats->checksum_discard);
+
+		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_discarded_lo,
+			  fcoe_q_tstorm_stats->pkts_too_big_discard);
+
+		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_discarded_lo,
+			  fcoe_q_tstorm_stats->ttl0_discard);
+
+		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
+			    LE16_0,
+			    afex_stats->rx_frames_dropped_lo,
+			    fcoe_q_tstorm_stats->no_buff_discard);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
+
+		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
+			  LE32_0,
+			  afex_stats->tx_unicast_bytes_lo,
+			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
+			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+			  afex_stats->tx_unicast_bytes_lo,
+			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
+			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+			  afex_stats->tx_broadcast_bytes_lo,
+			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
+			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+			  afex_stats->tx_multicast_bytes_lo,
+			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_unicast_frames_lo,
+			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_unicast_frames_lo,
+			  fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_broadcast_frames_lo,
+			  fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_multicast_frames_lo,
+			  fcoe_q_xstorm_stats->mcast_pkts_sent);
+
+		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->tx_frames_dropped_lo,
+			  fcoe_q_xstorm_stats->error_drop_pkts);
+	}
+
+	/* if port stats are requested, add them to the PMF
+	 * stats, as they will be accumulated by the MCP anyway
+	 * before being sent to the switch
+	 */
+	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
+		ADD_64(afex_stats->rx_frames_dropped_hi,
+		       0,
+		       afex_stats->rx_frames_dropped_lo,
+		       estats->mac_filter_discard);
+		ADD_64(afex_stats->rx_frames_dropped_hi,
+		       0,
+		       afex_stats->rx_frames_dropped_lo,
+		       estats->brb_truncate_discard);
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       0,
+		       afex_stats->rx_frames_discarded_lo,
+		       estats->mac_discard);
+	}
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 2b46e1e..93e689fd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -338,6 +338,18 @@
 		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
 	} while (0)
 
+#define LE32_0 ((__force __le32) 0)
+#define LE16_0 ((__force __le16) 0)
+
+/* The __force cast is for cases where the high value is 0 */
+#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
+		ADD_64(s_hi, le32_to_cpu(a_hi_le), \
+		       s_lo, le32_to_cpu(a_lo_le))
+
+#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
+		ADD_64(s_hi, le16_to_cpu(a_hi_le), \
+		       s_lo, le16_to_cpu(a_lo_le))
+
 /* difference = minuend - subtrahend */
 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
 	do { \
@@ -529,4 +541,7 @@
  * @bp:		driver handle
  */
 void bnx2x_save_statistics(struct bnx2x *bp);
+
+void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
+			      u32 stats_type);
 #endif /* BNX2X_STATS_H */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 062ac33..0c3e7c7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12234,6 +12234,7 @@
 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
 	.get_rxfh_indir		= tg3_get_rxfh_indir,
 	.set_rxfh_indir		= tg3_set_rxfh_indir,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 77977d7..0b640fa 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -70,7 +70,6 @@
 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
 static void bfa_ioc_recover(struct bfa_ioc *ioc);
-static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
@@ -346,8 +345,6 @@
 	switch (event) {
 	case IOC_E_FWRSP_GETATTR:
 		del_timer(&ioc->ioc_timer);
-		bfa_ioc_check_attr_wwns(ioc);
-		bfa_ioc_hb_monitor(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
 		break;
 
@@ -380,6 +377,7 @@
 {
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
 	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
+	bfa_ioc_hb_monitor(ioc);
 }
 
 static void
@@ -1207,27 +1205,62 @@
 	writel(1, sem_reg);
 }
 
+/* Clear fwver hdr */
+static void
+bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
+{
+	u32 pgnum, pgoff, loff = 0;
+	int i;
+
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
+		writel(0, ioc->ioc_regs.smem_page_start + loff);
+		loff += sizeof(u32);
+	}
+}
+
+
 static void
 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
 {
 	struct bfi_ioc_image_hdr fwhdr;
-	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 fwstate, r32;
 
-	if (fwstate == BFI_IOC_UNINIT)
+	/* Spin on init semaphore to serialize. */
+	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
+	while (r32 & 0x1) {
+		udelay(20);
+		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
+	}
+
+	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	if (fwstate == BFI_IOC_UNINIT) {
+		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 		return;
+	}
 
 	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
 
-	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
+		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 		return;
+	}
 
+	bfa_ioc_fwver_clear(ioc);
 	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
 
 	/*
 	 * Try to lock and then unlock the semaphore.
 	 */
 	readl(ioc->ioc_regs.ioc_sem_reg);
 	writel(1, ioc->ioc_regs.ioc_sem_reg);
+
+	/* Unlock init semaphore */
+	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 }
 
 static void
@@ -1585,11 +1618,6 @@
 	u32 i;
 	u32 asicmode;
 
-	/**
-	 * Initialize LMEM first before code download
-	 */
-	bfa_ioc_lmem_init(ioc);
-
 	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
 
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
@@ -1914,6 +1942,10 @@
 	bfa_ioc_pll_init_asic(ioc);
 
 	ioc->pllinit = true;
+
+	/* Initialize LMEM */
+	bfa_ioc_lmem_init(ioc);
+
 	/*
 	 *  release semaphore.
 	 */
@@ -2513,13 +2545,6 @@
 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
-static void
-bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
-{
-	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
-		return;
-}
-
 /**
  * @dg hal_iocpf_pvt BFA IOC PF private functions
  * @{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 348479b..b6b036a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -199,9 +199,9 @@
  * Host to LPU mailbox message addresses
  */
 static const struct {
-	u32 	hfn_mbox;
-	u32 	lpu_mbox;
-	u32 	hfn_pgn;
+	u32	hfn_mbox;
+	u32	lpu_mbox;
+	u32	hfn_pgn;
 } ct_fnreg[] = {
 	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
 	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
@@ -803,17 +803,72 @@
 }
 
 #define CT2_NFC_MAX_DELAY       1000
+#define CT2_NFC_VER_VALID       0x143
+#define BFA_IOC_PLL_POLL        1000000
+
+static bool
+bfa_ioc_ct2_nfc_halted(void __iomem *rb)
+{
+	volatile u32 r32;
+
+	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+	if (r32 & __NFC_CONTROLLER_HALTED)
+		return true;
+
+	return false;
+}
+
+static void
+bfa_ioc_ct2_nfc_resume(void __iomem *rb)
+{
+	volatile u32 r32;
+	int i;
+
+	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
+	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+		if (!(r32 & __NFC_CONTROLLER_HALTED))
+			return;
+		udelay(1000);
+	}
+	BUG_ON(1);
+}
+
 static enum bfa_status
 bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
 {
 	volatile u32 wgn, r32;
-	int i;
+	u32 nfc_ver, i;
 
-	/*
-	 * Initialize PLL if not already done by NFC
-	 */
 	wgn = readl(rb + CT2_WGN_STATUS);
-	if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+
+	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+
+	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
+		(nfc_ver >= CT2_NFC_VER_VALID)) {
+		if (bfa_ioc_ct2_nfc_halted(rb))
+			bfa_ioc_ct2_nfc_resume(rb);
+		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
+				rb + CT2_CSI_FW_CTL_SET_REG);
+
+		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
+				break;
+		}
+		BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+
+		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
+				break;
+		}
+		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+		udelay(1000);
+
+		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
+		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+	} else {
 		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
 		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
 			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -821,53 +876,48 @@
 				break;
 			udelay(1000);
 		}
+
+		bfa_ioc_ct2_mac_reset(rb);
+		bfa_ioc_ct2_sclk_init(rb);
+		bfa_ioc_ct2_lclk_init(rb);
+
+		/* release soft reset on s_clk & l_clk */
+		r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+				rb + CT2_APP_PLL_SCLK_CTL_REG);
+		r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+				rb + CT2_APP_PLL_LCLK_CTL_REG);
+	}
+
+	/* Announce flash device presence, if flash was corrupted. */
+	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+		r32 = readl((rb + PSS_GPIO_OUT_REG));
+		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
+		r32 = readl((rb + PSS_GPIO_OE_REG));
+		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
 	}
 
 	/*
 	 * Mask the interrupts and clear any
 	 * pending interrupts left by BIOS/EFI
 	 */
-
 	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
 	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
 
-	r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
-	if (r32 == 1) {
-		writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
-		readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
-	}
-	r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
-	if (r32 == 1) {
-		writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
-		readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
-	}
-
-	bfa_ioc_ct2_mac_reset(rb);
-	bfa_ioc_ct2_sclk_init(rb);
-	bfa_ioc_ct2_lclk_init(rb);
-
-	/*
-	 * release soft reset on s_clk & l_clk
-	 */
-	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
-	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
-			(rb + CT2_APP_PLL_SCLK_CTL_REG));
-
-	/*
-	 * release soft reset on s_clk & l_clk
-	 */
-	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
-	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
-		      (rb + CT2_APP_PLL_LCLK_CTL_REG));
-
-	/*
-	 * Announce flash device presence, if flash was corrupted.
-	 */
-	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
-		r32 = readl((rb + PSS_GPIO_OUT_REG));
-		writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
-		r32 = readl((rb + PSS_GPIO_OE_REG));
-		writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
+	/* For first time initialization, no need to clear interrupts */
+	r32 = readl(rb + HOST_SEM5_REG);
+	if (r32 & 0x1) {
+		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+		if (r32 == 1) {
+			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
+			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+		}
+		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+		if (r32 == 1) {
+			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+		}
 	}
 
 	bfa_ioc_ct2_mem_init(rb);
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index efacff3..0e094fe 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -339,10 +339,16 @@
 #define __A2T_AHB_LOAD			0x00000800
 #define __WGN_READY			0x00000400
 #define __GLBL_PF_VF_CFG_RDY		0x00000200
+#define CT2_NFC_CSR_CLR_REG             0x00027420
 #define CT2_NFC_CSR_SET_REG		0x00027424
 #define __HALT_NFC_CONTROLLER		0x00000002
 #define __NFC_CONTROLLER_HALTED		0x00001000
 
+#define CT2_RSC_GPR15_REG		0x0002765c
+#define CT2_CSI_FW_CTL_REG              0x00027080
+#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
+#define CT2_CSI_FW_CTL_SET_REG          0x00027088
+
 #define CT2_CSI_MAC0_CONTROL_REG	0x000270d0
 #define __CSI_MAC_RESET			0x00000010
 #define __CSI_MAC_AHB_RESET		0x00000008
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ff78f77..25c4e7f 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -80,8 +80,6 @@
 	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
 } while (0)
 
-#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
-
 static void
 bnad_add_to_list(struct bnad *bnad)
 {
@@ -103,7 +101,7 @@
  * Reinitialize completions in CQ, once Rx is taken down
  */
 static void
-bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
+bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bna_cq_entry *cmpl, *next_cmpl;
 	unsigned int wi_range, wis = 0, ccb_prod = 0;
@@ -141,7 +139,8 @@
 
 	for (j = 0; j < frag; j++) {
 		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
-			  skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
+			  skb_frag_size(&skb_shinfo(skb)->frags[j]),
+						DMA_TO_DEVICE);
 		dma_unmap_addr_set(&array[index], dma_addr, 0);
 		BNA_QE_INDX_ADD(index, 1, depth);
 	}
@@ -155,7 +154,7 @@
  * so DMA unmap & freeing is fine.
  */
 static void
-bnad_free_all_txbufs(struct bnad *bnad,
+bnad_txq_cleanup(struct bnad *bnad,
 		 struct bna_tcb *tcb)
 {
 	u32		unmap_cons;
@@ -183,13 +182,12 @@
 /* Data Path Handlers */
 
 /*
- * bnad_free_txbufs : Frees the Tx bufs on Tx completion
+ * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
  * Can be called in a) Interrupt context
  *		    b) Sending context
- *		    c) Tasklet context
  */
 static u32
-bnad_free_txbufs(struct bnad *bnad,
+bnad_txcmpl_process(struct bnad *bnad,
 		 struct bna_tcb *tcb)
 {
 	u32		unmap_cons, sent_packets = 0, sent_bytes = 0;
@@ -198,13 +196,7 @@
 	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff		*skb;
 
-	/*
-	 * Just return if TX is stopped. This check is useful
-	 * when bnad_free_txbufs() runs out of a tasklet scheduled
-	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
-	 * but this routine runs actually after the cleanup has been
-	 * executed.
-	 */
+	/* Just return if TX is stopped */
 	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 		return 0;
 
@@ -243,57 +235,8 @@
 	return sent_packets;
 }
 
-/* Tx Free Tasklet function */
-/* Frees for all the tcb's in all the Tx's */
-/*
- * Scheduled from sending context, so that
- * the fat Tx lock is not held for too long
- * in the sending context.
- */
-static void
-bnad_tx_free_tasklet(unsigned long bnad_ptr)
-{
-	struct bnad *bnad = (struct bnad *)bnad_ptr;
-	struct bna_tcb *tcb;
-	u32		acked = 0;
-	int			i, j;
-
-	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++) {
-			tcb = bnad->tx_info[i].tcb[j];
-			if (!tcb)
-				continue;
-			if (((u16) (*tcb->hw_consumer_index) !=
-				tcb->consumer_index) &&
-				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
-						  &tcb->flags))) {
-				acked = bnad_free_txbufs(bnad, tcb);
-				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
-					&tcb->flags)))
-					bna_ib_ack(tcb->i_dbell, acked);
-				smp_mb__before_clear_bit();
-				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
-			}
-			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
-						&tcb->flags)))
-				continue;
-			if (netif_queue_stopped(bnad->netdev)) {
-				if (acked && netif_carrier_ok(bnad->netdev) &&
-					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
-						BNAD_NETIF_WAKE_THRESHOLD) {
-					netif_wake_queue(bnad->netdev);
-					/* TODO */
-					/* Counters for individual TxQs? */
-					BNAD_UPDATE_CTR(bnad,
-						netif_queue_wakeup);
-				}
-			}
-		}
-	}
-}
-
 static u32
-bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
 {
 	struct net_device *netdev = bnad->netdev;
 	u32 sent = 0;
@@ -301,7 +244,7 @@
 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
 		return 0;
 
-	sent = bnad_free_txbufs(bnad, tcb);
+	sent = bnad_txcmpl_process(bnad, tcb);
 	if (sent) {
 		if (netif_queue_stopped(netdev) &&
 		    netif_carrier_ok(netdev) &&
@@ -330,13 +273,13 @@
 	struct bna_tcb *tcb = (struct bna_tcb *)data;
 	struct bnad *bnad = tcb->bnad;
 
-	bnad_tx(bnad, tcb);
+	bnad_tx_complete(bnad, tcb);
 
 	return IRQ_HANDLED;
 }
 
 static void
-bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
 
@@ -348,7 +291,7 @@
 }
 
 static void
-bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
 	struct bnad_skb_unmap *unmap_array;
@@ -369,11 +312,11 @@
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb(skb);
 	}
-	bnad_reset_rcb(bnad, rcb);
+	bnad_rcb_cleanup(bnad, rcb);
 }
 
 static void
-bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	u16 to_alloc, alloced, unmap_prod, wi_range;
 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
@@ -434,14 +377,14 @@
 	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
 		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
 			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
-			bnad_alloc_n_post_rxbufs(bnad, rcb);
+			bnad_rxq_post(bnad, rcb);
 		smp_mb__before_clear_bit();
 		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
 	}
 }
 
 static u32
-bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 {
 	struct bna_cq_entry *cmpl, *next_cmpl;
 	struct bna_rcb *rcb = NULL;
@@ -453,12 +396,8 @@
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
 
-	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
-
-	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
-		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
+	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
 		return 0;
-	}
 
 	prefetch(bnad->netdev);
 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
@@ -533,9 +472,8 @@
 
 		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 			napi_gro_receive(&rx_ctrl->napi, skb);
-		else {
+		else
 			netif_receive_skb(skb);
-		}
 
 next:
 		cmpl->valid = 0;
@@ -646,7 +584,7 @@
 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
 			tcb = bnad->tx_info[i].tcb[j];
 			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
-				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
 		}
 	}
 	/* Rx processing */
@@ -839,20 +777,9 @@
 {
 	struct bnad_tx_info *tx_info =
 			(struct bnad_tx_info *)tcb->txq->tx->priv;
-	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
-
-	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-		cpu_relax();
-
-	bnad_free_all_txbufs(bnad, tcb);
-
-	unmap_q->producer_index = 0;
-	unmap_q->consumer_index = 0;
-
-	smp_mb__before_clear_bit();
-	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
 	tx_info->tcb[tcb->id] = NULL;
+	tcb->priv = NULL;
 }
 
 static void
@@ -866,12 +793,6 @@
 }
 
 static void
-bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
-{
-	bnad_free_all_rxbufs(bnad, rcb);
-}
-
-static void
 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bnad_rx_info *rx_info =
@@ -916,7 +837,6 @@
 {
 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
 	struct bna_tcb *tcb;
-	struct bnad_unmap_q *unmap_q;
 	u32 txq_id;
 	int i;
 
@@ -926,23 +846,9 @@
 			continue;
 		txq_id = tcb->id;
 
-		unmap_q = tcb->unmap_q;
-
-		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
-			continue;
-
-		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-			cpu_relax();
-
-		bnad_free_all_txbufs(bnad, tcb);
-
-		unmap_q->producer_index = 0;
-		unmap_q->consumer_index = 0;
-
-		smp_mb__before_clear_bit();
-		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
-
+		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
 		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+		BUG_ON(*(tcb->hw_consumer_index) != 0);
 
 		if (netif_carrier_ok(bnad->netdev)) {
 			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
@@ -963,6 +869,54 @@
 	}
 }
 
+/*
+ * Free buffers of all TxQs and then notify TX_E_CLEANUP_DONE to the Tx fsm.
+ */
+static void
+bnad_tx_cleanup(struct delayed_work *work)
+{
+	struct bnad_tx_info *tx_info =
+		container_of(work, struct bnad_tx_info, tx_cleanup_work);
+	struct bnad *bnad = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct bna_tcb *tcb;
+	unsigned long flags;
+	uint32_t i, pending = 0;
+
+	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+		tcb = tx_info->tcb[i];
+		if (!tcb)
+			continue;
+
+		bnad = tcb->bnad;
+
+		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
+			pending++;
+			continue;
+		}
+
+		bnad_txq_cleanup(bnad, tcb);
+
+		unmap_q = tcb->unmap_q;
+		unmap_q->producer_index = 0;
+		unmap_q->consumer_index = 0;
+
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+	}
+
+	if (pending) {
+		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
+			msecs_to_jiffies(1));
+		return;
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bna_tx_cleanup_complete(tx_info->tx);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+
 static void
 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
 {
@@ -976,8 +930,7 @@
 			continue;
 	}
 
-	mdelay(BNAD_TXRX_SYNC_MDELAY);
-	bna_tx_cleanup_complete(tx);
+	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
 }
 
 static void
@@ -1001,6 +954,44 @@
 	}
 }
 
+/*
+ * Free buffers of all RxQs and then notify RX_E_CLEANUP_DONE to the Rx fsm.
+ */
+static void
+bnad_rx_cleanup(void *work)
+{
+	struct bnad_rx_info *rx_info =
+		container_of(work, struct bnad_rx_info, rx_cleanup_work);
+	struct bnad_rx_ctrl *rx_ctrl;
+	struct bnad *bnad = NULL;
+	unsigned long flags;
+	uint32_t i;
+
+	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+		rx_ctrl = &rx_info->rx_ctrl[i];
+
+		if (!rx_ctrl->ccb)
+			continue;
+
+		bnad = rx_ctrl->ccb->bnad;
+
+		/*
+		 * Wait until the poll handler has exited
+		 * and nothing can be scheduled anymore
+		 */
+		napi_disable(&rx_ctrl->napi);
+
+		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
+		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
+		if (rx_ctrl->ccb->rcb[1])
+			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bna_rx_cleanup_complete(rx_info->rx);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
 static void
 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
 {
@@ -1009,8 +1000,6 @@
 	struct bnad_rx_ctrl *rx_ctrl;
 	int i;
 
-	mdelay(BNAD_TXRX_SYNC_MDELAY);
-
 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
 		rx_ctrl = &rx_info->rx_ctrl[i];
 		ccb = rx_ctrl->ccb;
@@ -1021,12 +1010,9 @@
 
 		if (ccb->rcb[1])
 			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
-
-		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
-			cpu_relax();
 	}
 
-	bna_rx_cleanup_complete(rx);
+	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
 }
 
 static void
@@ -1046,13 +1032,12 @@
 		if (!ccb)
 			continue;
 
-		bnad_cq_cmpl_init(bnad, ccb);
+		napi_enable(&rx_ctrl->napi);
 
 		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
 			rcb = ccb->rcb[j];
 			if (!rcb)
 				continue;
-			bnad_free_all_rxbufs(bnad, rcb);
 
 			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
 			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
@@ -1063,7 +1048,7 @@
 			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
 				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
 					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
-					bnad_alloc_n_post_rxbufs(bnad, rcb);
+					bnad_rxq_post(bnad, rcb);
 					smp_mb__before_clear_bit();
 				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
 			}
@@ -1687,7 +1672,7 @@
 	if (!netif_carrier_ok(bnad->netdev))
 		goto poll_exit;
 
-	rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
+	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
 	if (rcvd >= budget)
 		return rcvd;
 
@@ -1704,7 +1689,7 @@
 
 #define BNAD_NAPI_POLL_QUOTA		64
 static void
-bnad_napi_init(struct bnad *bnad, u32 rx_id)
+bnad_napi_add(struct bnad *bnad, u32 rx_id)
 {
 	struct bnad_rx_ctrl *rx_ctrl;
 	int i;
@@ -1718,34 +1703,18 @@
 }
 
 static void
-bnad_napi_enable(struct bnad *bnad, u32 rx_id)
-{
-	struct bnad_rx_ctrl *rx_ctrl;
-	int i;
-
-	/* Initialize & enable NAPI */
-	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
-		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
-
-		napi_enable(&rx_ctrl->napi);
-	}
-}
-
-static void
-bnad_napi_disable(struct bnad *bnad, u32 rx_id)
+bnad_napi_delete(struct bnad *bnad, u32 rx_id)
 {
 	int i;
 
 	/* First disable and then clean up */
-	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
-		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+	for (i = 0; i < bnad->num_rxp_per_rx; i++)
 		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
-	}
 }
 
 /* Should be held with conf_lock held */
 void
-bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
+bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
 {
 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
@@ -1764,9 +1733,6 @@
 		bnad_tx_msix_unregister(bnad, tx_info,
 			bnad->num_txq_per_tx);
 
-	if (0 == tx_id)
-		tasklet_kill(&bnad->tx_free_tasklet);
-
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bna_tx_destroy(tx_info->tx);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1832,6 +1798,9 @@
 		goto err_return;
 	tx_info->tx = tx;
 
+	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
+			(work_func_t)bnad_tx_cleanup);
+
 	/* Register ISR for the Tx object */
 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
 		err = bnad_tx_msix_register(bnad, tx_info,
@@ -1896,7 +1865,7 @@
 
 /* Called with mutex_lock(&bnad->conf_mutex) held */
 void
-bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
+bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
 {
 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
@@ -1928,7 +1897,7 @@
 	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
 		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
 
-	bnad_napi_disable(bnad, rx_id);
+	bnad_napi_delete(bnad, rx_id);
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bna_rx_destroy(rx_info->rx);
@@ -1952,7 +1921,7 @@
 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
 	static const struct bna_rx_event_cbfn rx_cbfn = {
 		.rcb_setup_cbfn = bnad_cb_rcb_setup,
-		.rcb_destroy_cbfn = bnad_cb_rcb_destroy,
+		.rcb_destroy_cbfn = NULL,
 		.ccb_setup_cbfn = bnad_cb_ccb_setup,
 		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
 		.rx_stall_cbfn = bnad_cb_rx_stall,
@@ -1998,11 +1967,14 @@
 	rx_info->rx = rx;
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
+	INIT_WORK(&rx_info->rx_cleanup_work,
+			(work_func_t)bnad_rx_cleanup);
+
 	/*
 	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
 	 * so that IRQ handler cannot schedule NAPI at this point.
 	 */
-	bnad_napi_init(bnad, rx_id);
+	bnad_napi_add(bnad, rx_id);
 
 	/* Register ISR for the Rx object */
 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2028,13 +2000,10 @@
 	bna_rx_enable(rx);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-	/* Enable scheduling of NAPI */
-	bnad_napi_enable(bnad, rx_id);
-
 	return 0;
 
 err_return:
-	bnad_cleanup_rx(bnad, rx_id);
+	bnad_destroy_rx(bnad, rx_id);
 	return err;
 }
 
@@ -2519,7 +2488,7 @@
 	return 0;
 
 cleanup_tx:
-	bnad_cleanup_tx(bnad, 0);
+	bnad_destroy_tx(bnad, 0);
 
 err_return:
 	mutex_unlock(&bnad->conf_mutex);
@@ -2546,8 +2515,8 @@
 
 	wait_for_completion(&bnad->bnad_completions.enet_comp);
 
-	bnad_cleanup_tx(bnad, 0);
-	bnad_cleanup_rx(bnad, 0);
+	bnad_destroy_tx(bnad, 0);
+	bnad_destroy_rx(bnad, 0);
 
 	/* Synchronize mailbox IRQ */
 	bnad_mbox_irq_sync(bnad);
@@ -2620,7 +2589,7 @@
 		if ((u16) (*tcb->hw_consumer_index) !=
 		    tcb->consumer_index &&
 		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
-			acked = bnad_free_txbufs(bnad, tcb);
+			acked = bnad_txcmpl_process(bnad, tcb);
 			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 				bna_ib_ack(tcb->i_dbell, acked);
 			smp_mb__before_clear_bit();
@@ -2843,9 +2812,6 @@
 	bna_txq_prod_indx_doorbell(tcb);
 	smp_mb();
 
-	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
-		tasklet_schedule(&bnad->tx_free_tasklet);
-
 	return NETDEV_TX_OK;
 }
 
@@ -3127,8 +3093,8 @@
 /*
  * 1. Initialize the bnad structure
  * 2. Setup netdev pointer in pci_dev
- * 3. Initialze Tx free tasklet
- * 4. Initialize no. of TxQ & CQs & MSIX vectors
+ * 3. Initialize no. of TxQ & CQs & MSIX vectors
+ * 4. Initialize work queue.
  */
 static int
 bnad_init(struct bnad *bnad,
@@ -3171,8 +3137,11 @@
 	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
 	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
 
-	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
-		     (unsigned long)bnad);
+	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
+	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
+
+	if (!bnad->work_q)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -3185,6 +3154,12 @@
 static void
 bnad_uninit(struct bnad *bnad)
 {
+	if (bnad->work_q) {
+		flush_workqueue(bnad->work_q);
+		destroy_workqueue(bnad->work_q);
+		bnad->work_q = NULL;
+	}
+
 	if (bnad->bar0)
 		iounmap(bnad->bar0);
 	pci_set_drvdata(bnad->pcidev, NULL);
@@ -3304,7 +3279,6 @@
 	/*
 	 * Initialize bnad structure
 	 * Setup relation between pci_dev & netdev
-	 * Init Tx free tasklet
 	 */
 	err = bnad_init(bnad, pdev, netdev);
 	if (err)
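
The bnad.c rework above removes the Tx free tasklet and the mdelay()-based waits from the Tx/Rx cleanup callbacks, and moves the heavy lifting onto a driver-private single-threaded workqueue: the work handler frees buffers, re-queues itself while any queue is still busy, and only then reports cleanup completion under the bna lock. A hedged, generic sketch of that pattern is below; all my_* names are placeholders, and unlike the patch (which casts the handler to work_func_t) it recovers the delayed_work with to_delayed_work().

/* Sketch of the deferred-cleanup pattern introduced above; not bnad code. */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_dev {
	struct workqueue_struct *work_q;
	struct delayed_work cleanup_work;
	spinlock_t lock;
};

/* Placeholder helpers standing in for the per-queue bookkeeping. */
static bool my_resource_busy(struct my_dev *dev) { return false; }
static void my_free_resources(struct my_dev *dev) { }
static void my_cleanup_done(struct my_dev *dev) { }

static void my_cleanup(struct work_struct *work)
{
	struct my_dev *dev = container_of(to_delayed_work(work),
					  struct my_dev, cleanup_work);
	unsigned long flags;

	if (my_resource_busy(dev)) {
		/* Re-arm instead of spinning or sleeping in a callback. */
		queue_delayed_work(dev->work_q, &dev->cleanup_work,
				   msecs_to_jiffies(1));
		return;
	}

	my_free_resources(dev);

	spin_lock_irqsave(&dev->lock, flags);
	my_cleanup_done(dev);	/* e.g. the *_cleanup_complete() notification */
	spin_unlock_irqrestore(&dev->lock, flags);
}

static int my_init(struct my_dev *dev)
{
	dev->work_q = create_singlethread_workqueue("my_wq");
	if (!dev->work_q)
		return -ENOMEM;
	INIT_DELAYED_WORK(&dev->cleanup_work, my_cleanup);
	return 0;
}
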
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 55824d9..72742be 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@
 #define BNAD_NAME			"bna"
 #define BNAD_NAME_LEN			64
 
-#define BNAD_VERSION			"3.0.2.2"
+#define BNAD_VERSION			"3.0.23.0"
 
 #define BNAD_MAILBOX_MSIX_INDEX		0
 #define BNAD_MAILBOX_MSIX_VECTORS	1
@@ -210,6 +210,7 @@
 	struct bna_tx *tx; /* 1:1 between tx_info & tx */
 	struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
 	u32 tx_id;
+	struct delayed_work tx_cleanup_work;
 } ____cacheline_aligned;
 
 struct bnad_rx_info {
@@ -217,6 +218,7 @@
 
 	struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
 	u32 rx_id;
+	struct work_struct rx_cleanup_work;
 } ____cacheline_aligned;
 
 /* Unmap queues for Tx / Rx cleanup */
@@ -318,7 +320,7 @@
 	/* Burnt in MAC address */
 	mac_t			perm_addr;
 
-	struct tasklet_struct	tx_free_tasklet;
+	struct workqueue_struct *work_q;
 
 	/* Statistics */
 	struct bnad_stats stats;
@@ -328,6 +330,7 @@
 	char			adapter_name[BNAD_NAME_LEN];
 	char			port_name[BNAD_NAME_LEN];
 	char			mbox_irq_name[BNAD_NAME_LEN];
+	char			wq_name[BNAD_NAME_LEN];
 
 	/* debugfs specific data */
 	char	*regdata;
@@ -370,8 +373,8 @@
 
 extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
 extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id);
+extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
 
 /* Timer start/stop protos */
 extern void bnad_dim_timer_start(struct bnad *bnad);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index ab753d7..40e1e84 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -464,7 +464,7 @@
 		for (i = 0; i < bnad->num_rx; i++) {
 			if (!bnad->rx_info[i].rx)
 				continue;
-			bnad_cleanup_rx(bnad, i);
+			bnad_destroy_rx(bnad, i);
 			current_err = bnad_setup_rx(bnad, i);
 			if (current_err && !err)
 				err = current_err;
@@ -492,7 +492,7 @@
 		for (i = 0; i < bnad->num_tx; i++) {
 			if (!bnad->tx_info[i].tx)
 				continue;
-			bnad_cleanup_tx(bnad, i);
+			bnad_destroy_tx(bnad, i);
 			current_err = bnad_setup_tx(bnad, i);
 			if (current_err && !err)
 				err = current_err;
@@ -539,7 +539,7 @@
 }
 
 static void
-bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
+bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
 {
 	struct bnad *bnad = netdev_priv(netdev);
 	int i, j, q_num;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c4834c2..1466bc4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1213,6 +1213,7 @@
 	.set_settings		= macb_set_settings,
 	.get_drvinfo		= macb_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
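
The one-line macb change hooks the stock ethtool_op_get_ts_info() helper into the driver's ethtool_ops, so ethtool -T reports the software-timestamping defaults. A sketch of the hookup is below; every field other than .get_ts_info is a placeholder.

/* Sketch only: my_ethtool_ops is illustrative, not the macb structure. */
#include <linux/ethtool.h>

static const struct ethtool_ops my_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	/* Report the generic software-timestamping capabilities. */
	.get_ts_info	= ethtool_op_get_ts_info,
};
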
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 77b4e87..d7ac6c1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1193,18 +1193,16 @@
 	if (err)
 		return err;
 
-	NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request);
-	NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
-	if (pp->set & ENIC_SET_NAME)
-		NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
-			pp->name);
-	if (pp->set & ENIC_SET_INSTANCE)
-		NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
-			pp->instance_uuid);
-	if (pp->set & ENIC_SET_HOST)
-		NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
-			pp->host_uuid);
-
+	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
+	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
+	    ((pp->set & ENIC_SET_NAME) &&
+	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
+	    ((pp->set & ENIC_SET_INSTANCE) &&
+	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
+		     pp->instance_uuid)) ||
+	    ((pp->set & ENIC_SET_HOST) &&
+	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
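
The enic change replaces the removed NLA_PUT* macros (which hid a goto behind the macro) with explicit nla_put_*() calls chained by ||, so the first attribute that does not fit the skb takes the nla_put_failure path. A sketch of the same fill pattern with made-up attribute types:

/* Sketch of the nla_put error-handling chain; MY_ATTR_* and
 * my_fill_info() are hypothetical, only the pattern mirrors the patch. */
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

enum { MY_ATTR_UNSPEC, MY_ATTR_REQUEST, MY_ATTR_NAME };

static int my_fill_info(struct sk_buff *skb, u16 request, const char *name)
{
	if (nla_put_u16(skb, MY_ATTR_REQUEST, request) ||
	    nla_put_string(skb, MY_ATTR_NAME, name))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
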
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index dafea1e..43464f0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -184,7 +184,7 @@
 };
 
 static const int enic_pp_handlers_count =
-			sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers);
+			ARRAY_SIZE(enic_pp_handlers);
 
 static int enic_pp_preassociate(struct enic *enic, int vf,
 	struct enic_port_profile *prev_pp, int *restore_pp)
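
The enic_pp change swaps the open-coded sizeof(x)/sizeof(*x) for ARRAY_SIZE(), which computes the same count but also fails to build if its argument is a pointer rather than an array. A tiny example with a made-up handler table:

/* ARRAY_SIZE() example; my_handlers is a made-up table. */
#include <linux/kernel.h>

static int (*my_handlers[])(void) = { NULL, NULL, NULL };
static const int my_handlers_count = ARRAY_SIZE(my_handlers);	/* 3 */
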
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 68f1c39..61cc093 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1380,6 +1380,7 @@
 static int de_open (struct net_device *dev)
 {
 	struct de_private *de = netdev_priv(dev);
+	const int irq = de->pdev->irq;
 	int rc;
 
 	netif_dbg(de, ifup, dev, "enabling interface\n");
@@ -1394,10 +1395,9 @@
 
 	dw32(IntrMask, 0);
 
-	rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
+	rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
 	if (rc) {
-		netdev_err(dev, "IRQ %d request failure, err=%d\n",
-			   dev->irq, rc);
+		netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
 		goto err_out_free;
 	}
 
@@ -1413,7 +1413,7 @@
 	return 0;
 
 err_out_free_irq:
-	free_irq(dev->irq, dev);
+	free_irq(irq, dev);
 err_out_free:
 	de_free_rings(de);
 	return rc;
@@ -1434,7 +1434,7 @@
 	netif_carrier_off(dev);
 	spin_unlock_irqrestore(&de->lock, flags);
 
-	free_irq(dev->irq, dev);
+	free_irq(de->pdev->irq, dev);
 
 	de_free_rings(de);
 	de_adapter_sleep(de);
@@ -1444,6 +1444,7 @@
 static void de_tx_timeout (struct net_device *dev)
 {
 	struct de_private *de = netdev_priv(dev);
+	const int irq = de->pdev->irq;
 
 	netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
 		   dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
@@ -1451,7 +1452,7 @@
 
 	del_timer_sync(&de->media_timer);
 
-	disable_irq(dev->irq);
+	disable_irq(irq);
 	spin_lock_irq(&de->lock);
 
 	de_stop_hw(de);
@@ -1459,12 +1460,12 @@
 	netif_carrier_off(dev);
 
 	spin_unlock_irq(&de->lock);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 
 	/* Update the error counts. */
 	__de_get_stats(de);
 
-	synchronize_irq(dev->irq);
+	synchronize_irq(irq);
 	de_clean_rings(de);
 
 	de_init_rings(de);
@@ -2024,8 +2025,6 @@
 		goto err_out_res;
 	}
 
-	dev->irq = pdev->irq;
-
 	/* obtain and check validity of PCI I/O address */
 	pciaddr = pci_resource_start(pdev, 1);
 	if (!pciaddr) {
@@ -2050,7 +2049,6 @@
 		       pciaddr, pci_name(pdev));
 		goto err_out_res;
 	}
-	dev->base_addr = (unsigned long) regs;
 	de->regs = regs;
 
 	de_adapter_wake(de);
@@ -2078,11 +2076,9 @@
 		goto err_out_iomap;
 
 	/* print info about board and interface just registered */
-	netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+	netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
 		    de->de21040 ? "21040" : "21041",
-		    dev->base_addr,
-		    dev->dev_addr,
-		    dev->irq);
+		    regs, dev->dev_addr, pdev->irq);
 
 	pci_set_drvdata(pdev, dev);
 
@@ -2130,9 +2126,11 @@
 
 	rtnl_lock();
 	if (netif_running (dev)) {
+		const int irq = pdev->irq;
+
 		del_timer_sync(&de->media_timer);
 
-		disable_irq(dev->irq);
+		disable_irq(irq);
 		spin_lock_irq(&de->lock);
 
 		de_stop_hw(de);
@@ -2141,12 +2139,12 @@
 		netif_carrier_off(dev);
 
 		spin_unlock_irq(&de->lock);
-		enable_irq(dev->irq);
+		enable_irq(irq);
 
 		/* Update the error counts. */
 		__de_get_stats(de);
 
-		synchronize_irq(dev->irq);
+		synchronize_irq(irq);
 		de_clean_rings(de);
 
 		de_adapter_sleep(de);
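
The de2104x changes stop reading the IRQ from the legacy dev->irq/dev->base_addr netdevice fields and take it from the underlying pci_dev instead, caching it in a local when a function uses it more than once. A sketch of the open/close pairing under that convention; my_priv and my_isr are placeholders.

/* Sketch of requesting/freeing the IRQ via the pci_dev, not dev->irq. */
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct my_priv {
	struct pci_dev *pdev;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	const int irq = priv->pdev->irq;	/* cache it once */

	return request_irq(irq, my_isr, IRQF_SHARED, dev->name, dev);
}

static int my_close(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	free_irq(priv->pdev->irq, dev);
	return 0;
}
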
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 1eccf494..4d6fe60 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -150,6 +150,12 @@
 #define DMFE_TX_TIMEOUT ((3*HZ)/2)	/* tx packet time-out time 1.5 s" */
 #define DMFE_TX_KICK 	(HZ/2)	/* tx packet Kick-out time 0.5 s" */
 
+#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
+#define dr32(reg)	ioread32(ioaddr + (reg))
+#define dr16(reg)	ioread16(ioaddr + (reg))
+#define dr8(reg)	ioread8(ioaddr + (reg))
+
 #define DMFE_DBUG(dbug_now, msg, value)			\
 	do {						\
 		if (dmfe_debug || (dbug_now))		\
@@ -178,14 +184,6 @@
 
 #define SROM_V41_CODE   0x14
 
-#define SROM_CLK_WRITE(data, ioaddr) \
-	outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
-	udelay(5); \
-	outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
-	udelay(5); \
-	outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
-	udelay(5);
-
 #define __CHK_IO_SIZE(pci_id, dev_rev) \
  (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
 	DM9102A_IO_SIZE: DM9102_IO_SIZE)
@@ -213,11 +211,11 @@
 struct dmfe_board_info {
 	u32 chip_id;			/* Chip vendor/Device ID */
 	u8 chip_revision;		/* Chip revision */
-	struct DEVICE *next_dev;	/* next device */
+	struct net_device *next_dev;	/* next device */
 	struct pci_dev *pdev;		/* PCI device */
 	spinlock_t lock;
 
-	long ioaddr;			/* I/O base address */
+	void __iomem *ioaddr;		/* I/O base address */
 	u32 cr0_data;
 	u32 cr5_data;
 	u32 cr6_data;
@@ -320,20 +318,20 @@
 static int dmfe_stop(struct DEVICE *);
 static void dmfe_set_filter_mode(struct DEVICE *);
 static const struct ethtool_ops netdev_ethtool_ops;
-static u16 read_srom_word(long ,int);
+static u16 read_srom_word(void __iomem *, int);
 static irqreturn_t dmfe_interrupt(int , void *);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void poll_dmfe (struct net_device *dev);
 #endif
-static void dmfe_descriptor_init(struct net_device *, unsigned long);
+static void dmfe_descriptor_init(struct net_device *);
 static void allocate_rx_buffer(struct net_device *);
-static void update_cr6(u32, unsigned long);
+static void update_cr6(u32, void __iomem *);
 static void send_filter_frame(struct DEVICE *);
 static void dm9132_id_table(struct DEVICE *);
-static u16 phy_read(unsigned long, u8, u8, u32);
-static void phy_write(unsigned long, u8, u8, u16, u32);
-static void phy_write_1bit(unsigned long, u32);
-static u16 phy_read_1bit(unsigned long);
+static u16 phy_read(void __iomem *, u8, u8, u32);
+static void phy_write(void __iomem *, u8, u8, u16, u32);
+static void phy_write_1bit(void __iomem *, u32);
+static u16 phy_read_1bit(void __iomem *);
 static u8 dmfe_sense_speed(struct dmfe_board_info *);
 static void dmfe_process_mode(struct dmfe_board_info *);
 static void dmfe_timer(unsigned long);
@@ -462,14 +460,16 @@
 	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
 
 	db->chip_id = ent->driver_data;
-	db->ioaddr = pci_resource_start(pdev, 0);
+	/* IO type range. */
+	db->ioaddr = pci_iomap(pdev, 0, 0);
+	if (!db->ioaddr)
+		goto err_out_free_buf;
+
 	db->chip_revision = pdev->revision;
 	db->wol_mode = 0;
 
 	db->pdev = pdev;
 
-	dev->base_addr = db->ioaddr;
-	dev->irq = pdev->irq;
 	pci_set_drvdata(pdev, dev);
 	dev->netdev_ops = &netdev_ops;
 	dev->ethtool_ops = &netdev_ethtool_ops;
@@ -484,9 +484,10 @@
 		db->chip_type = 0;
 
 	/* read 64 word srom data */
-	for (i = 0; i < 64; i++)
+	for (i = 0; i < 64; i++) {
 		((__le16 *) db->srom)[i] =
 			cpu_to_le16(read_srom_word(db->ioaddr, i));
+	}
 
 	/* Set Node address */
 	for (i = 0; i < 6; i++)
@@ -494,16 +495,18 @@
 
 	err = register_netdev (dev);
 	if (err)
-		goto err_out_free_buf;
+		goto err_out_unmap;
 
 	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
 		 ent->driver_data >> 16,
-		 pci_name(pdev), dev->dev_addr, dev->irq);
+		 pci_name(pdev), dev->dev_addr, pdev->irq);
 
 	pci_set_master(pdev);
 
 	return 0;
 
+err_out_unmap:
+	pci_iounmap(pdev, db->ioaddr);
 err_out_free_buf:
 	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
 			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
@@ -532,7 +535,7 @@
  	if (dev) {
 
 		unregister_netdev(dev);
-
+		pci_iounmap(db->pdev, db->ioaddr);
 		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
 					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
  					db->desc_pool_dma_ptr);
@@ -555,13 +558,13 @@
 
 static int dmfe_open(struct DEVICE *dev)
 {
-	int ret;
 	struct dmfe_board_info *db = netdev_priv(dev);
+	const int irq = db->pdev->irq;
+	int ret;
 
 	DMFE_DBUG(0, "dmfe_open", 0);
 
-	ret = request_irq(dev->irq, dmfe_interrupt,
-			  IRQF_SHARED, dev->name, dev);
+	ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
 	if (ret)
 		return ret;
 
@@ -615,14 +618,14 @@
 static void dmfe_init_dm910x(struct DEVICE *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = db->ioaddr;
+	void __iomem *ioaddr = db->ioaddr;
 
 	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
 
 	/* Reset DM910x MAC controller */
-	outl(DM910X_RESET, ioaddr + DCR0);	/* RESET MAC */
+	dw32(DCR0, DM910X_RESET);	/* RESET MAC */
 	udelay(100);
-	outl(db->cr0_data, ioaddr + DCR0);
+	dw32(DCR0, db->cr0_data);
 	udelay(5);
 
 	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
@@ -633,12 +636,12 @@
 	db->media_mode = dmfe_media_mode;
 
 	/* RESET Phyxcer Chip by GPR port bit 7 */
-	outl(0x180, ioaddr + DCR12);		/* Let bit 7 output port */
+	dw32(DCR12, 0x180);		/* Let bit 7 output port */
 	if (db->chip_id == PCI_DM9009_ID) {
-		outl(0x80, ioaddr + DCR12);	/* Issue RESET signal */
+		dw32(DCR12, 0x80);	/* Issue RESET signal */
 		mdelay(300);			/* Delay 300 ms */
 	}
-	outl(0x0, ioaddr + DCR12);	/* Clear RESET signal */
+	dw32(DCR12, 0x0);	/* Clear RESET signal */
 
 	/* Process Phyxcer Media Mode */
 	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
@@ -649,7 +652,7 @@
 		db->op_mode = db->media_mode; 	/* Force Mode */
 
 	/* Initialize Transmit/Receive decriptor and CR3/4 */
-	dmfe_descriptor_init(dev, ioaddr);
+	dmfe_descriptor_init(dev);
 
 	/* Init CR6 to program DM910x operation */
 	update_cr6(db->cr6_data, ioaddr);
@@ -662,10 +665,10 @@
 
 	/* Init CR7, interrupt active bit */
 	db->cr7_data = CR7_DEFAULT;
-	outl(db->cr7_data, ioaddr + DCR7);
+	dw32(DCR7, db->cr7_data);
 
 	/* Init CR15, Tx jabber and Rx watchdog timer */
-	outl(db->cr15_data, ioaddr + DCR15);
+	dw32(DCR15, db->cr15_data);
 
 	/* Enable DM910X Tx/Rx function */
 	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
@@ -682,6 +685,7 @@
 					 struct DEVICE *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	struct tx_desc *txptr;
 	unsigned long flags;
 
@@ -707,7 +711,7 @@
 	}
 
 	/* Disable NIC interrupt */
-	outl(0, dev->base_addr + DCR7);
+	dw32(DCR7, 0);
 
 	/* transmit this packet */
 	txptr = db->tx_insert_ptr;
@@ -721,11 +725,11 @@
 	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
 		db->tx_packet_cnt++;			/* Ready to send */
-		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
+		dw32(DCR1, 0x1);			/* Issue Tx polling */
 		dev->trans_start = jiffies;		/* saved time stamp */
 	} else {
 		db->tx_queue_cnt++;			/* queue TX packet */
-		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
+		dw32(DCR1, 0x1);			/* Issue Tx polling */
 	}
 
 	/* Tx resource check */
@@ -734,7 +738,7 @@
 
 	/* Restore CR7 to enable interrupt */
 	spin_unlock_irqrestore(&db->lock, flags);
-	outl(db->cr7_data, dev->base_addr + DCR7);
+	dw32(DCR7, db->cr7_data);
 
 	/* free this SKB */
 	dev_kfree_skb(skb);
@@ -751,7 +755,7 @@
 static int dmfe_stop(struct DEVICE *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = db->ioaddr;
 
 	DMFE_DBUG(0, "dmfe_stop", 0);
 
@@ -762,12 +766,12 @@
 	del_timer_sync(&db->timer);
 
 	/* Reset & stop DM910X board */
-	outl(DM910X_RESET, ioaddr + DCR0);
-	udelay(5);
-	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+	dw32(DCR0, DM910X_RESET);
+	udelay(100);
+	phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
 
 	/* free interrupt */
-	free_irq(dev->irq, dev);
+	free_irq(db->pdev->irq, dev);
 
 	/* free allocated rx buffer */
 	dmfe_free_rxbuffer(db);
@@ -794,7 +798,7 @@
 {
 	struct DEVICE *dev = dev_id;
 	struct dmfe_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = db->ioaddr;
 	unsigned long flags;
 
 	DMFE_DBUG(0, "dmfe_interrupt()", 0);
@@ -802,15 +806,15 @@
 	spin_lock_irqsave(&db->lock, flags);
 
 	/* Got DM910X status */
-	db->cr5_data = inl(ioaddr + DCR5);
-	outl(db->cr5_data, ioaddr + DCR5);
+	db->cr5_data = dr32(DCR5);
+	dw32(DCR5, db->cr5_data);
 	if ( !(db->cr5_data & 0xc1) ) {
 		spin_unlock_irqrestore(&db->lock, flags);
 		return IRQ_HANDLED;
 	}
 
 	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
-	outl(0, ioaddr + DCR7);
+	dw32(DCR7, 0);
 
 	/* Check system status */
 	if (db->cr5_data & 0x2000) {
@@ -838,11 +842,11 @@
 	if (db->dm910x_chk_mode & 0x2) {
 		db->dm910x_chk_mode = 0x4;
 		db->cr6_data |= 0x100;
-		update_cr6(db->cr6_data, db->ioaddr);
+		update_cr6(db->cr6_data, ioaddr);
 	}
 
 	/* Restore CR7 to enable interrupt mask */
-	outl(db->cr7_data, ioaddr + DCR7);
+	dw32(DCR7, db->cr7_data);
 
 	spin_unlock_irqrestore(&db->lock, flags);
 	return IRQ_HANDLED;
@@ -858,11 +862,14 @@
 
 static void poll_dmfe (struct net_device *dev)
 {
+	struct dmfe_board_info *db = netdev_priv(dev);
+	const int irq = db->pdev->irq;
+
 	/* disable_irq here is not very nice, but with the lockless
 	   interrupt handler we have no other choice. */
-	disable_irq(dev->irq);
-	dmfe_interrupt (dev->irq, dev);
-	enable_irq(dev->irq);
+	disable_irq(irq);
+	dmfe_interrupt (irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -873,7 +880,7 @@
 static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
 {
 	struct tx_desc *txptr;
-	unsigned long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = db->ioaddr;
 	u32 tdes0;
 
 	txptr = db->tx_remove_ptr;
@@ -897,7 +904,7 @@
 					db->tx_fifo_underrun++;
 					if ( !(db->cr6_data & CR6_SFT) ) {
 						db->cr6_data = db->cr6_data | CR6_SFT;
-						update_cr6(db->cr6_data, db->ioaddr);
+						update_cr6(db->cr6_data, ioaddr);
 					}
 				}
 				if (tdes0 & 0x0100)
@@ -924,7 +931,7 @@
 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
 		db->tx_packet_cnt++;			/* Ready to send */
 		db->tx_queue_cnt--;
-		outl(0x1, ioaddr + DCR1);		/* Issue Tx polling */
+		dw32(DCR1, 0x1);			/* Issue Tx polling */
 		dev->trans_start = jiffies;		/* saved time stamp */
 	}
 
@@ -1087,12 +1094,7 @@
 
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-	if (np->pdev)
-		strlcpy(info->bus_info, pci_name(np->pdev),
-			sizeof(info->bus_info));
-	else
-		sprintf(info->bus_info, "EISA 0x%lx %d",
-			dev->base_addr, dev->irq);
+	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
 static int dmfe_ethtool_set_wol(struct net_device *dev,
@@ -1132,10 +1134,11 @@
 
 static void dmfe_timer(unsigned long data)
 {
+	struct net_device *dev = (struct net_device *)data;
+	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	u32 tmp_cr8;
 	unsigned char tmp_cr12;
-	struct DEVICE *dev = (struct DEVICE *) data;
-	struct dmfe_board_info *db = netdev_priv(dev);
  	unsigned long flags;
 
 	int link_ok, link_ok_phy;
@@ -1148,11 +1151,10 @@
 		db->first_in_callback = 1;
 		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
 			db->cr6_data &= ~0x40000;
-			update_cr6(db->cr6_data, db->ioaddr);
-			phy_write(db->ioaddr,
-				  db->phy_addr, 0, 0x1000, db->chip_id);
+			update_cr6(db->cr6_data, ioaddr);
+			phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
 			db->cr6_data |= 0x40000;
-			update_cr6(db->cr6_data, db->ioaddr);
+			update_cr6(db->cr6_data, ioaddr);
 			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
 			add_timer(&db->timer);
 			spin_unlock_irqrestore(&db->lock, flags);
@@ -1167,7 +1169,7 @@
 		db->dm910x_chk_mode = 0x4;
 
 	/* Dynamic reset DM910X : system error or transmit time-out */
-	tmp_cr8 = inl(db->ioaddr + DCR8);
+	tmp_cr8 = dr32(DCR8);
 	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
 		db->reset_cr8++;
 		db->wait_reset = 1;
@@ -1177,7 +1179,7 @@
 	/* TX polling kick monitor */
 	if ( db->tx_packet_cnt &&
 	     time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
-		outl(0x1, dev->base_addr + DCR1);   /* Tx polling again */
+		dw32(DCR1, 0x1);   /* Tx polling again */
 
 		/* TX Timeout */
 		if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
@@ -1200,9 +1202,9 @@
 
 	/* Link status check, Dynamic media type change */
 	if (db->chip_id == PCI_DM9132_ID)
-		tmp_cr12 = inb(db->ioaddr + DCR9 + 3);	/* DM9132 */
+		tmp_cr12 = dr8(DCR9 + 3);	/* DM9132 */
 	else
-		tmp_cr12 = inb(db->ioaddr + DCR12);	/* DM9102/DM9102A */
+		tmp_cr12 = dr8(DCR12);		/* DM9102/DM9102A */
 
 	if ( ((db->chip_id == PCI_DM9102_ID) &&
 		(db->chip_revision == 0x30)) ||
@@ -1251,7 +1253,7 @@
 			/* 10/100M link failed, used 1M Home-Net */
 			db->cr6_data|=0x00040000;	/* bit18=1, MII */
 			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
-			update_cr6(db->cr6_data, db->ioaddr);
+			update_cr6(db->cr6_data, ioaddr);
 		}
 	} else if (!netif_carrier_ok(dev)) {
 
@@ -1288,17 +1290,18 @@
  *	Re-initialize DM910X board
  */
 
-static void dmfe_dynamic_reset(struct DEVICE *dev)
+static void dmfe_dynamic_reset(struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 
 	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
 
 	/* Sopt MAC controller */
 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
-	update_cr6(db->cr6_data, dev->base_addr);
-	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
-	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+	update_cr6(db->cr6_data, ioaddr);
+	dw32(DCR7, 0);				/* Disable Interrupt */
+	dw32(DCR5, dr32(DCR5));
 
 	/* Disable upper layer interface */
 	netif_stop_queue(dev);
@@ -1364,9 +1367,10 @@
  *	Using Chain structure, and allocate Tx/Rx buffer
  */
 
-static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
+static void dmfe_descriptor_init(struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	struct tx_desc *tmp_tx;
 	struct rx_desc *tmp_rx;
 	unsigned char *tmp_buf;
@@ -1379,7 +1383,7 @@
 	/* tx descriptor start pointer */
 	db->tx_insert_ptr = db->first_tx_desc;
 	db->tx_remove_ptr = db->first_tx_desc;
-	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
+	dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */
 
 	/* rx descriptor start pointer */
 	db->first_rx_desc = (void *)db->first_tx_desc +
@@ -1389,7 +1393,7 @@
 			sizeof(struct tx_desc) * TX_DESC_CNT;
 	db->rx_insert_ptr = db->first_rx_desc;
 	db->rx_ready_ptr = db->first_rx_desc;
-	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */
+	dw32(DCR3, db->first_rx_desc_dma);		/* RX DESC address */
 
 	/* Init Transmit chain */
 	tmp_buf = db->buf_pool_start;
@@ -1431,14 +1435,14 @@
  *	Firstly stop DM910X , then written value and start
  */
 
-static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
 {
 	u32 cr6_tmp;
 
 	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
-	outl(cr6_tmp, ioaddr + DCR6);
+	dw32(DCR6, cr6_tmp);
 	udelay(5);
-	outl(cr6_data, ioaddr + DCR6);
+	dw32(DCR6, cr6_data);
 	udelay(5);
 }
 
@@ -1448,24 +1452,19 @@
  *	This setup frame initialize DM910X address filter mode
 */
 
-static void dm9132_id_table(struct DEVICE *dev)
+static void dm9132_id_table(struct net_device *dev)
 {
+	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr + 0xc0;
+	u16 *addrptr = (u16 *)dev->dev_addr;
 	struct netdev_hw_addr *ha;
-	u16 * addrptr;
-	unsigned long ioaddr = dev->base_addr+0xc0;		/* ID Table */
-	u32 hash_val;
 	u16 i, hash_table[4];
 
-	DMFE_DBUG(0, "dm9132_id_table()", 0);
-
 	/* Node address */
-	addrptr = (u16 *) dev->dev_addr;
-	outw(addrptr[0], ioaddr);
-	ioaddr += 4;
-	outw(addrptr[1], ioaddr);
-	ioaddr += 4;
-	outw(addrptr[2], ioaddr);
-	ioaddr += 4;
+	for (i = 0; i < 3; i++) {
+		dw16(0, addrptr[i]);
+		ioaddr += 4;
+	}
 
 	/* Clear Hash Table */
 	memset(hash_table, 0, sizeof(hash_table));
@@ -1475,13 +1474,14 @@
 
 	/* the multicast address in Hash Table : 64 bits */
 	netdev_for_each_mc_addr(ha, dev) {
-		hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
+		u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
+
 		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
 	}
 
 	/* Write the hash table to MAC MD table */
 	for (i = 0; i < 4; i++, ioaddr += 4)
-		outw(hash_table[i], ioaddr);
+		dw16(0, hash_table[i]);
 }
 
 
@@ -1490,7 +1490,7 @@
  *	This setup frame initialize DM910X address filter mode
  */
 
-static void send_filter_frame(struct DEVICE *dev)
+static void send_filter_frame(struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
 	struct netdev_hw_addr *ha;
@@ -1535,12 +1535,14 @@
 
 	/* Resource Check and Send the setup packet */
 	if (!db->tx_packet_cnt) {
+		void __iomem *ioaddr = db->ioaddr;
+
 		/* Resource Empty */
 		db->tx_packet_cnt++;
 		txptr->tdes0 = cpu_to_le32(0x80000000);
-		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
-		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
-		update_cr6(db->cr6_data, dev->base_addr);
+		update_cr6(db->cr6_data | 0x2000, ioaddr);
+		dw32(DCR1, 0x1);	/* Issue Tx polling */
+		update_cr6(db->cr6_data, ioaddr);
 		dev->trans_start = jiffies;
 	} else
 		db->tx_queue_cnt++;	/* Put in TX queue */
@@ -1575,43 +1577,59 @@
 	db->rx_insert_ptr = rxptr;
 }
 
+static void srom_clk_write(void __iomem *ioaddr, u32 data)
+{
+	static const u32 cmd[] = {
+		CR9_SROM_READ | CR9_SRCS,
+		CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
+		CR9_SROM_READ | CR9_SRCS
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
+		dw32(DCR9, data | cmd[i]);
+		udelay(5);
+	}
+}
 
 /*
  *	Read one word data from the serial ROM
  */
-
-static u16 read_srom_word(long ioaddr, int offset)
+static u16 read_srom_word(void __iomem *ioaddr, int offset)
 {
+	u16 srom_data;
 	int i;
-	u16 srom_data = 0;
-	long cr9_ioaddr = ioaddr + DCR9;
 
-	outl(CR9_SROM_READ, cr9_ioaddr);
-	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+	dw32(DCR9, CR9_SROM_READ);
+	udelay(5);
+	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
+	udelay(5);
 
 	/* Send the Read Command 110b */
-	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+	srom_clk_write(ioaddr, SROM_DATA_1);
+	srom_clk_write(ioaddr, SROM_DATA_1);
+	srom_clk_write(ioaddr, SROM_DATA_0);
 
 	/* Send the offset */
 	for (i = 5; i >= 0; i--) {
 		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
-		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+		srom_clk_write(ioaddr, srom_data);
 	}
 
-	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
+	udelay(5);
 
 	for (i = 16; i > 0; i--) {
-		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+		dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
 		udelay(5);
 		srom_data = (srom_data << 1) |
-				((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
-		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+				((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
+		dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 		udelay(5);
 	}
 
-	outl(CR9_SROM_READ, cr9_ioaddr);
+	dw32(DCR9, CR9_SROM_READ);
+	udelay(5);
 	return srom_data;
 }
 
@@ -1620,13 +1638,14 @@
  *	Auto sense the media mode
  */
 
-static u8 dmfe_sense_speed(struct dmfe_board_info * db)
+static u8 dmfe_sense_speed(struct dmfe_board_info *db)
 {
+	void __iomem *ioaddr = db->ioaddr;
 	u8 ErrFlag = 0;
 	u16 phy_mode;
 
 	/* CR6 bit18=0, select 10/100M */
-	update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
+	update_cr6(db->cr6_data & ~0x40000, ioaddr);
 
 	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
 	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
@@ -1665,11 +1684,12 @@
 
 static void dmfe_set_phyxcer(struct dmfe_board_info *db)
 {
+	void __iomem *ioaddr = db->ioaddr;
 	u16 phy_reg;
 
 	/* Select 10/100M phyxcer */
 	db->cr6_data &= ~0x40000;
-	update_cr6(db->cr6_data, db->ioaddr);
+	update_cr6(db->cr6_data, ioaddr);
 
 	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
 	if (db->chip_id == PCI_DM9009_ID) {
@@ -1765,18 +1785,15 @@
  *	Write a word to Phy register
  */
 
-static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
+static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
 		      u16 phy_data, u32 chip_id)
 {
 	u16 i;
-	unsigned long ioaddr;
 
 	if (chip_id == PCI_DM9132_ID) {
-		ioaddr = iobase + 0x80 + offset * 4;
-		outw(phy_data, ioaddr);
+		dw16(0x80 + offset * 4, phy_data);
 	} else {
 		/* DM9102/DM9102A Chip */
-		ioaddr = iobase + DCR9;
 
 		/* Send 33 synchronization clock to Phy controller */
 		for (i = 0; i < 35; i++)
@@ -1816,19 +1833,16 @@
  *	Read a word data from phy register
  */
 
-static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
 {
 	int i;
 	u16 phy_data;
-	unsigned long ioaddr;
 
 	if (chip_id == PCI_DM9132_ID) {
 		/* DM9132 Chip */
-		ioaddr = iobase + 0x80 + offset * 4;
-		phy_data = inw(ioaddr);
+		phy_data = dr16(0x80 + offset * 4);
 	} else {
 		/* DM9102/DM9102A Chip */
-		ioaddr = iobase + DCR9;
 
 		/* Send 33 synchronization clock to Phy controller */
 		for (i = 0; i < 35; i++)
@@ -1870,13 +1884,13 @@
  *	Write one bit data to Phy Controller
  */
 
-static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
+static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
 {
-	outl(phy_data, ioaddr);			/* MII Clock Low */
+	dw32(DCR9, phy_data);		/* MII Clock Low */
 	udelay(1);
-	outl(phy_data | MDCLKH, ioaddr);	/* MII Clock High */
+	dw32(DCR9, phy_data | MDCLKH);	/* MII Clock High */
 	udelay(1);
-	outl(phy_data, ioaddr);			/* MII Clock Low */
+	dw32(DCR9, phy_data);		/* MII Clock Low */
 	udelay(1);
 }
 
@@ -1885,14 +1899,14 @@
  *	Read one bit phy data from PHY controller
  */
 
-static u16 phy_read_1bit(unsigned long ioaddr)
+static u16 phy_read_1bit(void __iomem *ioaddr)
 {
 	u16 phy_data;
 
-	outl(0x50000, ioaddr);
+	dw32(DCR9, 0x50000);
 	udelay(1);
-	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
-	outl(0x40000, ioaddr);
+	phy_data = (dr32(DCR9) >> 19) & 0x1;
+	dw32(DCR9, 0x40000);
 	udelay(1);
 
 	return phy_data;
@@ -1978,7 +1992,7 @@
 
 	/* Check DM9801 or DM9802 present or not */
 	db->HPNA_present = 0;
-	update_cr6(db->cr6_data|0x40000, db->ioaddr);
+	update_cr6(db->cr6_data | 0x40000, db->ioaddr);
 	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
 	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
 		/* DM9801 or DM9802 present */
@@ -2095,6 +2109,7 @@
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
 	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	u32 tmp;
 
 	/* Disable upper layer interface */
@@ -2102,11 +2117,11 @@
 
 	/* Disable Tx/Rx */
 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
-	update_cr6(db->cr6_data, dev->base_addr);
+	update_cr6(db->cr6_data, ioaddr);
 
 	/* Disable Interrupt */
-	outl(0, dev->base_addr + DCR7);
-	outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);
+	dw32(DCR7, 0);
+	dw32(DCR5, dr32(DCR5));
 
 	/* Fre RX buffers */
 	dmfe_free_rxbuffer(db);
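
The dmfe rework drops port I/O (inl/outl on an unsigned long address) in favour of pci_iomap() and a void __iomem * base, with thin dw32()/dr32() wrappers over iowrite32()/ioread32() that expect a local named ioaddr to be in scope. A sketch of that conversion with a made-up register; MY_CTRL_REG and the my_* names are placeholders.

/* Sketch of the pci_iomap() + iowrite/ioread accessor conversion. */
#include <linux/pci.h>
#include <linux/io.h>

#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))

#define MY_CTRL_REG	0x30	/* hypothetical register offset */

static void __iomem *my_map(struct pci_dev *pdev)
{
	/* BAR 0; a max length of 0 maps the whole region. */
	return pci_iomap(pdev, 0, 0);
}

static void my_kick(void __iomem *ioaddr)
{
	u32 cr = dr32(MY_CTRL_REG);

	dw32(MY_CTRL_REG, cr | 0x1);	/* set a hypothetical start bit */
}

static void my_unmap(struct pci_dev *pdev, void __iomem *ioaddr)
{
	pci_iounmap(pdev, ioaddr);
}
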
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fea3641..c4f37ac 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -328,7 +328,7 @@
 	udelay(100);
 
 	if (tulip_debug > 1)
-		netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq);
+		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
 
 	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
 	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -515,11 +515,13 @@
 static int
 tulip_open(struct net_device *dev)
 {
+	struct tulip_private *tp = netdev_priv(dev);
 	int retval;
 
 	tulip_init_ring (dev);
 
-	retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
+	retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
+			     dev->name, dev);
 	if (retval)
 		goto free_ring;
 
@@ -841,7 +843,7 @@
 		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
 			   ioread32 (ioaddr + CSR5));
 
-	free_irq (dev->irq, dev);
+	free_irq (tp->pdev->irq, dev);
 
 	tulip_free_ring (dev);
 
@@ -1489,8 +1491,6 @@
 
 	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
 
-	dev->base_addr = (unsigned long)ioaddr;
-
 #ifdef CONFIG_TULIP_MWI
 	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
 		tulip_mwi_config (pdev, dev);
@@ -1650,7 +1650,6 @@
 	for (i = 0; i < 6; i++)
 		last_phys_addr[i] = dev->dev_addr[i];
 	last_irq = irq;
-	dev->irq = irq;
 
 	/* The lower four bits are the media type. */
 	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
@@ -1858,7 +1857,8 @@
 	tulip_down(dev);
 
 	netif_device_detach(dev);
-	free_irq(dev->irq, dev);
+	/* FIXME: it needlessly adds an error path. */
+	free_irq(tp->pdev->irq, dev);
 
 save_state:
 	pci_save_state(pdev);
@@ -1900,7 +1900,9 @@
 		return retval;
 	}
 
-	if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
+	retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
+			     dev->name, dev);
+	if (retval) {
 		pr_err("request_irq failed in resume\n");
 		return retval;
 	}
@@ -1960,11 +1962,14 @@
 
 static void poll_tulip (struct net_device *dev)
 {
+	struct tulip_private *tp = netdev_priv(dev);
+	const int irq = tp->pdev->irq;
+
 	/* disable_irq here is not very nice, but with the lockless
 	   interrupt handler we have no other choice. */
-	disable_irq(dev->irq);
-	tulip_interrupt (dev->irq, dev);
-	enable_irq(dev->irq);
+	disable_irq(irq);
+	tulip_interrupt (irq, dev);
+	enable_irq(irq);
 }
 #endif
 
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index fc4001f..75d45f8 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -42,6 +42,8 @@
 #include <asm/dma.h>
 #include <asm/uaccess.h>
 
+#define uw32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define ur32(reg)	ioread32(ioaddr + (reg))
 
 /* Board/System/Debug information/definition ---------------- */
 #define PCI_ULI5261_ID  0x526110B9	/* ULi M5261 ID*/
@@ -110,14 +112,6 @@
 
 #define SROM_V41_CODE   0x14
 
-#define SROM_CLK_WRITE(data, ioaddr)					\
-		outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);		\
-		udelay(5);						\
-		outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);	\
-		udelay(5);						\
-		outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);		\
-		udelay(5);
-
 /* Structure/enum declaration ------------------------------- */
 struct tx_desc {
         __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -132,12 +126,15 @@
 } __attribute__(( aligned(32) ));
 
 struct uli526x_board_info {
-	u32 chip_id;			/* Chip vendor/Device ID */
+	struct uli_phy_ops {
+		void (*write)(struct uli526x_board_info *, u8, u8, u16);
+		u16 (*read)(struct uli526x_board_info *, u8, u8);
+	} phy;
 	struct net_device *next_dev;	/* next device */
 	struct pci_dev *pdev;		/* PCI device */
 	spinlock_t lock;
 
-	long ioaddr;			/* I/O base address */
+	void __iomem *ioaddr;		/* I/O base address */
 	u32 cr0_data;
 	u32 cr5_data;
 	u32 cr6_data;
@@ -227,21 +224,21 @@
 static int uli526x_stop(struct net_device *);
 static void uli526x_set_filter_mode(struct net_device *);
 static const struct ethtool_ops netdev_ethtool_ops;
-static u16 read_srom_word(long, int);
+static u16 read_srom_word(struct uli526x_board_info *, int);
 static irqreturn_t uli526x_interrupt(int, void *);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void uli526x_poll(struct net_device *dev);
 #endif
-static void uli526x_descriptor_init(struct net_device *, unsigned long);
+static void uli526x_descriptor_init(struct net_device *, void __iomem *);
 static void allocate_rx_buffer(struct net_device *);
-static void update_cr6(u32, unsigned long);
+static void update_cr6(u32, void __iomem *);
 static void send_filter_frame(struct net_device *, int);
-static u16 phy_read(unsigned long, u8, u8, u32);
-static u16 phy_readby_cr10(unsigned long, u8, u8);
-static void phy_write(unsigned long, u8, u8, u16, u32);
-static void phy_writeby_cr10(unsigned long, u8, u8, u16);
-static void phy_write_1bit(unsigned long, u32, u32);
-static u16 phy_read_1bit(unsigned long, u32);
+static u16 phy_readby_cr9(struct uli526x_board_info *, u8, u8);
+static u16 phy_readby_cr10(struct uli526x_board_info *, u8, u8);
+static void phy_writeby_cr9(struct uli526x_board_info *, u8, u8, u16);
+static void phy_writeby_cr10(struct uli526x_board_info *, u8, u8, u16);
+static void phy_write_1bit(struct uli526x_board_info *db, u32);
+static u16 phy_read_1bit(struct uli526x_board_info *db);
 static u8 uli526x_sense_speed(struct uli526x_board_info *);
 static void uli526x_process_mode(struct uli526x_board_info *);
 static void uli526x_timer(unsigned long);
@@ -253,6 +250,18 @@
 static void uli526x_init(struct net_device *);
 static void uli526x_set_phyxcer(struct uli526x_board_info *);
 
+static void srom_clk_write(struct uli526x_board_info *db, u32 data)
+{
+	void __iomem *ioaddr = db->ioaddr;
+
+	uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
+	udelay(5);
+	uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
+	udelay(5);
+	uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
+	udelay(5);
+}
+
 /* ULI526X network board routine ---------------------------- */
 
 static const struct net_device_ops netdev_ops = {
@@ -277,6 +286,7 @@
 {
 	struct uli526x_board_info *db;	/* board information structure */
 	struct net_device *dev;
+	void __iomem *ioaddr;
 	int i, err;
 
 	ULI526X_DBUG(0, "uli526x_init_one()", 0);
@@ -313,9 +323,9 @@
 		goto err_out_disable;
 	}
 
-	if (pci_request_regions(pdev, DRV_NAME)) {
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err < 0) {
 		pr_err("Failed to request PCI regions\n");
-		err = -ENODEV;
 		goto err_out_disable;
 	}
 
@@ -323,32 +333,41 @@
 	db = netdev_priv(dev);
 
 	/* Allocate Tx/Rx descriptor memory */
+	err = -ENOMEM;
+
 	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
-	if(db->desc_pool_ptr == NULL)
-	{
-		err = -ENOMEM;
-		goto err_out_nomem;
-	}
+	if (!db->desc_pool_ptr)
+		goto err_out_release;
+
 	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
-	if(db->buf_pool_ptr == NULL)
-	{
-		err = -ENOMEM;
-		goto err_out_nomem;
-	}
+	if (!db->buf_pool_ptr)
+		goto err_out_free_tx_desc;
 
 	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
 	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
 	db->buf_pool_start = db->buf_pool_ptr;
 	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
 
-	db->chip_id = ent->driver_data;
-	db->ioaddr = pci_resource_start(pdev, 0);
+	switch (ent->driver_data) {
+	case PCI_ULI5263_ID:
+		db->phy.write	= phy_writeby_cr10;
+		db->phy.read	= phy_readby_cr10;
+		break;
+	default:
+		db->phy.write	= phy_writeby_cr9;
+		db->phy.read	= phy_readby_cr9;
+		break;
+	}
 
+	/* IO region. */
+	ioaddr = pci_iomap(pdev, 0, 0);
+	if (!ioaddr)
+		goto err_out_free_tx_buf;
+
+	db->ioaddr = ioaddr;
 	db->pdev = pdev;
 	db->init = 1;
 
-	dev->base_addr = db->ioaddr;
-	dev->irq = pdev->irq;
 	pci_set_drvdata(pdev, dev);
 
 	/* Register some necessary functions */
@@ -360,24 +379,24 @@
 
 	/* read 64 word srom data */
 	for (i = 0; i < 64; i++)
-		((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
+		((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db, i));
 
 	/* Set Node address */
 	if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0)		/* SROM absent, so read MAC address from ID Table */
 	{
-		outl(0x10000, db->ioaddr + DCR0);	//Diagnosis mode
-		outl(0x1c0, db->ioaddr + DCR13);	//Reset dianostic pointer port
-		outl(0, db->ioaddr + DCR14);		//Clear reset port
-		outl(0x10, db->ioaddr + DCR14);		//Reset ID Table pointer
-		outl(0, db->ioaddr + DCR14);		//Clear reset port
-		outl(0, db->ioaddr + DCR13);		//Clear CR13
-		outl(0x1b0, db->ioaddr + DCR13);	//Select ID Table access port
+		uw32(DCR0, 0x10000);	//Diagnosis mode
+	uw32(DCR13, 0x1c0);	//Reset diagnostic pointer port
+		uw32(DCR14, 0);		//Clear reset port
+		uw32(DCR14, 0x10);	//Reset ID Table pointer
+		uw32(DCR14, 0);		//Clear reset port
+		uw32(DCR13, 0);		//Clear CR13
+		uw32(DCR13, 0x1b0);	//Select ID Table access port
 		//Read MAC address from CR14
 		for (i = 0; i < 6; i++)
-			dev->dev_addr[i] = inl(db->ioaddr + DCR14);
+			dev->dev_addr[i] = ur32(DCR14);
 		//Read end
-		outl(0, db->ioaddr + DCR13);	//Clear CR13
-		outl(0, db->ioaddr + DCR0);		//Clear CR0
+		uw32(DCR13, 0);		//Clear CR13
+		uw32(DCR0, 0);		//Clear CR0
 		udelay(10);
 	}
 	else		/*Exist SROM*/
@@ -387,26 +406,26 @@
 	}
 	err = register_netdev (dev);
 	if (err)
-		goto err_out_res;
+		goto err_out_unmap;
 
 	netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
 		    ent->driver_data >> 16, pci_name(pdev),
-		    dev->dev_addr, dev->irq);
+		    dev->dev_addr, pdev->irq);
 
 	pci_set_master(pdev);
 
 	return 0;
 
-err_out_res:
+err_out_unmap:
+	pci_iounmap(pdev, db->ioaddr);
+err_out_free_tx_buf:
+	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
+err_out_free_tx_desc:
+	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
+err_out_release:
 	pci_release_regions(pdev);
-err_out_nomem:
-	if(db->desc_pool_ptr)
-		pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
-			db->desc_pool_ptr, db->desc_pool_dma_ptr);
-
-	if(db->buf_pool_ptr != NULL)
-		pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
-			db->buf_pool_ptr, db->buf_pool_dma_ptr);
 err_out_disable:
 	pci_disable_device(pdev);
 err_out_free:
@@ -422,19 +441,17 @@
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct uli526x_board_info *db = netdev_priv(dev);
 
-	ULI526X_DBUG(0, "uli526x_remove_one()", 0);
-
+	unregister_netdev(dev);
+	pci_iounmap(pdev, db->ioaddr);
 	pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
 				DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
  				db->desc_pool_dma_ptr);
 	pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
 				db->buf_pool_ptr, db->buf_pool_dma_ptr);
-	unregister_netdev(dev);
 	pci_release_regions(pdev);
-	free_netdev(dev);	/* free board information */
-	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
-	ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(dev);
 }
 
 
@@ -468,7 +485,8 @@
 	/* Initialize ULI526X board */
 	uli526x_init(dev);
 
-	ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev);
+	ret = request_irq(db->pdev->irq, uli526x_interrupt, IRQF_SHARED,
+			  dev->name, dev);
 	if (ret)
 		return ret;
 
@@ -496,57 +514,57 @@
 static void uli526x_init(struct net_device *dev)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = db->ioaddr;
+	struct uli_phy_ops *phy = &db->phy;
+	void __iomem *ioaddr = db->ioaddr;
 	u8	phy_tmp;
 	u8	timeout;
-	u16	phy_value;
 	u16 phy_reg_reset;
 
 
 	ULI526X_DBUG(0, "uli526x_init()", 0);
 
 	/* Reset M526x MAC controller */
-	outl(ULI526X_RESET, ioaddr + DCR0);	/* RESET MAC */
+	uw32(DCR0, ULI526X_RESET);	/* RESET MAC */
 	udelay(100);
-	outl(db->cr0_data, ioaddr + DCR0);
+	uw32(DCR0, db->cr0_data);
 	udelay(5);
 
 	/* Phy addr : In some boards,M5261/M5263 phy address != 1 */
 	db->phy_addr = 1;
-	for(phy_tmp=0;phy_tmp<32;phy_tmp++)
-	{
-		phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add
-		if(phy_value != 0xffff&&phy_value!=0)
-		{
+	for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) {
+		u16 phy_value;
+
+		phy_value = phy->read(db, phy_tmp, 3);	//peer add
+		if (phy_value != 0xffff && phy_value != 0) {
 			db->phy_addr = phy_tmp;
 			break;
 		}
 	}
-	if(phy_tmp == 32)
+
+	if (phy_tmp == 32)
 		pr_warn("Can not find the phy address!!!\n");
 	/* Parser SROM and media mode */
 	db->media_mode = uli526x_media_mode;
 
 	/* phyxcer capability setting */
-	phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
+	phy_reg_reset = phy->read(db, db->phy_addr, 0);
 	phy_reg_reset = (phy_reg_reset | 0x8000);
-	phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
+	phy->write(db, db->phy_addr, 0, phy_reg_reset);
 
 	/* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
 	 * functions") or phy data sheet for details on phy reset
 	 */
 	udelay(500);
 	timeout = 10;
-	while (timeout-- &&
-		phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000)
-			udelay(100);
+	while (timeout-- && phy->read(db, db->phy_addr, 0) & 0x8000)
+		udelay(100);
 
 	/* Process Phyxcer Media Mode */
 	uli526x_set_phyxcer(db);
 
 	/* Media Mode Process */
 	if ( !(db->media_mode & ULI526X_AUTO) )
-		db->op_mode = db->media_mode; 	/* Force Mode */
+		db->op_mode = db->media_mode;		/* Force Mode */
 
 	/* Initialize Transmit/Receive decriptor and CR3/4 */
 	uli526x_descriptor_init(dev, ioaddr);
@@ -559,10 +577,10 @@
 
 	/* Init CR7, interrupt active bit */
 	db->cr7_data = CR7_DEFAULT;
-	outl(db->cr7_data, ioaddr + DCR7);
+	uw32(DCR7, db->cr7_data);
 
 	/* Init CR15, Tx jabber and Rx watchdog timer */
-	outl(db->cr15_data, ioaddr + DCR15);
+	uw32(DCR15, db->cr15_data);
 
 	/* Enable ULI526X Tx/Rx function */
 	db->cr6_data |= CR6_RXSC | CR6_TXSC;
@@ -579,6 +597,7 @@
 					    struct net_device *dev)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	struct tx_desc *txptr;
 	unsigned long flags;
 
@@ -604,7 +623,7 @@
 	}
 
 	/* Disable NIC interrupt */
-	outl(0, dev->base_addr + DCR7);
+	uw32(DCR7, 0);
 
 	/* transmit this packet */
 	txptr = db->tx_insert_ptr;
@@ -615,10 +634,10 @@
 	db->tx_insert_ptr = txptr->next_tx_desc;
 
 	/* Transmit Packet Process */
-	if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
+	if (db->tx_packet_cnt < TX_DESC_CNT) {
 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
 		db->tx_packet_cnt++;			/* Ready to send */
-		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
+		uw32(DCR1, 0x1);			/* Issue Tx polling */
 		dev->trans_start = jiffies;		/* saved time stamp */
 	}
 
@@ -628,7 +647,7 @@
 
 	/* Restore CR7 to enable interrupt */
 	spin_unlock_irqrestore(&db->lock, flags);
-	outl(db->cr7_data, dev->base_addr + DCR7);
+	uw32(DCR7, db->cr7_data);
 
 	/* free this SKB */
 	dev_kfree_skb(skb);
@@ -645,9 +664,7 @@
 static int uli526x_stop(struct net_device *dev)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
-
-	ULI526X_DBUG(0, "uli526x_stop", 0);
+	void __iomem *ioaddr = db->ioaddr;
 
 	/* disable system */
 	netif_stop_queue(dev);
@@ -656,12 +673,12 @@
 	del_timer_sync(&db->timer);
 
 	/* Reset & stop ULI526X board */
-	outl(ULI526X_RESET, ioaddr + DCR0);
+	uw32(DCR0, ULI526X_RESET);
 	udelay(5);
-	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+	db->phy.write(db, db->phy_addr, 0, 0x8000);
 
 	/* free interrupt */
-	free_irq(dev->irq, dev);
+	free_irq(db->pdev->irq, dev);
 
 	/* free allocated rx buffer */
 	uli526x_free_rxbuffer(db);
@@ -679,18 +696,18 @@
 {
 	struct net_device *dev = dev_id;
 	struct uli526x_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = db->ioaddr;
 	unsigned long flags;
 
 	spin_lock_irqsave(&db->lock, flags);
-	outl(0, ioaddr + DCR7);
+	uw32(DCR7, 0);
 
 	/* Got ULI526X status */
-	db->cr5_data = inl(ioaddr + DCR5);
-	outl(db->cr5_data, ioaddr + DCR5);
+	db->cr5_data = ur32(DCR5);
+	uw32(DCR5, db->cr5_data);
 	if ( !(db->cr5_data & 0x180c1) ) {
 		/* Restore CR7 to enable interrupt mask */
-		outl(db->cr7_data, ioaddr + DCR7);
+		uw32(DCR7, db->cr7_data);
 		spin_unlock_irqrestore(&db->lock, flags);
 		return IRQ_HANDLED;
 	}
@@ -718,7 +735,7 @@
 		uli526x_free_tx_pkt(dev, db);
 
 	/* Restore CR7 to enable interrupt mask */
-	outl(db->cr7_data, ioaddr + DCR7);
+	uw32(DCR7, db->cr7_data);
 
 	spin_unlock_irqrestore(&db->lock, flags);
 	return IRQ_HANDLED;
@@ -727,8 +744,10 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void uli526x_poll(struct net_device *dev)
 {
+	struct uli526x_board_info *db = netdev_priv(dev);
+
 	/* ISR grabs the irqsave lock, so this should be safe */
-	uli526x_interrupt(dev->irq, dev);
+	uli526x_interrupt(db->pdev->irq, dev);
 }
 #endif
 
@@ -962,12 +981,7 @@
 
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-	if (np->pdev)
-		strlcpy(info->bus_info, pci_name(np->pdev),
-			sizeof(info->bus_info));
-	else
-		sprintf(info->bus_info, "EISA 0x%lx %d",
-			dev->base_addr, dev->irq);
+	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
@@ -1007,18 +1021,20 @@
 
 static void uli526x_timer(unsigned long data)
 {
-	u32 tmp_cr8;
-	unsigned char tmp_cr12=0;
 	struct net_device *dev = (struct net_device *) data;
 	struct uli526x_board_info *db = netdev_priv(dev);
+	struct uli_phy_ops *phy = &db->phy;
+	void __iomem *ioaddr = db->ioaddr;
  	unsigned long flags;
+	u8 tmp_cr12 = 0;
+	u32 tmp_cr8;
 
 	//ULI526X_DBUG(0, "uli526x_timer()", 0);
 	spin_lock_irqsave(&db->lock, flags);
 
 
 	/* Dynamic reset ULI526X : system error or transmit time-out */
-	tmp_cr8 = inl(db->ioaddr + DCR8);
+	tmp_cr8 = ur32(DCR8);
 	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
 		db->reset_cr8++;
 		db->wait_reset = 1;
@@ -1028,7 +1044,7 @@
 	/* TX polling kick monitor */
 	if ( db->tx_packet_cnt &&
 	     time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
-		outl(0x1, dev->base_addr + DCR1);   // Tx polling again
+		uw32(DCR1, 0x1);   // Tx polling again
 
 		// TX Timeout
 		if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
@@ -1049,7 +1065,7 @@
 	}
 
 	/* Link status check, Dynamic media type change */
-	if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
+	if ((phy->read(db, db->phy_addr, 5) & 0x01e0)!=0)
 		tmp_cr12 = 3;
 
 	if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
@@ -1062,7 +1078,7 @@
 		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
 		/* AUTO don't need */
 		if ( !(db->media_mode & 0x8) )
-			phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+			phy->write(db, db->phy_addr, 0, 0x1000);
 
 		/* AUTO mode, if INT phyxcer link failed, select EXT device */
 		if (db->media_mode & ULI526X_AUTO) {
@@ -1119,12 +1135,13 @@
 static void uli526x_reset_prepare(struct net_device *dev)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 
 	/* Sopt MAC controller */
 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
-	update_cr6(db->cr6_data, dev->base_addr);
-	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
-	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+	update_cr6(db->cr6_data, ioaddr);
+	uw32(DCR7, 0);				/* Disable Interrupt */
+	uw32(DCR5, ur32(DCR5));
 
 	/* Disable upper layer interface */
 	netif_stop_queue(dev);
@@ -1289,7 +1306,7 @@
  *	Using Chain structure, and allocate Tx/Rx buffer
  */
 
-static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr)
+static void uli526x_descriptor_init(struct net_device *dev, void __iomem *ioaddr)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
 	struct tx_desc *tmp_tx;
@@ -1304,14 +1321,14 @@
 	/* tx descriptor start pointer */
 	db->tx_insert_ptr = db->first_tx_desc;
 	db->tx_remove_ptr = db->first_tx_desc;
-	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
+	uw32(DCR4, db->first_tx_desc_dma);	/* TX DESC address */
 
 	/* rx descriptor start pointer */
 	db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
 	db->first_rx_desc_dma =  db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
 	db->rx_insert_ptr = db->first_rx_desc;
 	db->rx_ready_ptr = db->first_rx_desc;
-	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */
+	uw32(DCR3, db->first_rx_desc_dma);	/* RX DESC address */
 
 	/* Init Transmit chain */
 	tmp_buf = db->buf_pool_start;
@@ -1352,11 +1369,9 @@
  *	Update CR6 value
  *	Firstly stop ULI526X, then written value and start
  */
-
-static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
 {
-
-	outl(cr6_data, ioaddr + DCR6);
+	uw32(DCR6, cr6_data);
 	udelay(5);
 }
 
@@ -1375,6 +1390,7 @@
 static void send_filter_frame(struct net_device *dev, int mc_cnt)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	struct netdev_hw_addr *ha;
 	struct tx_desc *txptr;
 	u16 * addrptr;
@@ -1420,9 +1436,9 @@
 		/* Resource Empty */
 		db->tx_packet_cnt++;
 		txptr->tdes0 = cpu_to_le32(0x80000000);
-		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
-		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
-		update_cr6(db->cr6_data, dev->base_addr);
+		update_cr6(db->cr6_data | 0x2000, ioaddr);
+		uw32(DCR1, 0x1);	/* Issue Tx polling */
+		update_cr6(db->cr6_data, ioaddr);
 		dev->trans_start = jiffies;
 	} else
 		netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
@@ -1465,37 +1481,38 @@
  *	Read one word data from the serial ROM
  */
 
-static u16 read_srom_word(long ioaddr, int offset)
+static u16 read_srom_word(struct uli526x_board_info *db, int offset)
 {
-	int i;
+	void __iomem *ioaddr = db->ioaddr;
 	u16 srom_data = 0;
-	long cr9_ioaddr = ioaddr + DCR9;
+	int i;
 
-	outl(CR9_SROM_READ, cr9_ioaddr);
-	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+	uw32(DCR9, CR9_SROM_READ);
+	uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 
 	/* Send the Read Command 110b */
-	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+	srom_clk_write(db, SROM_DATA_1);
+	srom_clk_write(db, SROM_DATA_1);
+	srom_clk_write(db, SROM_DATA_0);
 
 	/* Send the offset */
 	for (i = 5; i >= 0; i--) {
 		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
-		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+		srom_clk_write(db, srom_data);
 	}
 
-	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+	uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 
 	for (i = 16; i > 0; i--) {
-		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+		uw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
 		udelay(5);
-		srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
-		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+		srom_data = (srom_data << 1) |
+			    ((ur32(DCR9) & CR9_CRDOUT) ? 1 : 0);
+		uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 		udelay(5);
 	}
 
-	outl(CR9_SROM_READ, cr9_ioaddr);
+	uw32(DCR9, CR9_SROM_READ);
 	return srom_data;
 }
 
@@ -1506,15 +1523,16 @@
 
 static u8 uli526x_sense_speed(struct uli526x_board_info * db)
 {
+	struct uli_phy_ops *phy = &db->phy;
 	u8 ErrFlag = 0;
 	u16 phy_mode;
 
-	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
-	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+	phy_mode = phy->read(db, db->phy_addr, 1);
+	phy_mode = phy->read(db, db->phy_addr, 1);
 
 	if ( (phy_mode & 0x24) == 0x24 ) {
 
-		phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
+		phy_mode = ((phy->read(db, db->phy_addr, 5) & 0x01e0)<<7);
 		if(phy_mode&0x8000)
 			phy_mode = 0x8000;
 		else if(phy_mode&0x4000)
@@ -1549,10 +1567,11 @@
 
 static void uli526x_set_phyxcer(struct uli526x_board_info *db)
 {
+	struct uli_phy_ops *phy = &db->phy;
 	u16 phy_reg;
 
 	/* Phyxcer capability setting */
-	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
+	phy_reg = phy->read(db, db->phy_addr, 4) & ~0x01e0;
 
 	if (db->media_mode & ULI526X_AUTO) {
 		/* AUTO Mode */
@@ -1573,10 +1592,10 @@
 		phy_reg|=db->PHY_reg4;
 		db->media_mode|=ULI526X_AUTO;
 	}
-	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
+	phy->write(db, db->phy_addr, 4, phy_reg);
 
  	/* Restart Auto-Negotiation */
-	phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
+	phy->write(db, db->phy_addr, 0, 0x1200);
 	udelay(50);
 }
 
@@ -1590,6 +1609,7 @@
 
 static void uli526x_process_mode(struct uli526x_board_info *db)
 {
+	struct uli_phy_ops *phy = &db->phy;
 	u16 phy_reg;
 
 	/* Full Duplex Mode Check */
@@ -1601,10 +1621,10 @@
 	update_cr6(db->cr6_data, db->ioaddr);
 
 	/* 10/100M phyxcer force mode need */
-	if ( !(db->media_mode & 0x8)) {
+	if (!(db->media_mode & 0x8)) {
 		/* Forece Mode */
-		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
-		if ( !(phy_reg & 0x1) ) {
+		phy_reg = phy->read(db, db->phy_addr, 6);
+		if (!(phy_reg & 0x1)) {
 			/* parter without N-Way capability */
 			phy_reg = 0x0;
 			switch(db->op_mode) {
@@ -1613,148 +1633,126 @@
 			case ULI526X_100MHF: phy_reg = 0x2000; break;
 			case ULI526X_100MFD: phy_reg = 0x2100; break;
 			}
-			phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+			phy->write(db, db->phy_addr, 0, phy_reg);
 		}
 	}
 }
 
 
-/*
- *	Write a word to Phy register
- */
-
-static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+/* M5261/M5263 Chip */
+static void phy_writeby_cr9(struct uli526x_board_info *db, u8 phy_addr,
+			    u8 offset, u16 phy_data)
 {
 	u16 i;
-	unsigned long ioaddr;
-
-	if(chip_id == PCI_ULI5263_ID)
-	{
-		phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
-		return;
-	}
-	/* M5261/M5263 Chip */
-	ioaddr = iobase + DCR9;
 
 	/* Send 33 synchronization clock to Phy controller */
 	for (i = 0; i < 35; i++)
-		phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+		phy_write_1bit(db, PHY_DATA_1);
 
 	/* Send start command(01) to Phy */
-	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
-	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+	phy_write_1bit(db, PHY_DATA_0);
+	phy_write_1bit(db, PHY_DATA_1);
 
 	/* Send write command(01) to Phy */
-	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
-	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+	phy_write_1bit(db, PHY_DATA_0);
+	phy_write_1bit(db, PHY_DATA_1);
 
 	/* Send Phy address */
 	for (i = 0x10; i > 0; i = i >> 1)
-		phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+		phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
 
 	/* Send register address */
 	for (i = 0x10; i > 0; i = i >> 1)
-		phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+		phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
 
 	/* written trasnition */
-	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
-	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+	phy_write_1bit(db, PHY_DATA_1);
+	phy_write_1bit(db, PHY_DATA_0);
 
 	/* Write a word data to PHY controller */
-	for ( i = 0x8000; i > 0; i >>= 1)
-		phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
-
+	for (i = 0x8000; i > 0; i >>= 1)
+		phy_write_1bit(db, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
 }
 
-
-/*
- *	Read a word data from phy register
- */
-
-static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+static u16 phy_readby_cr9(struct uli526x_board_info *db, u8 phy_addr, u8 offset)
 {
-	int i;
 	u16 phy_data;
-	unsigned long ioaddr;
-
-	if(chip_id == PCI_ULI5263_ID)
-		return phy_readby_cr10(iobase, phy_addr, offset);
-	/* M5261/M5263 Chip */
-	ioaddr = iobase + DCR9;
+	int i;
 
 	/* Send 33 synchronization clock to Phy controller */
 	for (i = 0; i < 35; i++)
-		phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+		phy_write_1bit(db, PHY_DATA_1);
 
 	/* Send start command(01) to Phy */
-	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
-	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+	phy_write_1bit(db, PHY_DATA_0);
+	phy_write_1bit(db, PHY_DATA_1);
 
 	/* Send read command(10) to Phy */
-	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
-	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+	phy_write_1bit(db, PHY_DATA_1);
+	phy_write_1bit(db, PHY_DATA_0);
 
 	/* Send Phy address */
 	for (i = 0x10; i > 0; i = i >> 1)
-		phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+		phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
 
 	/* Send register address */
 	for (i = 0x10; i > 0; i = i >> 1)
-		phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+		phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
 
 	/* Skip transition state */
-	phy_read_1bit(ioaddr, chip_id);
+	phy_read_1bit(db);
 
 	/* read 16bit data */
 	for (phy_data = 0, i = 0; i < 16; i++) {
 		phy_data <<= 1;
-		phy_data |= phy_read_1bit(ioaddr, chip_id);
+		phy_data |= phy_read_1bit(db);
 	}
 
 	return phy_data;
 }
 
-static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
+static u16 phy_readby_cr10(struct uli526x_board_info *db, u8 phy_addr,
+			   u8 offset)
 {
-	unsigned long ioaddr,cr10_value;
+	void __iomem *ioaddr = db->ioaddr;
+	u32 cr10_value = phy_addr;
 
-	ioaddr = iobase + DCR10;
-	cr10_value = phy_addr;
-	cr10_value = (cr10_value<<5) + offset;
-	cr10_value = (cr10_value<<16) + 0x08000000;
-	outl(cr10_value,ioaddr);
+	cr10_value = (cr10_value <<  5) + offset;
+	cr10_value = (cr10_value << 16) + 0x08000000;
+	uw32(DCR10, cr10_value);
 	udelay(1);
-	while(1)
-	{
-		cr10_value = inl(ioaddr);
-		if(cr10_value&0x10000000)
+	while (1) {
+		cr10_value = ur32(DCR10);
+		if (cr10_value & 0x10000000)
 			break;
 	}
 	return cr10_value & 0x0ffff;
 }
 
-static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
+static void phy_writeby_cr10(struct uli526x_board_info *db, u8 phy_addr,
+			     u8 offset, u16 phy_data)
 {
-	unsigned long ioaddr,cr10_value;
+	void __iomem *ioaddr = db->ioaddr;
+	u32 cr10_value = phy_addr;
 
-	ioaddr = iobase + DCR10;
-	cr10_value = phy_addr;
-	cr10_value = (cr10_value<<5) + offset;
-	cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
-	outl(cr10_value,ioaddr);
+	cr10_value = (cr10_value <<  5) + offset;
+	cr10_value = (cr10_value << 16) + 0x04000000 + phy_data;
+	uw32(DCR10, cr10_value);
 	udelay(1);
 }
 /*
  *	Write one bit data to Phy Controller
  */
 
-static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
+static void phy_write_1bit(struct uli526x_board_info *db, u32 data)
 {
-	outl(phy_data , ioaddr);			/* MII Clock Low */
+	void __iomem *ioaddr = db->ioaddr;
+
+	uw32(DCR9, data);		/* MII Clock Low */
 	udelay(1);
-	outl(phy_data  | MDCLKH, ioaddr);	/* MII Clock High */
+	uw32(DCR9, data | MDCLKH);	/* MII Clock High */
 	udelay(1);
-	outl(phy_data , ioaddr);			/* MII Clock Low */
+	uw32(DCR9, data);		/* MII Clock Low */
 	udelay(1);
 }
 
@@ -1763,14 +1761,15 @@
  *	Read one bit phy data from PHY controller
  */
 
-static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
+static u16 phy_read_1bit(struct uli526x_board_info *db)
 {
+	void __iomem *ioaddr = db->ioaddr;
 	u16 phy_data;
 
-	outl(0x50000 , ioaddr);
+	uw32(DCR9, 0x50000);
 	udelay(1);
-	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
-	outl(0x40000 , ioaddr);
+	phy_data = (ur32(DCR9) >> 19) & 0x1;
+	uw32(DCR9, 0x40000);
 	udelay(1);
 
 	return phy_data;
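
For context on the uli526x.c conversion above: the driver moves from port I/O
on a long ioaddr to memory-mapped accessors working on the void __iomem *
returned by pci_iomap(), and selects the per-chip PHY accessors once at probe
time through db->phy.  A minimal sketch of the pattern follows; it assumes the
uw32()/ur32() helpers expand to iowrite32()/ioread32() against a local ioaddr,
like the xw32()/xr32() and dw32()/dr32() macros introduced further down, and
the example_*() helpers are hypothetical names used only for illustration.

#define uw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define ur32(reg)	ioread32(ioaddr + (reg))

/* Hypothetical helper: demand a Tx poll, then restore the interrupt mask. */
static void example_kick_tx(struct uli526x_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;	/* mapped once with pci_iomap() */

	uw32(DCR1, 0x1);			/* Tx polling demand */
	uw32(DCR7, db->cr7_data);		/* re-enable interrupts */
}

/* Hypothetical helper: restart autonegotiation via the probe-selected PHY ops. */
static void example_restart_aneg(struct uli526x_board_info *db)
{
	db->phy.write(db, db->phy_addr, 0, 0x1200);	/* BMCR: enable + restart ANEG */
}
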
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 2ac6fff..4d1ffca 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -400,9 +400,6 @@
 	   No hold time required! */
 	iowrite32(0x00000001, ioaddr + PCIBusCfg);
 
-	dev->base_addr = (unsigned long)ioaddr;
-	dev->irq = irq;
-
 	np = netdev_priv(dev);
 	np->pci_dev = pdev;
 	np->chip_id = chip_idx;
@@ -635,17 +632,18 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base_addr;
+	const int irq = np->pci_dev->irq;
 	int i;
 
 	iowrite32(0x00000001, ioaddr + PCIBusCfg);		/* Reset */
 
 	netif_device_detach(dev);
-	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (i)
 		goto out_err;
 
 	if (debug > 1)
-		netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq);
+		netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
 
 	if((i=alloc_ringdesc(dev)))
 		goto out_err;
@@ -932,6 +930,7 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base_addr;
+	const int irq = np->pci_dev->irq;
 
 	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
 		 ioread32(ioaddr + IntrStatus));
@@ -951,7 +950,7 @@
 	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
 	printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
 
-	disable_irq(dev->irq);
+	disable_irq(irq);
 	spin_lock_irq(&np->lock);
 	/*
 	 * Under high load dirty_tx and the internal tx descriptor pointer
@@ -966,7 +965,7 @@
 	init_rxtx_rings(dev);
 	init_registers(dev);
 	spin_unlock_irq(&np->lock);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 
 	netif_wake_queue(dev);
 	dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1500,7 +1499,7 @@
 	iowrite32(0x0000, ioaddr + IntrEnable);
 	spin_unlock_irq(&np->lock);
 
-	free_irq(dev->irq, dev);
+	free_irq(np->pci_dev->irq, dev);
 	wmb();
 	netif_device_attach(dev);
 
@@ -1589,7 +1588,7 @@
 		iowrite32(0, ioaddr + IntrEnable);
 		spin_unlock_irq(&np->lock);
 
-		synchronize_irq(dev->irq);
+		synchronize_irq(np->pci_dev->irq);
 		netif_tx_disable(dev);
 
 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
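
The winbond-840.c hunks above apply the same idea to interrupt handling: the
irq is taken from np->pci_dev->irq at the point of use rather than from the
net_device fields being removed.  A minimal sketch of the open-side pattern,
assuming intr_handler keeps its existing prototype; the w840_example_open()
name is illustrative only.

static int w840_example_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;	/* no dev->irq left to rely on */
	int rc;

	rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;

	/* ... descriptor ring allocation and register init go here ... */
	netif_start_queue(dev);
	return 0;
}
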
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index fdb329f..138bf83 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -41,7 +41,9 @@
 MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
 MODULE_LICENSE("GPL");
 
-
+#define xw32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define xr32(reg)	ioread32(ioaddr + (reg))
+#define xr8(reg)	ioread8(ioaddr + (reg))
 
 /* IO registers on the card, offsets */
 #define CSR0	0x00
@@ -83,7 +85,7 @@
 
 	struct sk_buff *tx_skb[4];
 
-	unsigned long io_port;
+	void __iomem *ioaddr;
 	int open;
 
 	/* transmit_used is the rotating counter that indicates which transmit
@@ -137,7 +139,7 @@
 
 
 static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
-	{0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
+	{ PCI_VDEVICE(XIRCOM, 0x0003), },
 	{0,},
 };
 MODULE_DEVICE_TABLE(pci, xircom_pci_table);
@@ -146,9 +148,7 @@
 	.name		= "xircom_cb",
 	.id_table	= xircom_pci_table,
 	.probe		= xircom_probe,
-	.remove		= xircom_remove,
-	.suspend =NULL,
-	.resume =NULL
+	.remove		= __devexit_p(xircom_remove),
 };
 
 
@@ -192,15 +192,18 @@
  */
 static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	struct device *d = &pdev->dev;
 	struct net_device *dev = NULL;
 	struct xircom_private *private;
 	unsigned long flags;
 	unsigned short tmp16;
+	int rc;
 
 	/* First do the PCI initialisation */
 
-	if (pci_enable_device(pdev))
-		return -ENODEV;
+	rc = pci_enable_device(pdev);
+	if (rc < 0)
+		goto out;
 
 	/* disable all powermanagement */
 	pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
@@ -211,11 +214,13 @@
 	pci_read_config_word (pdev,PCI_STATUS, &tmp16);
 	pci_write_config_word (pdev, PCI_STATUS,tmp16);
 
-	if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
+	rc = pci_request_regions(pdev, "xircom_cb");
+	if (rc < 0) {
 		pr_err("%s: failed to allocate io-region\n", __func__);
-		return -ENODEV;
+		goto err_disable;
 	}
 
+	rc = -ENOMEM;
 	/*
 	   Before changing the hardware, allocate the memory.
 	   This way, we can fail gracefully if not enough memory
@@ -223,17 +228,21 @@
 	 */
 	dev = alloc_etherdev(sizeof(struct xircom_private));
 	if (!dev)
-		goto device_fail;
+		goto err_release;
 
 	private = netdev_priv(dev);
 
 	/* Allocate the send/receive buffers */
-	private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
+	private->rx_buffer = dma_alloc_coherent(d, 8192,
+						&private->rx_dma_handle,
+						GFP_KERNEL);
 	if (private->rx_buffer == NULL) {
 		pr_err("%s: no memory for rx buffer\n", __func__);
 		goto rx_buf_fail;
 	}
-	private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
+	private->tx_buffer = dma_alloc_coherent(d, 8192,
+						&private->tx_dma_handle,
+						GFP_KERNEL);
 	if (private->tx_buffer == NULL) {
 		pr_err("%s: no memory for tx buffer\n", __func__);
 		goto tx_buf_fail;
@@ -244,10 +253,13 @@
 
 	private->dev = dev;
 	private->pdev = pdev;
-	private->io_port = pci_resource_start(pdev, 0);
+
+	/* IO range. */
+	private->ioaddr = pci_iomap(pdev, 0, 0);
+	if (!private->ioaddr)
+		goto reg_fail;
+
 	spin_lock_init(&private->lock);
-	dev->irq = pdev->irq;
-	dev->base_addr = private->io_port;
 
 	initialize_card(private);
 	read_mac_address(private);
@@ -256,9 +268,10 @@
 	dev->netdev_ops = &netdev_ops;
 	pci_set_drvdata(pdev, dev);
 
-	if (register_netdev(dev)) {
+	rc = register_netdev(dev);
+	if (rc < 0) {
 		pr_err("%s: netdevice registration failed\n", __func__);
-		goto reg_fail;
+		goto err_unmap;
 	}
 
 	netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
@@ -273,17 +286,23 @@
 	spin_unlock_irqrestore(&private->lock,flags);
 
 	trigger_receive(private);
+out:
+	return rc;
 
-	return 0;
-
+err_unmap:
+	pci_iounmap(pdev, private->ioaddr);
 reg_fail:
-	kfree(private->tx_buffer);
+	pci_set_drvdata(pdev, NULL);
+	dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
 tx_buf_fail:
-	kfree(private->rx_buffer);
+	dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
 rx_buf_fail:
 	free_netdev(dev);
-device_fail:
-	return -ENODEV;
+err_release:
+	pci_release_regions(pdev);
+err_disable:
+	pci_disable_device(pdev);
+	goto out;
 }
 
 
@@ -297,25 +316,28 @@
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct xircom_private *card = netdev_priv(dev);
+	struct device *d = &pdev->dev;
 
-	pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
-	pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
-
-	release_region(dev->base_addr, 128);
 	unregister_netdev(dev);
-	free_netdev(dev);
+	pci_iounmap(pdev, card->ioaddr);
 	pci_set_drvdata(pdev, NULL);
+	dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
+	dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
+	free_netdev(dev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
 }
 
 static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
 {
 	struct net_device *dev = (struct net_device *) dev_instance;
 	struct xircom_private *card = netdev_priv(dev);
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int status;
 	int i;
 
 	spin_lock(&card->lock);
-	status = inl(card->io_port+CSR5);
+	status = xr32(CSR5);
 
 #if defined DEBUG && DEBUG > 1
 	print_binary(status);
@@ -345,7 +367,7 @@
 	/* Clear all remaining interrupts */
 	status |= 0xffffffff; /* FIXME: make this clear only the
 				        real existing bits */
-	outl(status,card->io_port+CSR5);
+	xw32(CSR5, status);
 
 
 	for (i=0;i<NUMDESCRIPTORS;i++)
@@ -423,11 +445,11 @@
 static int xircom_open(struct net_device *dev)
 {
 	struct xircom_private *xp = netdev_priv(dev);
+	const int irq = xp->pdev->irq;
 	int retval;
 
-	netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n",
-		    dev->irq);
-	retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
+	netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", irq);
+	retval = request_irq(irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
 	if (retval)
 		return retval;
 
@@ -459,7 +481,7 @@
 	spin_unlock_irqrestore(&card->lock,flags);
 
 	card->open = 0;
-	free_irq(dev->irq,dev);
+	free_irq(card->pdev->irq, dev);
 
 	return 0;
 
@@ -469,35 +491,39 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xircom_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	xircom_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct xircom_private *xp = netdev_priv(dev);
+	const int irq = xp->pdev->irq;
+
+	disable_irq(irq);
+	xircom_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
 
 static void initialize_card(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned long flags;
+	u32 val;
 
 	spin_lock_irqsave(&card->lock, flags);
 
 	/* First: reset the card */
-	val = inl(card->io_port + CSR0);
+	val = xr32(CSR0);
 	val |= 0x01;		/* Software reset */
-	outl(val, card->io_port + CSR0);
+	xw32(CSR0, val);
 
 	udelay(100);		/* give the card some time to reset */
 
-	val = inl(card->io_port + CSR0);
+	val = xr32(CSR0);
 	val &= ~0x01;		/* disable Software reset */
-	outl(val, card->io_port + CSR0);
+	xw32(CSR0, val);
 
 
 	val = 0;		/* Value 0x00 is a safe and conservative value
 				   for the PCI configuration settings */
-	outl(val, card->io_port + CSR0);
+	xw32(CSR0, val);
 
 
 	disable_all_interrupts(card);
@@ -515,10 +541,9 @@
 */
 static void trigger_transmit(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 
-	val = 0;
-	outl(val, card->io_port + CSR1);
+	xw32(CSR1, 0);
 }
 
 /*
@@ -530,10 +555,9 @@
 */
 static void trigger_receive(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 
-	val = 0;
-	outl(val, card->io_port + CSR2);
+	xw32(CSR2, 0);
 }
 
 /*
@@ -542,6 +566,7 @@
 */
 static void setup_descriptors(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	u32 address;
 	int i;
 
@@ -571,7 +596,7 @@
 	wmb();
 	/* Write the receive descriptor ring address to the card */
 	address = card->rx_dma_handle;
-	outl(address, card->io_port + CSR3);	/* Receive descr list address */
+	xw32(CSR3, address);	/* Receive descr list address */
 
 
 	/* transmit descriptors */
@@ -596,7 +621,7 @@
 	wmb();
 	/* wite the transmit descriptor ring to the card */
 	address = card->tx_dma_handle;
-	outl(address, card->io_port + CSR4);	/* xmit descr list address */
+	xw32(CSR4, address);	/* xmit descr list address */
 }
 
 /*
@@ -605,11 +630,12 @@
 */
 static void remove_descriptors(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
 	val = 0;
-	outl(val, card->io_port + CSR3);	/* Receive descriptor address */
-	outl(val, card->io_port + CSR4);	/* Send descriptor address */
+	xw32(CSR3, val);	/* Receive descriptor address */
+	xw32(CSR4, val);	/* Send descriptor address */
 }
 
 /*
@@ -620,17 +646,17 @@
 */
 static int link_status_changed(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR5);	/* Status register */
-
-	if ((val & (1 << 27)) == 0)		/* no change */
+	val = xr32(CSR5);	/* Status register */
+	if (!(val & (1 << 27)))	/* no change */
 		return 0;
 
 	/* clear the event by writing a 1 to the bit in the
 	   status register. */
 	val = (1 << 27);
-	outl(val, card->io_port + CSR5);
+	xw32(CSR5, val);
 
 	return 1;
 }
@@ -642,11 +668,9 @@
 */
 static int transmit_active(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 
-	val = inl(card->io_port + CSR5);	/* Status register */
-
-	if ((val & (7 << 20)) == 0)		/* transmitter disabled */
+	if (!(xr32(CSR5) & (7 << 20)))	/* transmitter disabled */
 		return 0;
 
 	return 1;
@@ -658,11 +682,9 @@
 */
 static int receive_active(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 
-	val = inl(card->io_port + CSR5);	/* Status register */
-
-	if ((val & (7 << 17)) == 0)		/* receiver disabled */
+	if (!(xr32(CSR5) & (7 << 17)))	/* receiver disabled */
 		return 0;
 
 	return 1;
@@ -680,10 +702,11 @@
 */
 static void activate_receiver(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 	int counter;
 
-	val = inl(card->io_port + CSR6);	/* Operation mode */
+	val = xr32(CSR6);	/* Operation mode */
 
 	/* If the "active" bit is set and the receiver is already
 	   active, no need to do the expensive thing */
@@ -692,7 +715,7 @@
 
 
 	val = val & ~2;		/* disable the receiver */
-	outl(val, card->io_port + CSR6);
+	xw32(CSR6, val);
 
 	counter = 10;
 	while (counter > 0) {
@@ -706,9 +729,9 @@
 	}
 
 	/* enable the receiver */
-	val = inl(card->io_port + CSR6);	/* Operation mode */
-	val = val | 2;				/* enable the receiver */
-	outl(val, card->io_port + CSR6);
+	val = xr32(CSR6);	/* Operation mode */
+	val = val | 2;		/* enable the receiver */
+	xw32(CSR6, val);
 
 	/* now wait for the card to activate again */
 	counter = 10;
@@ -733,12 +756,13 @@
 */
 static void deactivate_receiver(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 	int counter;
 
-	val = inl(card->io_port + CSR6);	/* Operation mode */
-	val = val & ~2;				/* disable the receiver */
-	outl(val, card->io_port + CSR6);
+	val = xr32(CSR6);	/* Operation mode */
+	val = val & ~2;		/* disable the receiver */
+	xw32(CSR6, val);
 
 	counter = 10;
 	while (counter > 0) {
@@ -765,10 +789,11 @@
 */
 static void activate_transmitter(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 	int counter;
 
-	val = inl(card->io_port + CSR6);	/* Operation mode */
+	val = xr32(CSR6);	/* Operation mode */
 
 	/* If the "active" bit is set and the receiver is already
 	   active, no need to do the expensive thing */
@@ -776,7 +801,7 @@
 		return;
 
 	val = val & ~(1 << 13);	/* disable the transmitter */
-	outl(val, card->io_port + CSR6);
+	xw32(CSR6, val);
 
 	counter = 10;
 	while (counter > 0) {
@@ -791,9 +816,9 @@
 	}
 
 	/* enable the transmitter */
-	val = inl(card->io_port + CSR6);	/* Operation mode */
+	val = xr32(CSR6);	/* Operation mode */
 	val = val | (1 << 13);	/* enable the transmitter */
-	outl(val, card->io_port + CSR6);
+	xw32(CSR6, val);
 
 	/* now wait for the card to activate again */
 	counter = 10;
@@ -818,12 +843,13 @@
 */
 static void deactivate_transmitter(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 	int counter;
 
-	val = inl(card->io_port + CSR6);	/* Operation mode */
+	val = xr32(CSR6);	/* Operation mode */
 	val = val & ~2;		/* disable the transmitter */
-	outl(val, card->io_port + CSR6);
+	xw32(CSR6, val);
 
 	counter = 20;
 	while (counter > 0) {
@@ -846,11 +872,12 @@
 */
 static void enable_transmit_interrupt(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR7);	/* Interrupt enable register */
-	val |= 1;				/* enable the transmit interrupt */
-	outl(val, card->io_port + CSR7);
+	val = xr32(CSR7);	/* Interrupt enable register */
+	val |= 1;		/* enable the transmit interrupt */
+	xw32(CSR7, val);
 }
 
 
@@ -861,11 +888,12 @@
 */
 static void enable_receive_interrupt(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR7);	/* Interrupt enable register */
-	val = val | (1 << 6);			/* enable the receive interrupt */
-	outl(val, card->io_port + CSR7);
+	val = xr32(CSR7);	/* Interrupt enable register */
+	val = val | (1 << 6);	/* enable the receive interrupt */
+	xw32(CSR7, val);
 }
 
 /*
@@ -875,11 +903,12 @@
 */
 static void enable_link_interrupt(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR7);	/* Interrupt enable register */
-	val = val | (1 << 27);			/* enable the link status chage interrupt */
-	outl(val, card->io_port + CSR7);
+	val = xr32(CSR7);	/* Interrupt enable register */
+	val = val | (1 << 27);	/* enable the link status change interrupt */
+	xw32(CSR7, val);
 }
 
 
@@ -891,10 +920,9 @@
 */
 static void disable_all_interrupts(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 
-	val = 0;				/* disable all interrupts */
-	outl(val, card->io_port + CSR7);
+	xw32(CSR7, 0);
 }
 
 /*
@@ -904,9 +932,10 @@
 */
 static void enable_common_interrupts(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR7);	/* Interrupt enable register */
+	val = xr32(CSR7);	/* Interrupt enable register */
 	val |= (1<<16); /* Normal Interrupt Summary */
 	val |= (1<<15); /* Abnormal Interrupt Summary */
 	val |= (1<<13); /* Fatal bus error */
@@ -915,7 +944,7 @@
 	val |= (1<<5);  /* Transmit Underflow */
 	val |= (1<<2);  /* Transmit Buffer Unavailable */
 	val |= (1<<1);  /* Transmit Process Stopped */
-	outl(val, card->io_port + CSR7);
+	xw32(CSR7, val);
 }
 
 /*
@@ -925,11 +954,12 @@
 */
 static int enable_promisc(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR6);
+	val = xr32(CSR6);
 	val = val | (1 << 6);
-	outl(val, card->io_port + CSR6);
+	xw32(CSR6, val);
 
 	return 1;
 }
@@ -944,13 +974,16 @@
 */
 static int link_status(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
+	u8 val;
 
-	val = inb(card->io_port + CSR12);
+	val = xr8(CSR12);
 
-	if (!(val&(1<<2)))  /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
+	/* bit 2 is 0 for 10mbit link, 1 for not a 10mbit link */
+	if (!(val & (1 << 2)))
 		return 10;
-	if (!(val&(1<<1)))  /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
+	/* bit 1 is 0 for 100mbit link, 1 for not a 100mbit link */
+	if (!(val & (1 << 1)))
 		return 100;
 
 	/* If we get here -> no link at all */
@@ -969,29 +1002,31 @@
  */
 static void read_mac_address(struct xircom_private *card)
 {
-	unsigned char j, tuple, link, data_id, data_count;
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned long flags;
+	u8 link;
 	int i;
 
 	spin_lock_irqsave(&card->lock, flags);
 
-	outl(1 << 12, card->io_port + CSR9);	/* enable boot rom access */
+	xw32(CSR9, 1 << 12);	/* enable boot rom access */
 	for (i = 0x100; i < 0x1f7; i += link + 2) {
-		outl(i, card->io_port + CSR10);
-		tuple = inl(card->io_port + CSR9) & 0xff;
-		outl(i + 1, card->io_port + CSR10);
-		link = inl(card->io_port + CSR9) & 0xff;
-		outl(i + 2, card->io_port + CSR10);
-		data_id = inl(card->io_port + CSR9) & 0xff;
-		outl(i + 3, card->io_port + CSR10);
-		data_count = inl(card->io_port + CSR9) & 0xff;
+		u8 tuple, data_id, data_count;
+
+		xw32(CSR10, i);
+		tuple = xr32(CSR9);
+		xw32(CSR10, i + 1);
+		link = xr32(CSR9);
+		xw32(CSR10, i + 2);
+		data_id = xr32(CSR9);
+		xw32(CSR10, i + 3);
+		data_count = xr32(CSR9);
 		if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
-			/*
-			 * This is it.  We have the data we want.
-			 */
+			int j;
+
 			for (j = 0; j < 6; j++) {
-				outl(i + j + 4, card->io_port + CSR10);
-				card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff;
+				xw32(CSR10, i + j + 4);
+				card->dev->dev_addr[j] = xr32(CSR9) & 0xff;
 			}
 			break;
 		} else if (link == 0) {
@@ -1010,6 +1045,7 @@
  */
 static void transceiver_voodoo(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned long flags;
 
 	/* disable all powermanagement */
@@ -1019,14 +1055,14 @@
 
 	spin_lock_irqsave(&card->lock, flags);
 
-	outl(0x0008, card->io_port + CSR15);
-        udelay(25);
-        outl(0xa8050000, card->io_port + CSR15);
-        udelay(25);
-        outl(0xa00f0000, card->io_port + CSR15);
-        udelay(25);
+	xw32(CSR15, 0x0008);
+	udelay(25);
+	xw32(CSR15, 0xa8050000);
+	udelay(25);
+	xw32(CSR15, 0xa00f0000);
+	udelay(25);
 
-        spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irqrestore(&card->lock, flags);
 
 	netif_start_queue(card->dev);
 }
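
One detail of the xw32()/xr32()/xr8() helpers added to xircom_cb.c above: they
reference a variable named ioaddr, so every converted function first declares
a local void __iomem *ioaddr = card->ioaddr;.  A minimal sketch of that
convention, using a hypothetical example_mask_rx_irq() helper; the same
implicit-local style is reused by the dw32()/dr32() family added to dl2k.c
below.

static void example_mask_rx_irq(struct xircom_private *card)
{
	void __iomem *ioaddr = card->ioaddr;	/* expected by xw32()/xr32() */
	u32 val;

	val = xr32(CSR7);	/* interrupt enable register */
	val &= ~(1 << 6);	/* mask the receive interrupt */
	xw32(CSR7, val);
}
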
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index b2dc2c8..ef4499d 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -16,6 +16,13 @@
 #include "dl2k.h"
 #include <linux/dma-mapping.h>
 
+#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
+#define dw8(reg, val)	iowrite8(val, ioaddr + (reg))
+#define dr32(reg)	ioread32(ioaddr + (reg))
+#define dr16(reg)	ioread16(ioaddr + (reg))
+#define dr8(reg)	ioread8(ioaddr + (reg))
+
 static char version[] __devinitdata =
       KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
 #define MAX_UNITS 8
@@ -49,8 +56,13 @@
 /* Enable the default interrupts */
 #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
        UpdateStats | LinkEvent)
-#define EnableInt() \
-writew(DEFAULT_INTR, ioaddr + IntEnable)
+
+static void dl2k_enable_int(struct netdev_private *np)
+{
+	void __iomem *ioaddr = np->ioaddr;
+
+	dw16(IntEnable, DEFAULT_INTR);
+}
 
 static const int max_intrloop = 50;
 static const int multicast_filter_limit = 0x40;
@@ -73,7 +85,7 @@
 static int rio_close (struct net_device *dev);
 static int find_miiphy (struct net_device *dev);
 static int parse_eeprom (struct net_device *dev);
-static int read_eeprom (long ioaddr, int eep_addr);
+static int read_eeprom (struct netdev_private *, int eep_addr);
 static int mii_wait_link (struct net_device *dev, int wait);
 static int mii_set_media (struct net_device *dev);
 static int mii_get_media (struct net_device *dev);
@@ -106,7 +118,7 @@
 	static int card_idx;
 	int chip_idx = ent->driver_data;
 	int err, irq;
-	long ioaddr;
+	void __iomem *ioaddr;
 	static int version_printed;
 	void *ring_space;
 	dma_addr_t ring_dma;
@@ -124,26 +136,29 @@
 		goto err_out_disable;
 
 	pci_set_master (pdev);
+
+	err = -ENOMEM;
+
 	dev = alloc_etherdev (sizeof (*np));
-	if (!dev) {
-		err = -ENOMEM;
+	if (!dev)
 		goto err_out_res;
-	}
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-#ifdef MEM_MAPPING
-	ioaddr = pci_resource_start (pdev, 1);
-	ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
-	if (!ioaddr) {
-		err = -ENOMEM;
-		goto err_out_dev;
-	}
-#else
-	ioaddr = pci_resource_start (pdev, 0);
-#endif
-	dev->base_addr = ioaddr;
-	dev->irq = irq;
 	np = netdev_priv(dev);
+
+	/* IO registers range. */
+	ioaddr = pci_iomap(pdev, 0, 0);
+	if (!ioaddr)
+		goto err_out_dev;
+	np->eeprom_addr = ioaddr;
+
+#ifdef MEM_MAPPING
+	/* MM registers range. */
+	ioaddr = pci_iomap(pdev, 1, 0);
+	if (!ioaddr)
+		goto err_out_iounmap;
+#endif
+	np->ioaddr = ioaddr;
 	np->chip_id = chip_idx;
 	np->pdev = pdev;
 	spin_lock_init (&np->tx_lock);
@@ -239,7 +254,7 @@
 		goto err_out_unmap_rx;
 
 	/* Fiber device? */
-	np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
+	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
 	np->link_status = 0;
 	/* Set media and reset PHY */
 	if (np->phy_media) {
@@ -276,22 +291,20 @@
 		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
 	return 0;
 
-      err_out_unmap_rx:
+err_out_unmap_rx:
 	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
-      err_out_unmap_tx:
+err_out_unmap_tx:
 	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
-      err_out_iounmap:
+err_out_iounmap:
 #ifdef MEM_MAPPING
-	iounmap ((void *) ioaddr);
-
-      err_out_dev:
+	pci_iounmap(pdev, np->ioaddr);
 #endif
+	pci_iounmap(pdev, np->eeprom_addr);
+err_out_dev:
 	free_netdev (dev);
-
-      err_out_res:
+err_out_res:
 	pci_release_regions (pdev);
-
-      err_out_disable:
+err_out_disable:
 	pci_disable_device (pdev);
 	return err;
 }
@@ -299,11 +312,9 @@
 static int
 find_miiphy (struct net_device *dev)
 {
+	struct netdev_private *np = netdev_priv(dev);
 	int i, phy_found = 0;
-	struct netdev_private *np;
-	long ioaddr;
 	np = netdev_priv(dev);
-	ioaddr = dev->base_addr;
 	np->phy_addr = 1;
 
 	for (i = 31; i >= 0; i--) {
@@ -323,26 +334,19 @@
 static int
 parse_eeprom (struct net_device *dev)
 {
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	int i, j;
-	long ioaddr = dev->base_addr;
 	u8 sromdata[256];
 	u8 *psib;
 	u32 crc;
 	PSROM_t psrom = (PSROM_t) sromdata;
-	struct netdev_private *np = netdev_priv(dev);
 
 	int cid, next;
 
-#ifdef	MEM_MAPPING
-	ioaddr = pci_resource_start (np->pdev, 0);
-#endif
-	/* Read eeprom */
-	for (i = 0; i < 128; i++) {
-		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
-	}
-#ifdef	MEM_MAPPING
-	ioaddr = dev->base_addr;
-#endif
+	for (i = 0; i < 128; i++)
+		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
+
 	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
 		/* Check CRC */
 		crc = ~ether_crc_le (256 - 4, sromdata);
@@ -378,8 +382,7 @@
 			return 0;
 		case 2:	/* Duplex Polarity */
 			np->duplex_polarity = psib[i];
-			writeb (readb (ioaddr + PhyCtrl) | psib[i],
-				ioaddr + PhyCtrl);
+			dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
 			break;
 		case 3:	/* Wake Polarity */
 			np->wake_polarity = psib[i];
@@ -407,59 +410,57 @@
 rio_open (struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = np->ioaddr;
+	const int irq = np->pdev->irq;
 	int i;
 	u16 macctrl;
 
-	i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
 	if (i)
 		return i;
 
 	/* Reset all logic functions */
-	writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
-		ioaddr + ASICCtrl + 2);
+	dw16(ASICCtrl + 2,
+	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
 	mdelay(10);
 
 	/* DebugCtrl bit 4, 5, 9 must set */
-	writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);
+	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
 
 	/* Jumbo frame */
 	if (np->jumbo != 0)
-		writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);
+		dw16(MaxFrameSize, MAX_JUMBO+14);
 
 	alloc_list (dev);
 
 	/* Get station address */
 	for (i = 0; i < 6; i++)
-		writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);
+		dw8(StationAddr0 + i, dev->dev_addr[i]);
 
 	set_multicast (dev);
 	if (np->coalesce) {
-		writel (np->rx_coalesce | np->rx_timeout << 16,
-			ioaddr + RxDMAIntCtrl);
+		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
 	}
 	/* Set RIO to poll every N*320nsec. */
-	writeb (0x20, ioaddr + RxDMAPollPeriod);
-	writeb (0xff, ioaddr + TxDMAPollPeriod);
-	writeb (0x30, ioaddr + RxDMABurstThresh);
-	writeb (0x30, ioaddr + RxDMAUrgentThresh);
-	writel (0x0007ffff, ioaddr + RmonStatMask);
+	dw8(RxDMAPollPeriod, 0x20);
+	dw8(TxDMAPollPeriod, 0xff);
+	dw8(RxDMABurstThresh, 0x30);
+	dw8(RxDMAUrgentThresh, 0x30);
+	dw32(RmonStatMask, 0x0007ffff);
 	/* clear statistics */
 	clear_stats (dev);
 
 	/* VLAN supported */
 	if (np->vlan) {
 		/* priority field in RxDMAIntCtrl  */
-		writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
-			ioaddr + RxDMAIntCtrl);
+		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
 		/* VLANId */
-		writew (np->vlan, ioaddr + VLANId);
+		dw16(VLANId, np->vlan);
 		/* Length/Type should be 0x8100 */
-		writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
+		dw32(VLANTag, 0x8100 << 16 | np->vlan);
 		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
 		   VLAN information tagged by TFC' VID, CFI fields. */
-		writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
-			ioaddr + MACCtrl);
+		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
 	}
 
 	init_timer (&np->timer);
@@ -469,20 +470,18 @@
 	add_timer (&np->timer);
 
 	/* Start Tx/Rx */
-	writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
-			ioaddr + MACCtrl);
+	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);
 
 	macctrl = 0;
 	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
 	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
 	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
 	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
-	writew(macctrl,	ioaddr + MACCtrl);
+	dw16(MACCtrl, macctrl);
 
 	netif_start_queue (dev);
 
-	/* Enable default interrupts */
-	EnableInt ();
+	dl2k_enable_int(np);
 	return 0;
 }
 
@@ -533,10 +532,11 @@
 static void
 rio_tx_timeout (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 
 	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
-		dev->name, readl (ioaddr + TxStatus));
+		dev->name, dr32(TxStatus));
 	rio_free_tx(dev, 0);
 	dev->if_port = 0;
 	dev->trans_start = jiffies; /* prevent tx timeout */
@@ -547,6 +547,7 @@
 alloc_list (struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	int i;
 
 	np->cur_rx = np->cur_tx = 0;
@@ -594,24 +595,23 @@
 	}
 
 	/* Set RFDListPtr */
-	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
-	writel (0, dev->base_addr + RFDListPtr1);
+	dw32(RFDListPtr0, np->rx_ring_dma);
+	dw32(RFDListPtr1, 0);
 }
 
 static netdev_tx_t
 start_xmit (struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	struct netdev_desc *txdesc;
 	unsigned entry;
-	u32 ioaddr;
 	u64 tfc_vlan_tag = 0;
 
 	if (np->link_status == 0) {	/* Link Down */
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
-	ioaddr = dev->base_addr;
 	entry = np->cur_tx % TX_RING_SIZE;
 	np->tx_skbuff[entry] = skb;
 	txdesc = &np->tx_ring[entry];
@@ -646,9 +646,9 @@
 					      (1 << FragCountShift));
 
 	/* TxDMAPollNow */
-	writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
+	dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
 	/* Schedule ISR */
-	writel(10000, ioaddr + CountDown);
+	dw32(CountDown, 10000);
 	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
 	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
 			< TX_QUEUE_LEN - 1 && np->speed != 10) {
@@ -658,10 +658,10 @@
 	}
 
 	/* The first TFDListPtr */
-	if (readl (dev->base_addr + TFDListPtr0) == 0) {
-		writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
-			dev->base_addr + TFDListPtr0);
-		writel (0, dev->base_addr + TFDListPtr1);
+	if (!dr32(TFDListPtr0)) {
+		dw32(TFDListPtr0, np->tx_ring_dma +
+		     entry * sizeof (struct netdev_desc));
+		dw32(TFDListPtr1, 0);
 	}
 
 	return NETDEV_TX_OK;
@@ -671,17 +671,15 @@
 rio_interrupt (int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
-	struct netdev_private *np;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	unsigned int_status;
-	long ioaddr;
 	int cnt = max_intrloop;
 	int handled = 0;
 
-	ioaddr = dev->base_addr;
-	np = netdev_priv(dev);
 	while (1) {
-		int_status = readw (ioaddr + IntStatus);
-		writew (int_status, ioaddr + IntStatus);
+		int_status = dr16(IntStatus);
+		dw16(IntStatus, int_status);
 		int_status &= DEFAULT_INTR;
 		if (int_status == 0 || --cnt < 0)
 			break;
@@ -692,7 +690,7 @@
 		/* TxDMAComplete interrupt */
 		if ((int_status & (TxDMAComplete|IntRequested))) {
 			int tx_status;
-			tx_status = readl (ioaddr + TxStatus);
+			tx_status = dr32(TxStatus);
 			if (tx_status & 0x01)
 				tx_error (dev, tx_status);
 			/* Free used tx skbuffs */
@@ -705,7 +703,7 @@
 			rio_error (dev, int_status);
 	}
 	if (np->cur_tx != np->old_tx)
-		writel (100, ioaddr + CountDown);
+		dw32(CountDown, 100);
 	return IRQ_RETVAL(handled);
 }
 
@@ -765,13 +763,11 @@
 static void
 tx_error (struct net_device *dev, int tx_status)
 {
-	struct netdev_private *np;
-	long ioaddr = dev->base_addr;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	int frame_id;
 	int i;
 
-	np = netdev_priv(dev);
-
 	frame_id = (tx_status & 0xffff0000);
 	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
 		dev->name, tx_status, frame_id);
@@ -779,23 +775,21 @@
 	/* Ttransmit Underrun */
 	if (tx_status & 0x10) {
 		np->stats.tx_fifo_errors++;
-		writew (readw (ioaddr + TxStartThresh) + 0x10,
-			ioaddr + TxStartThresh);
+		dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
 		/* Transmit Underrun needs to set TxReset, DMAReset, FIFOReset */
-		writew (TxReset | DMAReset | FIFOReset | NetworkReset,
-			ioaddr + ASICCtrl + 2);
+		dw16(ASICCtrl + 2,
+		     TxReset | DMAReset | FIFOReset | NetworkReset);
 		/* Wait for ResetBusy bit clear */
 		for (i = 50; i > 0; i--) {
-			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+			if (!(dr16(ASICCtrl + 2) & ResetBusy))
 				break;
 			mdelay (1);
 		}
 		rio_free_tx (dev, 1);
 		/* Reset TFDListPtr */
-		writel (np->tx_ring_dma +
-			np->old_tx * sizeof (struct netdev_desc),
-			dev->base_addr + TFDListPtr0);
-		writel (0, dev->base_addr + TFDListPtr1);
+		dw32(TFDListPtr0, np->tx_ring_dma +
+		     np->old_tx * sizeof (struct netdev_desc));
+		dw32(TFDListPtr1, 0);
 
 		/* Let TxStartThresh stay default value */
 	}
@@ -803,10 +797,10 @@
 	if (tx_status & 0x04) {
 		np->stats.tx_fifo_errors++;
 		/* TxReset and clear FIFO */
-		writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
+		dw16(ASICCtrl + 2, TxReset | FIFOReset);
 		/* Wait reset done */
 		for (i = 50; i > 0; i--) {
-			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+			if (!(dr16(ASICCtrl + 2) & ResetBusy))
 				break;
 			mdelay (1);
 		}
@@ -821,7 +815,7 @@
 		np->stats.collisions++;
 #endif
 	/* Restart the Tx */
-	writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
+	dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
 }
 
 static int
@@ -931,8 +925,8 @@
 static void
 rio_error (struct net_device *dev, int int_status)
 {
-	long ioaddr = dev->base_addr;
 	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	u16 macctrl;
 
 	/* Link change event */
@@ -954,7 +948,7 @@
 				TxFlowControlEnable : 0;
 			macctrl |= (np->rx_flow) ?
 				RxFlowControlEnable : 0;
-			writew(macctrl,	ioaddr + MACCtrl);
+			dw16(MACCtrl, macctrl);
 			np->link_status = 1;
 			netif_carrier_on(dev);
 		} else {
@@ -974,7 +968,7 @@
 	if (int_status & HostError) {
 		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
 			dev->name, int_status);
-		writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
+		dw16(ASICCtrl + 2, GlobalReset | HostReset);
 		mdelay (500);
 	}
 }
@@ -982,8 +976,8 @@
 static struct net_device_stats *
 get_stats (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 #ifdef MEM_MAPPING
 	int i;
 #endif
@@ -992,106 +986,107 @@
 	/* All statistics registers need to be acknowledged,
 	   else statistic overflow could cause problems */
 
-	np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
-	np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
-	np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
-	np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
+	np->stats.rx_packets += dr32(FramesRcvOk);
+	np->stats.tx_packets += dr32(FramesXmtOk);
+	np->stats.rx_bytes += dr32(OctetRcvOk);
+	np->stats.tx_bytes += dr32(OctetXmtOk);
 
-	np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
-	np->stats.collisions += readl (ioaddr + SingleColFrames)
-			     +  readl (ioaddr + MultiColFrames);
+	np->stats.multicast = dr32(McstFramesRcvdOk);
+	np->stats.collisions += dr32(SingleColFrames)
+			     +  dr32(MultiColFrames);
 
 	/* detailed tx errors */
-	stat_reg = readw (ioaddr + FramesAbortXSColls);
+	stat_reg = dr16(FramesAbortXSColls);
 	np->stats.tx_aborted_errors += stat_reg;
 	np->stats.tx_errors += stat_reg;
 
-	stat_reg = readw (ioaddr + CarrierSenseErrors);
+	stat_reg = dr16(CarrierSenseErrors);
 	np->stats.tx_carrier_errors += stat_reg;
 	np->stats.tx_errors += stat_reg;
 
 	/* Clear all other statistic register. */
-	readl (ioaddr + McstOctetXmtOk);
-	readw (ioaddr + BcstFramesXmtdOk);
-	readl (ioaddr + McstFramesXmtdOk);
-	readw (ioaddr + BcstFramesRcvdOk);
-	readw (ioaddr + MacControlFramesRcvd);
-	readw (ioaddr + FrameTooLongErrors);
-	readw (ioaddr + InRangeLengthErrors);
-	readw (ioaddr + FramesCheckSeqErrors);
-	readw (ioaddr + FramesLostRxErrors);
-	readl (ioaddr + McstOctetXmtOk);
-	readl (ioaddr + BcstOctetXmtOk);
-	readl (ioaddr + McstFramesXmtdOk);
-	readl (ioaddr + FramesWDeferredXmt);
-	readl (ioaddr + LateCollisions);
-	readw (ioaddr + BcstFramesXmtdOk);
-	readw (ioaddr + MacControlFramesXmtd);
-	readw (ioaddr + FramesWEXDeferal);
+	dr32(McstOctetXmtOk);
+	dr16(BcstFramesXmtdOk);
+	dr32(McstFramesXmtdOk);
+	dr16(BcstFramesRcvdOk);
+	dr16(MacControlFramesRcvd);
+	dr16(FrameTooLongErrors);
+	dr16(InRangeLengthErrors);
+	dr16(FramesCheckSeqErrors);
+	dr16(FramesLostRxErrors);
+	dr32(McstOctetXmtOk);
+	dr32(BcstOctetXmtOk);
+	dr32(McstFramesXmtdOk);
+	dr32(FramesWDeferredXmt);
+	dr32(LateCollisions);
+	dr16(BcstFramesXmtdOk);
+	dr16(MacControlFramesXmtd);
+	dr16(FramesWEXDeferal);
 
 #ifdef MEM_MAPPING
 	for (i = 0x100; i <= 0x150; i += 4)
-		readl (ioaddr + i);
+		dr32(i);
 #endif
-	readw (ioaddr + TxJumboFrames);
-	readw (ioaddr + RxJumboFrames);
-	readw (ioaddr + TCPCheckSumErrors);
-	readw (ioaddr + UDPCheckSumErrors);
-	readw (ioaddr + IPCheckSumErrors);
+	dr16(TxJumboFrames);
+	dr16(RxJumboFrames);
+	dr16(TCPCheckSumErrors);
+	dr16(UDPCheckSumErrors);
+	dr16(IPCheckSumErrors);
 	return &np->stats;
 }
 
 static int
 clear_stats (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 #ifdef MEM_MAPPING
 	int i;
 #endif
 
 	/* All statistics registers need to be acknowledged,
 	   else statistic overflow could cause problems */
-	readl (ioaddr + FramesRcvOk);
-	readl (ioaddr + FramesXmtOk);
-	readl (ioaddr + OctetRcvOk);
-	readl (ioaddr + OctetXmtOk);
+	dr32(FramesRcvOk);
+	dr32(FramesXmtOk);
+	dr32(OctetRcvOk);
+	dr32(OctetXmtOk);
 
-	readl (ioaddr + McstFramesRcvdOk);
-	readl (ioaddr + SingleColFrames);
-	readl (ioaddr + MultiColFrames);
-	readl (ioaddr + LateCollisions);
+	dr32(McstFramesRcvdOk);
+	dr32(SingleColFrames);
+	dr32(MultiColFrames);
+	dr32(LateCollisions);
 	/* detailed rx errors */
-	readw (ioaddr + FrameTooLongErrors);
-	readw (ioaddr + InRangeLengthErrors);
-	readw (ioaddr + FramesCheckSeqErrors);
-	readw (ioaddr + FramesLostRxErrors);
+	dr16(FrameTooLongErrors);
+	dr16(InRangeLengthErrors);
+	dr16(FramesCheckSeqErrors);
+	dr16(FramesLostRxErrors);
 
 	/* detailed tx errors */
-	readw (ioaddr + FramesAbortXSColls);
-	readw (ioaddr + CarrierSenseErrors);
+	dr16(FramesAbortXSColls);
+	dr16(CarrierSenseErrors);
 
 	/* Clear all other statistic register. */
-	readl (ioaddr + McstOctetXmtOk);
-	readw (ioaddr + BcstFramesXmtdOk);
-	readl (ioaddr + McstFramesXmtdOk);
-	readw (ioaddr + BcstFramesRcvdOk);
-	readw (ioaddr + MacControlFramesRcvd);
-	readl (ioaddr + McstOctetXmtOk);
-	readl (ioaddr + BcstOctetXmtOk);
-	readl (ioaddr + McstFramesXmtdOk);
-	readl (ioaddr + FramesWDeferredXmt);
-	readw (ioaddr + BcstFramesXmtdOk);
-	readw (ioaddr + MacControlFramesXmtd);
-	readw (ioaddr + FramesWEXDeferal);
+	dr32(McstOctetXmtOk);
+	dr16(BcstFramesXmtdOk);
+	dr32(McstFramesXmtdOk);
+	dr16(BcstFramesRcvdOk);
+	dr16(MacControlFramesRcvd);
+	dr32(McstOctetXmtOk);
+	dr32(BcstOctetXmtOk);
+	dr32(McstFramesXmtdOk);
+	dr32(FramesWDeferredXmt);
+	dr16(BcstFramesXmtdOk);
+	dr16(MacControlFramesXmtd);
+	dr16(FramesWEXDeferal);
 #ifdef MEM_MAPPING
 	for (i = 0x100; i <= 0x150; i += 4)
-		readl (ioaddr + i);
+		dr32(i);
 #endif
-	readw (ioaddr + TxJumboFrames);
-	readw (ioaddr + RxJumboFrames);
-	readw (ioaddr + TCPCheckSumErrors);
-	readw (ioaddr + UDPCheckSumErrors);
-	readw (ioaddr + IPCheckSumErrors);
+	dr16(TxJumboFrames);
+	dr16(RxJumboFrames);
+	dr16(TCPCheckSumErrors);
+	dr16(UDPCheckSumErrors);
+	dr16(IPCheckSumErrors);
 	return 0;
 }
 
@@ -1114,10 +1109,10 @@
 static void
 set_multicast (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	u32 hash_table[2];
 	u16 rx_mode = 0;
-	struct netdev_private *np = netdev_priv(dev);
 
 	hash_table[0] = hash_table[1] = 0;
 	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
@@ -1153,9 +1148,9 @@
 		rx_mode |= ReceiveVLANMatch;
 	}
 
-	writel (hash_table[0], ioaddr + HashTable0);
-	writel (hash_table[1], ioaddr + HashTable1);
-	writew (rx_mode, ioaddr + ReceiveMode);
+	dw32(HashTable0, hash_table[0]);
+	dw32(HashTable1, hash_table[1]);
+	dw16(ReceiveMode, rx_mode);
 }
 
 static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1318,15 +1313,15 @@
 #define EEP_BUSY 0x8000
 /* Read the EEPROM word */
 /* We use I/O instruction to read/write eeprom to avoid fail on some machines */
-static int
-read_eeprom (long ioaddr, int eep_addr)
+static int read_eeprom(struct netdev_private *np, int eep_addr)
 {
+	void __iomem *ioaddr = np->eeprom_addr;
 	int i = 1000;
-	outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
+
+	dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
 	while (i-- > 0) {
-		if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) {
-			return inw (ioaddr + EepromData);
-		}
+		if (!(dr16(EepromCtrl) & EEP_BUSY))
+			return dr16(EepromData);
 	}
 	return 0;
 }
@@ -1336,38 +1331,40 @@
 	MII_DUPLEX = 0x08,
 };
 
-#define mii_delay() readb(ioaddr)
+#define mii_delay() dr8(PhyCtrl)
 static void
 mii_sendbit (struct net_device *dev, u32 data)
 {
-	long ioaddr = dev->base_addr + PhyCtrl;
-	data = (data) ? MII_DATA1 : 0;
-	data |= MII_WRITE;
-	data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
-	writeb (data, ioaddr);
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
+
+	data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
+	dw8(PhyCtrl, data);
 	mii_delay ();
-	writeb (data | MII_CLK, ioaddr);
+	dw8(PhyCtrl, data | MII_CLK);
 	mii_delay ();
 }
 
 static int
 mii_getbit (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr + PhyCtrl;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	u8 data;
 
-	data = (readb (ioaddr) & 0xf8) | MII_READ;
-	writeb (data, ioaddr);
+	data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
+	dw8(PhyCtrl, data);
 	mii_delay ();
-	writeb (data | MII_CLK, ioaddr);
+	dw8(PhyCtrl, data | MII_CLK);
 	mii_delay ();
-	return ((readb (ioaddr) >> 1) & 1);
+	return (dr8(PhyCtrl) >> 1) & 1;
 }
 
 static void
 mii_send_bits (struct net_device *dev, u32 data, int len)
 {
 	int i;
+
 	for (i = len - 1; i >= 0; i--) {
 		mii_sendbit (dev, data & (1 << i));
 	}
@@ -1721,28 +1718,29 @@
 static int
 rio_close (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
+
+	struct pci_dev *pdev = np->pdev;
 	struct sk_buff *skb;
 	int i;
 
 	netif_stop_queue (dev);
 
 	/* Disable interrupts */
-	writew (0, ioaddr + IntEnable);
+	dw16(IntEnable, 0);
 
 	/* Stop Tx and Rx logics */
-	writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
+	dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
 
-	free_irq (dev->irq, dev);
+	free_irq(pdev->irq, dev);
 	del_timer_sync (&np->timer);
 
 	/* Free all the skbuffs in the queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		skb = np->rx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pdev,
-					 desc_to_dma(&np->rx_ring[i]),
+			pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
 					 skb->len, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
@@ -1753,8 +1751,7 @@
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pdev,
-					 desc_to_dma(&np->tx_ring[i]),
+			pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
 					 skb->len, PCI_DMA_TODEVICE);
 			dev_kfree_skb (skb);
 			np->tx_skbuff[i] = NULL;
@@ -1778,8 +1775,9 @@
 		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
 				     np->tx_ring_dma);
 #ifdef MEM_MAPPING
-		iounmap ((char *) (dev->base_addr));
+		pci_iounmap(pdev, np->ioaddr);
 #endif
+		pci_iounmap(pdev, np->eeprom_addr);
 		free_netdev (dev);
 		pci_release_regions (pdev);
 		pci_disable_device (pdev);
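
The rio_close()/rio_remove1() paths above now unmap two separate cookies, np->ioaddr and np->eeprom_addr. The matching probe-side mapping is not part of this hunk; a plausible sketch, with the BAR numbers and error label assumed, looks like this:

	/* Plausible probe-side counterpart (BAR numbers are assumptions). */
	np->eeprom_addr = pci_iomap(pdev, 0, 0);	/* I/O BAR: EEPROM + registers */
	if (!np->eeprom_addr)
		goto err_out_netdev;
#ifdef MEM_MAPPING
	np->ioaddr = pci_iomap(pdev, 1, 0);		/* memory BAR: register access */
	if (!np->ioaddr) {
		pci_iounmap(pdev, np->eeprom_addr);
		goto err_out_netdev;
	}
#else
	np->ioaddr = np->eeprom_addr;
#endif
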
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index ba0adca..40ba6e0 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -42,23 +42,6 @@
 #define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
 #define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
 
-/* This driver was written to use PCI memory space, however x86-oriented
-   hardware often uses I/O space accesses. */
-#ifndef MEM_MAPPING
-#undef readb
-#undef readw
-#undef readl
-#undef writeb
-#undef writew
-#undef writel
-#define readb inb
-#define readw inw
-#define readl inl
-#define writeb outb
-#define writew outw
-#define writel outl
-#endif
-
 /* Offsets to the device registers.
    Unlike software-only systems, device drivers interact with complex hardware.
    It's not useful to define symbolic names for every register bit in the
@@ -391,6 +374,8 @@
 	dma_addr_t tx_ring_dma;
 	dma_addr_t rx_ring_dma;
 	struct pci_dev *pdev;
+	void __iomem *ioaddr;
+	void __iomem *eeprom_addr;
 	spinlock_t tx_lock;
 	spinlock_t rx_lock;
 	struct net_device_stats stats;
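
With the inb/outb remapping removed from dl2k.h, every converted call site in dl2k.c goes through small dr8/dr16/dr32 and dw8/dw16/dw32 helpers that expect a local `void __iomem *ioaddr` in scope. Their definitions are not shown in this hunk; they are presumably along these lines:

/* Presumed accessor definitions (the real ones live elsewhere in dl2k.h). */
#define dw8(reg, val)	iowrite8(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))

Because ioread*/iowrite* operate on cookies returned by pci_iomap() for both I/O and memory BARs, the old "#ifndef MEM_MAPPING" read/write aliasing becomes unnecessary.
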
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d783f4f..d7bb52a 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -522,9 +522,6 @@
 			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-	dev->base_addr = (unsigned long)ioaddr;
-	dev->irq = irq;
-
 	np = netdev_priv(dev);
 	np->base = ioaddr;
 	np->pci_dev = pdev;
@@ -828,18 +825,19 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base;
+	const int irq = np->pci_dev->irq;
 	unsigned long flags;
 	int i;
 
 	/* Do we need to reset the chip??? */
 
-	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (i)
 		return i;
 
 	if (netif_msg_ifup(np))
-		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-			   dev->name, dev->irq);
+		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
+
 	init_ring(dev);
 
 	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
@@ -1814,7 +1812,7 @@
 	}
 #endif /* __i386__ debugging only */
 
-	free_irq(dev->irq, dev);
+	free_irq(np->pci_dev->irq, dev);
 
 	del_timer_sync(&np->timer);
 
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index b276469..290b26f 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -815,6 +815,7 @@
 	.set_settings		= dnet_set_settings,
 	.get_drvinfo		= dnet_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops dnet_netdev_ops = {
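
dnet is the first of several drivers in this series (see also fec, fec_mpc52xx, fs_enet, ucc_geth and e100 below) that wire .get_ts_info to the generic ethtool_op_get_ts_info helper. Assuming the helper behaves as in contemporary kernels, it merely reports software timestamping and no PTP hardware clock, roughly:

/* Rough equivalent of ethtool_op_get_ts_info() under that assumption. */
static int sw_only_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;	/* no PTP hardware clock */
	return 0;
}

Drivers with hardware timestamping, such as gianfar further down, supply their own callback instead.
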
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 9576ac0..ad69cf8 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -313,6 +313,23 @@
 #define BE_UC_PMAC_COUNT		30
 #define BE_VF_UC_PMAC_COUNT		2
 
+struct phy_info {
+	u8 transceiver;
+	u8 autoneg;
+	u8 fc_autoneg;
+	u8 port_type;
+	u16 phy_type;
+	u16 interface_type;
+	u32 misc_params;
+	u16 auto_speeds_supported;
+	u16 fixed_speeds_supported;
+	int link_speed;
+	int forced_port_speed;
+	u32 dac_cable_len;
+	u32 advertising;
+	u32 supported;
+};
+
 struct be_adapter {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
@@ -377,10 +394,6 @@
 	u32 rx_fc;		/* Rx flow control */
 	u32 tx_fc;		/* Tx flow control */
 	bool stats_cmd_sent;
-	int link_speed;
-	u8 port_type;
-	u8 transceiver;
-	u8 autoneg;
 	u8 generation;		/* BladeEngine ASIC generation */
 	u32 flash_status;
 	struct completion flash_compl;
@@ -392,6 +405,7 @@
 	u32 sli_family;
 	u8 hba_port_num;
 	u16 pvid;
+	struct phy_info phy;
 	u8 wol_cap;
 	bool wol;
 	u32 max_pmac_cnt;	/* Max secondary UC MACs programmable */
@@ -583,4 +597,5 @@
 extern void be_parse_stats(struct be_adapter *adapter);
 extern int be_load_fw(struct be_adapter *adapter, u8 *func);
 extern bool be_is_wol_supported(struct be_adapter *adapter);
+extern bool be_pause_supported(struct be_adapter *adapter);
 #endif				/* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 67b030d..22be08c 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -126,7 +126,7 @@
 		struct be_async_event_link_state *evt)
 {
 	/* When link status changes, link speed must be re-queried from FW */
-	adapter->link_speed = -1;
+	adapter->phy.link_speed = -1;
 
 	/* For the initial link status do not rely on the ASYNC event as
 	 * it may not be received in some cases.
@@ -153,7 +153,7 @@
 {
 	if (evt->physical_port == adapter->port_num) {
 		/* qos_link_speed is in units of 10 Mbps */
-		adapter->link_speed = evt->qos_link_speed * 10;
+		adapter->phy.link_speed = evt->qos_link_speed * 10;
 	}
 }
 
@@ -2136,8 +2136,7 @@
 	return status;
 }
 
-int be_cmd_get_phy_info(struct be_adapter *adapter,
-				struct be_phy_info *phy_info)
+int be_cmd_get_phy_info(struct be_adapter *adapter)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_phy_info *req;
@@ -2170,9 +2169,15 @@
 	if (!status) {
 		struct be_phy_info *resp_phy_info =
 				cmd.va + sizeof(struct be_cmd_req_hdr);
-		phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
-		phy_info->interface_type =
+		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
+		adapter->phy.interface_type =
 			le16_to_cpu(resp_phy_info->interface_type);
+		adapter->phy.auto_speeds_supported =
+			le16_to_cpu(resp_phy_info->auto_speeds_supported);
+		adapter->phy.fixed_speeds_supported =
+			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
+		adapter->phy.misc_params =
+			le32_to_cpu(resp_phy_info->misc_params);
 	}
 	pci_free_consistent(adapter->pdev, cmd.size,
 				cmd.va, cmd.dma);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d5b680c..3c54361 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1309,9 +1309,36 @@
 	PHY_TYPE_KX4_10GB,
 	PHY_TYPE_BASET_10GB,
 	PHY_TYPE_BASET_1GB,
+	PHY_TYPE_BASEX_1GB,
+	PHY_TYPE_SGMII,
 	PHY_TYPE_DISABLED = 255
 };
 
+#define BE_SUPPORTED_SPEED_NONE		0
+#define BE_SUPPORTED_SPEED_10MBPS	1
+#define BE_SUPPORTED_SPEED_100MBPS	2
+#define BE_SUPPORTED_SPEED_1GBPS	4
+#define BE_SUPPORTED_SPEED_10GBPS	8
+
+#define BE_AN_EN			0x2
+#define BE_PAUSE_SYM_EN			0x80
+
+/* MAC speed valid values */
+#define SPEED_DEFAULT  0x0
+#define SPEED_FORCED_10GB  0x1
+#define SPEED_FORCED_1GB  0x2
+#define SPEED_AUTONEG_10GB  0x3
+#define SPEED_AUTONEG_1GB  0x4
+#define SPEED_AUTONEG_100MB  0x5
+#define SPEED_AUTONEG_10GB_1GB 0x6
+#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
+#define SPEED_AUTONEG_1GB_100MB  0x8
+#define SPEED_AUTONEG_10MB  0x9
+#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
+#define SPEED_AUTONEG_100MB_10MB 0xb
+#define SPEED_FORCED_100MB  0xc
+#define SPEED_FORCED_10MB  0xd
+
 struct be_cmd_req_get_phy_info {
 	struct be_cmd_req_hdr hdr;
 	u8 rsvd0[24];
@@ -1321,7 +1348,11 @@
 	u16 phy_type;
 	u16 interface_type;
 	u32 misc_params;
-	u32 future_use[4];
+	u16 ext_phy_details;
+	u16 rsvd;
+	u16 auto_speeds_supported;
+	u16 fixed_speeds_supported;
+	u32 future_use[2];
 };
 
 struct be_cmd_resp_get_phy_info {
@@ -1655,8 +1686,7 @@
 				struct be_dma_mem *nonemb_cmd);
 extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 				u8 loopback_type, u8 enable);
-extern int be_cmd_get_phy_info(struct be_adapter *adapter,
-				struct be_phy_info *phy_info);
+extern int be_cmd_get_phy_info(struct be_adapter *adapter);
 extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
 extern void be_detect_dump_ue(struct be_adapter *adapter);
 extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index c1ff73c..dc9f74c 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -433,102 +433,193 @@
 	}
 }
 
+static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
+{
+	u32 port;
+
+	switch (phy_type) {
+	case PHY_TYPE_BASET_1GB:
+	case PHY_TYPE_BASEX_1GB:
+	case PHY_TYPE_SGMII:
+		port = PORT_TP;
+		break;
+	case PHY_TYPE_SFP_PLUS_10GB:
+		port = dac_cable_len ? PORT_DA : PORT_FIBRE;
+		break;
+	case PHY_TYPE_XFP_10GB:
+	case PHY_TYPE_SFP_1GB:
+		port = PORT_FIBRE;
+		break;
+	case PHY_TYPE_BASET_10GB:
+		port = PORT_TP;
+		break;
+	default:
+		port = PORT_OTHER;
+	}
+
+	return port;
+}
+
+static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
+{
+	u32 val = 0;
+
+	switch (if_type) {
+	case PHY_TYPE_BASET_1GB:
+	case PHY_TYPE_BASEX_1GB:
+	case PHY_TYPE_SGMII:
+		val |= SUPPORTED_TP;
+		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+			val |= SUPPORTED_1000baseT_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
+			val |= SUPPORTED_100baseT_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
+			val |= SUPPORTED_10baseT_Full;
+		break;
+	case PHY_TYPE_KX4_10GB:
+		val |= SUPPORTED_Backplane;
+		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+			val |= SUPPORTED_1000baseKX_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+			val |= SUPPORTED_10000baseKX4_Full;
+		break;
+	case PHY_TYPE_KR_10GB:
+		val |= SUPPORTED_Backplane |
+				SUPPORTED_10000baseKR_Full;
+		break;
+	case PHY_TYPE_SFP_PLUS_10GB:
+	case PHY_TYPE_XFP_10GB:
+	case PHY_TYPE_SFP_1GB:
+		val |= SUPPORTED_FIBRE;
+		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+			val |= SUPPORTED_10000baseT_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+			val |= SUPPORTED_1000baseT_Full;
+		break;
+	case PHY_TYPE_BASET_10GB:
+		val |= SUPPORTED_TP;
+		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+			val |= SUPPORTED_10000baseT_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+			val |= SUPPORTED_1000baseT_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
+			val |= SUPPORTED_100baseT_Full;
+		break;
+	default:
+		val |= SUPPORTED_TP;
+	}
+
+	return val;
+}
+
+static int convert_to_et_speed(u32 be_speed)
+{
+	int et_speed = SPEED_10000;
+
+	switch (be_speed) {
+	case PHY_LINK_SPEED_10MBPS:
+		et_speed = SPEED_10;
+		break;
+	case PHY_LINK_SPEED_100MBPS:
+		et_speed = SPEED_100;
+		break;
+	case PHY_LINK_SPEED_1GBPS:
+		et_speed = SPEED_1000;
+		break;
+	case PHY_LINK_SPEED_10GBPS:
+		et_speed = SPEED_10000;
+		break;
+	}
+
+	return et_speed;
+}
+
+bool be_pause_supported(struct be_adapter *adapter)
+{
+	return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
+		adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
+		false : true;
+}
+
 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
-	struct be_phy_info phy_info;
-	u8 mac_speed = 0;
+	u8 port_speed = 0;
 	u16 link_speed = 0;
 	u8 link_status;
+	u32 et_speed = 0;
 	int status;
 
-	if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
-		status = be_cmd_link_status_query(adapter, &mac_speed,
-						  &link_speed, &link_status, 0);
-		if (!status)
-			be_link_status_update(adapter, link_status);
-
-		/* link_speed is in units of 10 Mbps */
-		if (link_speed) {
-			ethtool_cmd_speed_set(ecmd, link_speed*10);
+	if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) {
+		if (adapter->phy.forced_port_speed < 0) {
+			status = be_cmd_link_status_query(adapter, &port_speed,
+						&link_speed, &link_status, 0);
+			if (!status)
+				be_link_status_update(adapter, link_status);
+			if (link_speed)
+				et_speed = link_speed;
+			else
+				et_speed = convert_to_et_speed(port_speed);
 		} else {
-			switch (mac_speed) {
-			case PHY_LINK_SPEED_10MBPS:
-				ethtool_cmd_speed_set(ecmd, SPEED_10);
-				break;
-			case PHY_LINK_SPEED_100MBPS:
-				ethtool_cmd_speed_set(ecmd, SPEED_100);
-				break;
-			case PHY_LINK_SPEED_1GBPS:
-				ethtool_cmd_speed_set(ecmd, SPEED_1000);
-				break;
-			case PHY_LINK_SPEED_10GBPS:
-				ethtool_cmd_speed_set(ecmd, SPEED_10000);
-				break;
-			case PHY_LINK_SPEED_ZERO:
-				ethtool_cmd_speed_set(ecmd, 0);
-				break;
-			}
+			et_speed = adapter->phy.forced_port_speed;
 		}
 
-		status = be_cmd_get_phy_info(adapter, &phy_info);
-		if (!status) {
-			switch (phy_info.interface_type) {
-			case PHY_TYPE_XFP_10GB:
-			case PHY_TYPE_SFP_1GB:
-			case PHY_TYPE_SFP_PLUS_10GB:
-				ecmd->port = PORT_FIBRE;
-				break;
-			default:
-				ecmd->port = PORT_TP;
-				break;
-			}
+		ethtool_cmd_speed_set(ecmd, et_speed);
 
-			switch (phy_info.interface_type) {
-			case PHY_TYPE_KR_10GB:
-			case PHY_TYPE_KX4_10GB:
-				ecmd->autoneg = AUTONEG_ENABLE;
+		status = be_cmd_get_phy_info(adapter);
+		if (status)
+			return status;
+
+		ecmd->supported =
+			convert_to_et_setting(adapter->phy.interface_type,
+					adapter->phy.auto_speeds_supported |
+					adapter->phy.fixed_speeds_supported);
+		ecmd->advertising =
+			convert_to_et_setting(adapter->phy.interface_type,
+					adapter->phy.auto_speeds_supported);
+
+		ecmd->port = be_get_port_type(adapter->phy.interface_type,
+					      adapter->phy.dac_cable_len);
+
+		if (adapter->phy.auto_speeds_supported) {
+			ecmd->supported |= SUPPORTED_Autoneg;
+			ecmd->autoneg = AUTONEG_ENABLE;
+			ecmd->advertising |= ADVERTISED_Autoneg;
+		}
+
+		if (be_pause_supported(adapter)) {
+			ecmd->supported |= SUPPORTED_Pause;
+			ecmd->advertising |= ADVERTISED_Pause;
+		}
+
+		switch (adapter->phy.interface_type) {
+		case PHY_TYPE_KR_10GB:
+		case PHY_TYPE_KX4_10GB:
 			ecmd->transceiver = XCVR_INTERNAL;
-				break;
-			default:
-				ecmd->autoneg = AUTONEG_DISABLE;
-				ecmd->transceiver = XCVR_EXTERNAL;
-				break;
-			}
+			break;
+		default:
+			ecmd->transceiver = XCVR_EXTERNAL;
+			break;
 		}
 
 		/* Save for future use */
-		adapter->link_speed = ethtool_cmd_speed(ecmd);
-		adapter->port_type = ecmd->port;
-		adapter->transceiver = ecmd->transceiver;
-		adapter->autoneg = ecmd->autoneg;
+		adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
+		adapter->phy.port_type = ecmd->port;
+		adapter->phy.transceiver = ecmd->transceiver;
+		adapter->phy.autoneg = ecmd->autoneg;
+		adapter->phy.advertising = ecmd->advertising;
+		adapter->phy.supported = ecmd->supported;
 	} else {
-		ethtool_cmd_speed_set(ecmd, adapter->link_speed);
-		ecmd->port = adapter->port_type;
-		ecmd->transceiver = adapter->transceiver;
-		ecmd->autoneg = adapter->autoneg;
+		ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
+		ecmd->port = adapter->phy.port_type;
+		ecmd->transceiver = adapter->phy.transceiver;
+		ecmd->autoneg = adapter->phy.autoneg;
+		ecmd->advertising = adapter->phy.advertising;
+		ecmd->supported = adapter->phy.supported;
 	}
 
 	ecmd->duplex = DUPLEX_FULL;
 	ecmd->phy_address = adapter->port_num;
-	switch (ecmd->port) {
-	case PORT_FIBRE:
-		ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-		break;
-	case PORT_TP:
-		ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
-		break;
-	case PORT_AUI:
-		ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
-		break;
-	}
-
-	if (ecmd->autoneg) {
-		ecmd->supported |= SUPPORTED_1000baseT_Full;
-		ecmd->supported |= SUPPORTED_Autoneg;
-		ecmd->advertising |= (ADVERTISED_10000baseT_Full |
-				ADVERTISED_1000baseT_Full);
-	}
 
 	return 0;
 }
@@ -548,7 +639,7 @@
 	struct be_adapter *adapter = netdev_priv(netdev);
 
 	be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
-	ecmd->autoneg = 0;
+	ecmd->autoneg = adapter->phy.fc_autoneg;
 }
 
 static int
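
As a worked example of the new mapping: a PHY_TYPE_BASET_10GB interface whose auto_speeds_supported advertises 10 Gb/s, 1 Gb/s and 100 Mb/s ends up with SUPPORTED_TP plus the three matching *baseT_Full bits. A hypothetical self-check inside be_ethtool.c would read:

	/* Hypothetical self-check of the speed mapping above. */
	u32 caps = convert_to_et_setting(PHY_TYPE_BASET_10GB,
					 BE_SUPPORTED_SPEED_10GBPS |
					 BE_SUPPORTED_SPEED_1GBPS |
					 BE_SUPPORTED_SPEED_100MBPS);

	WARN_ON(caps != (SUPPORTED_TP |
			 SUPPORTED_10000baseT_Full |
			 SUPPORTED_1000baseT_Full |
			 SUPPORTED_100baseT_Full));
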
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 528a886..a5bc608 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2571,11 +2571,12 @@
 static void be_setup_init(struct be_adapter *adapter)
 {
 	adapter->vlan_prio_bmap = 0xff;
-	adapter->link_speed = -1;
+	adapter->phy.link_speed = -1;
 	adapter->if_handle = -1;
 	adapter->be3_native = false;
 	adapter->promiscuous = false;
 	adapter->eq_next_idx = 0;
+	adapter->phy.forced_port_speed = -1;
 }
 
 static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
@@ -2707,6 +2708,10 @@
 			goto err;
 	}
 
+	be_cmd_get_phy_info(adapter);
+	if (be_pause_supported(adapter))
+		adapter->phy.fc_autoneg = 1;
+
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
 
@@ -2760,17 +2765,8 @@
 
 static bool phy_flashing_required(struct be_adapter *adapter)
 {
-	int status = 0;
-	struct be_phy_info phy_info;
-
-	status = be_cmd_get_phy_info(adapter, &phy_info);
-	if (status)
-		return false;
-	if ((phy_info.phy_type == TN_8022) &&
-		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
-		return true;
-	}
-	return false;
+	return (adapter->phy.phy_type == TN_8022 &&
+		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
 }
 
 static int be_flash_data(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 1637b98..9d71c9c 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -545,9 +545,6 @@
 	/* Reset the chip to erase previous misconfiguration. */
 	iowrite32(0x00000001, ioaddr + BCR);
 
-	dev->base_addr = (unsigned long)ioaddr;
-	dev->irq = irq;
-
 	/* Make certain the descriptor lists are aligned. */
 	np = netdev_priv(dev);
 	np->mem = ioaddr;
@@ -832,11 +829,13 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->mem;
-	int i;
+	const int irq = np->pci_dev->irq;
+	int rc, i;
 
 	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */
 
-	if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev))
+	rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
+	if (rc)
 		return -EAGAIN;
 
 	for (i = 0; i < 3; i++)
@@ -924,8 +923,7 @@
 	np->reset_timer.data = (unsigned long) dev;
 	np->reset_timer.function = reset_timer;
 	np->reset_timer_armed = 0;
-
-	return 0;
+	return rc;
 }
 
 
@@ -1910,7 +1908,7 @@
 	del_timer_sync(&np->timer);
 	del_timer_sync(&np->reset_timer);
 
-	free_irq(dev->irq, dev);
+	free_irq(np->pci_dev->irq, dev);
 
 	/* Free all the skbuffs in the Rx queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index a12b3f5..7fa0227 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -1161,6 +1161,7 @@
 	.set_settings		= fec_enet_set_settings,
 	.get_drvinfo		= fec_enet_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 7b34d8c..97f947b 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -811,6 +811,7 @@
 	.get_link = ethtool_op_get_link,
 	.get_msglevel = mpc52xx_fec_get_msglevel,
 	.set_msglevel = mpc52xx_fec_set_msglevel,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index e4e6cd2..2b7633f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -963,6 +963,7 @@
 	.get_msglevel = fs_get_msglevel,
 	.set_msglevel = fs_set_msglevel,
 	.get_regs = fs_get_regs,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e7bed53..1adb024 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -136,7 +136,7 @@
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			      int amount_pull);
+			      int amount_pull, struct napi_struct *napi);
 void gfar_halt(struct net_device *dev);
 static void gfar_halt_nodisable(struct net_device *dev);
 void gfar_start(struct net_device *dev);
@@ -2675,12 +2675,12 @@
 /* gfar_process_frame() -- handle one incoming packet if skb
  * isn't NULL.  */
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			      int amount_pull)
+			      int amount_pull, struct napi_struct *napi)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct rxfcb *fcb = NULL;
 
-	int ret;
+	gro_result_t ret;
 
 	/* fcb is at the beginning if exists */
 	fcb = (struct rxfcb *)skb->data;
@@ -2719,9 +2719,9 @@
 		__vlan_hwaccel_put_tag(skb, fcb->vlctl);
 
 	/* Send the packet up the stack */
-	ret = netif_receive_skb(skb);
+	ret = napi_gro_receive(napi, skb);
 
-	if (NET_RX_DROP == ret)
+	if (GRO_DROP == ret)
 		priv->extra_stats.kernel_dropped++;
 
 	return 0;
@@ -2783,7 +2783,8 @@
 				skb_put(skb, pkt_len);
 				rx_queue->stats.rx_bytes += pkt_len;
 				skb_record_rx_queue(skb, rx_queue->qindex);
-				gfar_process_frame(dev, skb, amount_pull);
+				gfar_process_frame(dev, skb, amount_pull,
+						&rx_queue->grp->napi);
 
 			} else {
 				netif_warn(priv, rx_err, dev, "Missing skb!\n");
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4c9f8d4..2136c7f 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1210,4 +1210,7 @@
 	struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
 };
 
+/* The gianfar_ptp module will set this variable */
+extern int gfar_phc_index;
+
 #endif /* __GIANFAR_H */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8d74efd..8a02557 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
@@ -1739,6 +1740,34 @@
 	return ret;
 }
 
+int gfar_phc_index = -1;
+
+static int gfar_get_ts_info(struct net_device *dev,
+			    struct ethtool_ts_info *info)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
+		info->so_timestamping =
+			SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE;
+		info->phc_index = -1;
+		return 0;
+	}
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = gfar_phc_index;
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_ALL);
+	return 0;
+}
+
 const struct ethtool_ops gfar_ethtool_ops = {
 	.get_settings = gfar_gsettings,
 	.set_settings = gfar_ssettings,
@@ -1761,4 +1790,5 @@
 #endif
 	.set_rxnfc = gfar_set_nfc,
 	.get_rxnfc = gfar_get_nfc,
+	.get_ts_info = gfar_get_ts_info,
 };
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 5fd620b..c08e5d4 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -515,6 +515,7 @@
 		err = PTR_ERR(etsects->clock);
 		goto no_clock;
 	}
+	gfar_phc_index = ptp_clock_index(etsects->clock);
 
 	dev_set_drvdata(&dev->dev, etsects);
 
@@ -538,6 +539,7 @@
 	gfar_write(&etsects->regs->tmr_temask, 0);
 	gfar_write(&etsects->regs->tmr_ctrl,   0);
 
+	gfar_phc_index = -1;
 	ptp_clock_unregister(etsects->clock);
 	iounmap(etsects->regs);
 	release_resource(etsects->rsrc);
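
Once gfar_phc_index is published through gfar_get_ts_info(), user space can locate the PTP clock behind a given interface. A minimal user-space sketch (error handling trimmed; assumes a uapi ethtool.h that already carries ETHTOOL_GET_TS_INFO):

/* User-space sketch: find and open the PHC behind a network interface. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int open_phc_for(const char *ifname)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	char path[32];
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&info;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0 || info.phc_index < 0) {
		if (fd >= 0)
			close(fd);
		return -1;	/* no PHC: fall back to software timestamps */
	}
	close(fd);
	snprintf(path, sizeof(path), "/dev/ptp%d", info.phc_index);
	return open(path, O_RDWR);
}
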
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index a97257f..37b0353 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -415,6 +415,7 @@
 	.get_ethtool_stats      = uec_get_ethtool_stats,
 	.get_wol		= uec_get_wol,
 	.set_wol		= uec_set_wol,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 void uec_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 7621316..74215c0 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -7,7 +7,7 @@
 	default y
 	depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
 		   ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
-		   GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \
+		   GSC || BVME6000 || MVME16x || \
 		   (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
 		   EXPERIMENTAL
 	---help---
@@ -120,6 +120,17 @@
 	  driver.  DCA is a method for warming the CPU cache before data
 	  is used, with the intent of lessening the impact of cache misses.
 
+config IGB_PTP
+	bool "PTP Hardware Clock (PHC)"
+	default y
+	depends on IGB && PTP_1588_CLOCK
+	---help---
+	  Say Y here if you want to use PTP Hardware Clock (PHC) in the
+	  driver.  Only the basic clock operations have been implemented.
+
+	  Every timestamp and clock read operation must consult the
+	  overflow counter to form a correct time value.
+
 config IGBVF
 	tristate "Intel(R) 82576 Virtual Function Ethernet support"
 	depends on PCI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index e498eff..ada720b 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1759,6 +1759,7 @@
 		skb->data, skb->len, PCI_DMA_TODEVICE));
 	/* check for mapping failure? */
 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
+	skb_tx_timestamp(skb);
 }
 
 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
@@ -2733,6 +2734,7 @@
 	.set_phys_id		= e100_set_phys_id,
 	.get_ethtool_stats	= e100_get_ethtool_stats,
 	.get_sset_count		= e100_get_sset_count,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
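
The e100 change pairs two things that belong together: ethtool_op_get_ts_info advertises SOF_TIMESTAMPING_TX_SOFTWARE, and the stack only produces such a timestamp if the driver calls skb_tx_timestamp() on its transmit path. The expected placement, in a hypothetical driver, is:

/* Hypothetical xmit path showing the expected placement. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the buffer and fill the descriptor ... */

	/* Take the software TX timestamp as late as possible, right
	 * before the hardware is told about the frame. */
	skb_tx_timestamp(skb);

	/* ... advance the tail pointer / ring the doorbell ... */
	return NETDEV_TX_OK;
}
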
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 4348b6f..3d712f2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -827,9 +827,10 @@
 	if (changed & NETIF_F_HW_VLAN_RX)
 		e1000_vlan_mode(netdev, features);
 
-	if (!(changed & NETIF_F_RXCSUM))
+	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 		return 0;
 
+	netdev->features = features;
 	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 
 	if (netif_running(netdev))
@@ -1074,6 +1075,7 @@
 
 	netdev->features |= netdev->hw_features;
 	netdev->hw_features |= NETIF_F_RXCSUM;
+	netdev->hw_features |= NETIF_F_RXALL;
 	netdev->hw_features |= NETIF_F_RXFCS;
 
 	if (pci_using_dac) {
@@ -1841,6 +1843,22 @@
 			break;
 	}
 
+	/* This is useful for sniffing bad packets. */
+	if (adapter->netdev->features & NETIF_F_RXALL) {
+		/* UPE and MPE will be handled by normal PROMISC logic
+		 * in e1000e_set_rx_mode */
+		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
+			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+			  E1000_RCTL_DPF | /* Allow filtered pause */
+			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+		 * and that breaks VLANs.
+		 */
+	}
+
 	ew32(RCTL, rctl);
 }
 
@@ -4057,6 +4075,8 @@
 				                       irq_flags);
 				length--;
 			} else {
+				if (netdev->features & NETIF_F_RXALL)
+					goto process_skb;
 				/* recycle both page and skb */
 				buffer_info->skb = skb;
 				/* an error means any chain goes out the window
@@ -4069,6 +4089,7 @@
 		}
 
 #define rxtop rx_ring->rx_skb_top
+process_skb:
 		if (!(status & E1000_RXD_STAT_EOP)) {
 			/* this descriptor is only the beginning (or middle) */
 			if (!rxtop) {
@@ -4276,12 +4297,15 @@
 				                       flags);
 				length--;
 			} else {
+				if (netdev->features & NETIF_F_RXALL)
+					goto process_skb;
 				/* recycle */
 				buffer_info->skb = skb;
 				goto next_desc;
 			}
 		}
 
+process_skb:
 		total_rx_bytes += (length - 4); /* don't count FCS */
 		total_rx_packets++;
 
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index bac9dda..fbc84d4 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -228,9 +228,7 @@
 	/* FWSM register */
 	mac->has_fwsm = true;
 	/* ARC supported; valid only if manageability features are enabled. */
-	mac->arc_subsystem_valid =
-	        (er32(FWSM) & E1000_FWSM_MODE_MASK)
-	                ? true : false;
+	mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK);
 	/* Adaptive IFS not supported */
 	mac->adaptive_ifs = false;
 
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index b3fdc69..609c18c 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -295,9 +295,8 @@
 		 * ARC supported; valid only if manageability features are
 		 * enabled.
 		 */
-		mac->arc_subsystem_valid =
-			(er32(FWSM) & E1000_FWSM_MODE_MASK)
-			? true : false;
+		mac->arc_subsystem_valid = !!(er32(FWSM) &
+					      E1000_FWSM_MODE_MASK);
 		break;
 	case e1000_82574:
 	case e1000_82583:
@@ -798,7 +797,7 @@
 	/* Check for pending operations. */
 	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
 		usleep_range(1000, 2000);
-		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
+		if (!(er32(EECD) & E1000_EECD_FLUPD))
 			break;
 	}
 
@@ -822,7 +821,7 @@
 
 	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
 		usleep_range(1000, 2000);
-		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
+		if (!(er32(EECD) & E1000_EECD_FLUPD))
 			break;
 	}
 
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index db35dd5..4f1edd9 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -259,8 +259,7 @@
 	 * cannot be changed
 	 */
 	if (hw->phy.ops.check_reset_block(hw)) {
-		e_err("Cannot change link characteristics when SoL/IDER is "
-		      "active.\n");
+		e_err("Cannot change link characteristics when SoL/IDER is active.\n");
 		return -EINVAL;
 	}
 
@@ -403,15 +402,15 @@
 	regs_buff[1]  = er32(STATUS);
 
 	regs_buff[2]  = er32(RCTL);
-	regs_buff[3]  = er32(RDLEN);
-	regs_buff[4]  = er32(RDH);
-	regs_buff[5]  = er32(RDT);
+	regs_buff[3]  = er32(RDLEN(0));
+	regs_buff[4]  = er32(RDH(0));
+	regs_buff[5]  = er32(RDT(0));
 	regs_buff[6]  = er32(RDTR);
 
 	regs_buff[7]  = er32(TCTL);
-	regs_buff[8]  = er32(TDLEN);
-	regs_buff[9]  = er32(TDH);
-	regs_buff[10] = er32(TDT);
+	regs_buff[8]  = er32(TDLEN(0));
+	regs_buff[9]  = er32(TDH(0));
+	regs_buff[10] = er32(TDT(0));
 	regs_buff[11] = er32(TIDV);
 
 	regs_buff[12] = adapter->hw.phy.type;  /* PHY type (IGP=1, M88=0) */
@@ -727,9 +726,8 @@
 				      (test[pat] & write));
 		val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
 		if (val != (test[pat] & write & mask)) {
-			e_err("pattern test reg %04X failed: got 0x%08X "
-			      "expected 0x%08X\n", reg + offset, val,
-			      (test[pat] & write & mask));
+			e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
+			      reg + offset, val, (test[pat] & write & mask));
 			*data = reg;
 			return 1;
 		}
@@ -744,8 +742,8 @@
 	__ew32(&adapter->hw, reg, write & mask);
 	val = __er32(&adapter->hw, reg);
 	if ((write & mask) != (val & mask)) {
-		e_err("set/check reg %04X test failed: got 0x%08X "
-		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
+		e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+		      reg, (val & mask), (write & mask));
 		*data = reg;
 		return 1;
 	}
@@ -797,8 +795,8 @@
 	ew32(STATUS, toggle);
 	after = er32(STATUS) & toggle;
 	if (value != after) {
-		e_err("failed STATUS register test got: 0x%08X expected: "
-		      "0x%08X\n", after, value);
+		e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+		      after, value);
 		*data = 1;
 		return 1;
 	}
@@ -813,15 +811,15 @@
 	}
 
 	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
-	REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
-	REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF);
-	REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF);
-	REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
+	REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
 	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
 	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
 	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
-	REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
-	REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF);
+	REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
 
 	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
 
@@ -830,10 +828,10 @@
 	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
 
 	REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
-	REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
 	if (!(adapter->flags & FLAG_IS_ICH))
 		REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
-	REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
 	REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
 	mask = 0x8003FFFF;
 	switch (mac->type) {
@@ -1104,11 +1102,11 @@
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 
-	ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
-	ew32(TDBAH, ((u64) tx_ring->dma >> 32));
-	ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
-	ew32(TDH, 0);
-	ew32(TDT, 0);
+	ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+	ew32(TDBAH(0), ((u64) tx_ring->dma >> 32));
+	ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
+	ew32(TDH(0), 0);
+	ew32(TDT(0), 0);
 	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
 	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
 	     E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
@@ -1168,11 +1166,11 @@
 	rctl = er32(RCTL);
 	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
-	ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
-	ew32(RDBAH, ((u64) rx_ring->dma >> 32));
-	ew32(RDLEN, rx_ring->size);
-	ew32(RDH, 0);
-	ew32(RDT, 0);
+	ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
+	ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
+	ew32(RDLEN(0), rx_ring->size);
+	ew32(RDH(0), 0);
+	ew32(RDT(0), 0);
 	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
 		E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
 		E1000_RCTL_SBP | E1000_RCTL_SECRC |
@@ -1534,7 +1532,7 @@
 	int ret_val = 0;
 	unsigned long time;
 
-	ew32(RDT, rx_ring->count - 1);
+	ew32(RDT(0), rx_ring->count - 1);
 
 	/*
 	 * Calculate the loop count based on the largest descriptor ring
@@ -1561,7 +1559,7 @@
 			if (k == tx_ring->count)
 				k = 0;
 		}
-		ew32(TDT, k);
+		ew32(TDT(0), k);
 		e1e_flush();
 		msleep(200);
 		time = jiffies; /* set the start time for the receive */
@@ -1791,8 +1789,7 @@
 		wol->supported &= ~WAKE_UCAST;
 
 		if (adapter->wol & E1000_WUFC_EX)
-			e_err("Interface does not support directed (unicast) "
-			      "frame wake-up packets\n");
+			e_err("Interface does not support directed (unicast) frame wake-up packets\n");
 	}
 
 	if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index f82ecf5..923d3fd 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -94,31 +94,40 @@
 	E1000_FCRTL    = 0x02160, /* Flow Control Receive Threshold Low - RW */
 	E1000_FCRTH    = 0x02168, /* Flow Control Receive Threshold High - RW */
 	E1000_PSRCTL   = 0x02170, /* Packet Split Receive Control - RW */
-	E1000_RDBAL    = 0x02800, /* Rx Descriptor Base Address Low - RW */
-	E1000_RDBAH    = 0x02804, /* Rx Descriptor Base Address High - RW */
-	E1000_RDLEN    = 0x02808, /* Rx Descriptor Length - RW */
-	E1000_RDH      = 0x02810, /* Rx Descriptor Head - RW */
-	E1000_RDT      = 0x02818, /* Rx Descriptor Tail - RW */
+/*
+ * Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+	E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
+#define E1000_RDBAL(_n)	(E1000_RDBAL_BASE + (_n << 8))
+	E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
+#define E1000_RDBAH(_n)	(E1000_RDBAH_BASE + (_n << 8))
+	E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
+#define E1000_RDLEN(_n)	(E1000_RDLEN_BASE + (_n << 8))
+	E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
+#define E1000_RDH(_n)	(E1000_RDH_BASE + (_n << 8))
+	E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
+#define E1000_RDT(_n)	(E1000_RDT_BASE + (_n << 8))
 	E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
 	E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
 #define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
 	E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
 
-/* Convenience macros
- *
- * Note: "_n" is the queue number of the register to be written to.
- *
- * Example usage:
- * E1000_RDBAL_REG(current_rx_queue)
- *
- */
-#define E1000_RDBAL_REG(_n)   (E1000_RDBAL + (_n << 8))
 	E1000_KABGTXD  = 0x03004, /* AFE Band Gap Transmit Ref Data */
-	E1000_TDBAL    = 0x03800, /* Tx Descriptor Base Address Low - RW */
-	E1000_TDBAH    = 0x03804, /* Tx Descriptor Base Address High - RW */
-	E1000_TDLEN    = 0x03808, /* Tx Descriptor Length - RW */
-	E1000_TDH      = 0x03810, /* Tx Descriptor Head - RW */
-	E1000_TDT      = 0x03818, /* Tx Descriptor Tail - RW */
+	E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
+#define E1000_TDBAL(_n)	(E1000_TDBAL_BASE + (_n << 8))
+	E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
+#define E1000_TDBAH(_n)	(E1000_TDBAH_BASE + (_n << 8))
+	E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
+#define E1000_TDLEN(_n)	(E1000_TDLEN_BASE + (_n << 8))
+	E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
+#define E1000_TDH(_n)	(E1000_TDH_BASE + (_n << 8))
+	E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
+#define E1000_TDT(_n)	(E1000_TDT_BASE + (_n << 8))
 	E1000_TIDV     = 0x03820, /* Tx Interrupt Delay Value - RW */
 	E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
 #define E1000_TXDCTL(_n)   (E1000_TXDCTL_BASE + (_n << 8))
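
Each RX/TX queue's register block on these MACs is spaced 0x100 bytes apart, so E1000_RDBAL(0) is still 0x02800 while E1000_RDBAL(1) resolves to 0x02900. An illustrative (not driver-provided) helper using the new per-queue macros:

/* Illustrative helper (not part of the driver) using the per-queue macros. */
static void example_setup_rx_ring(struct e1000_adapter *adapter, int q,
				  dma_addr_t dma, u32 len)
{
	struct e1000_hw *hw = &adapter->hw;	/* needed by the ew32() macro */

	ew32(RDBAL(q), dma & DMA_BIT_MASK(32));
	ew32(RDBAH(q), (u64)dma >> 32);
	ew32(RDLEN(q), len);
	ew32(RDH(q), 0);
	ew32(RDT(q), 0);
}
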
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 64c7644..14af3e2 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1310,10 +1310,6 @@
 
 		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
 			oem_reg |= HV_OEM_BITS_LPLU;
-
-		/* Set Restart auto-neg to activate the bits */
-		if (!hw->phy.ops.check_reset_block(hw))
-			oem_reg |= HV_OEM_BITS_RESTART_AN;
 	} else {
 		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
 			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
@@ -1324,6 +1320,11 @@
 			oem_reg |= HV_OEM_BITS_LPLU;
 	}
 
+	/* Set Restart auto-neg to activate the bits */
+	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
+	    !hw->phy.ops.check_reset_block(hw))
+		oem_reg |= HV_OEM_BITS_RESTART_AN;
+
 	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
 
 release:
@@ -2212,7 +2213,7 @@
 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 
 	/* Check if the flash descriptor is valid */
-	if (hsfsts.hsf_status.fldesvalid == 0) {
+	if (!hsfsts.hsf_status.fldesvalid) {
 		e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
 		return -E1000_ERR_NVM;
 	}
@@ -2232,7 +2233,7 @@
 	 * completed.
 	 */
 
-	if (hsfsts.hsf_status.flcinprog == 0) {
+	if (!hsfsts.hsf_status.flcinprog) {
 		/*
 		 * There is no cycle running at present,
 		 * so we can start a cycle.
@@ -2250,7 +2251,7 @@
 		 */
 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-			if (hsfsts.hsf_status.flcinprog == 0) {
+			if (!hsfsts.hsf_status.flcinprog) {
 				ret_val = 0;
 				break;
 			}
@@ -2292,12 +2293,12 @@
 	/* wait till FDONE bit is set to 1 */
 	do {
 		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-		if (hsfsts.hsf_status.flcdone == 1)
+		if (hsfsts.hsf_status.flcdone)
 			break;
 		udelay(1);
 	} while (i++ < timeout);
 
-	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
+	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
 		return 0;
 
 	return -E1000_ERR_NVM;
@@ -2408,10 +2409,10 @@
 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
 			 */
 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-			if (hsfsts.hsf_status.flcerr == 1) {
+			if (hsfsts.hsf_status.flcerr) {
 				/* Repeat for some time before giving up. */
 				continue;
-			} else if (hsfsts.hsf_status.flcdone == 0) {
+			} else if (!hsfsts.hsf_status.flcdone) {
 				e_dbg("Timeout error - flash cycle did not complete.\n");
 				break;
 			}
@@ -2641,7 +2642,7 @@
 	if (ret_val)
 		return ret_val;
 
-	if ((data & 0x40) == 0) {
+	if (!(data & 0x40)) {
 		data |= 0x40;
 		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
 		if (ret_val)
@@ -2759,10 +2760,10 @@
 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
 		 */
 		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-		if (hsfsts.hsf_status.flcerr == 1)
+		if (hsfsts.hsf_status.flcerr)
 			/* Repeat for some time before giving up. */
 			continue;
-		if (hsfsts.hsf_status.flcdone == 0) {
+		if (!hsfsts.hsf_status.flcdone) {
 			e_dbg("Timeout error - flash cycle did not complete.\n");
 			break;
 		}
@@ -2914,10 +2915,10 @@
 			 * a few more times else Done
 			 */
 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-			if (hsfsts.hsf_status.flcerr == 1)
+			if (hsfsts.hsf_status.flcerr)
 				/* repeat for some time before giving up */
 				continue;
-			else if (hsfsts.hsf_status.flcdone == 0)
+			else if (!hsfsts.hsf_status.flcdone)
 				return ret_val;
 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
 	}
@@ -3682,7 +3683,11 @@
 
 	if (hw->mac.type >= e1000_pchlan) {
 		e1000_oem_bits_config_ich8lan(hw, false);
-		e1000_phy_hw_reset_ich8lan(hw);
+
+		/* Reset PHY to activate OEM bits on 82577/8 */
+		if (hw->mac.type == e1000_pchlan)
+			e1000e_phy_hw_reset_generic(hw);
+
 		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			return;
@@ -3916,7 +3921,7 @@
 
 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
 	if (hw->mac.type <= e1000_ich9lan) {
-		if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
+		if (!(er32(EECD) & E1000_EECD_PRES) &&
 		    (hw->phy.type == e1000_phy_igp_3)) {
 			e1000e_phy_init_script_igp3(hw);
 		}
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index decad98..d832749 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -681,7 +681,7 @@
 		return ret_val;
 	}
 
-	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+	if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
 		hw->fc.requested_mode = e1000_fc_none;
 	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
 		hw->fc.requested_mode = e1000_fc_tx_pause;
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 473f8e7..bacc950 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -85,7 +85,7 @@
 
 	/* Check that the host interface is enabled. */
 	hicr = er32(HICR);
-	if ((hicr & E1000_HICR_EN) == 0) {
+	if (!(hicr & E1000_HICR_EN)) {
 		e_dbg("E1000_HOST_EN bit disabled.\n");
 		return -E1000_ERR_HOST_INTERFACE_COMMAND;
 	}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 19ab215..851f793 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION
+#define DRV_VERSION "1.10.6" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -110,14 +110,14 @@
 
 	/* Rx Registers */
 	{E1000_RCTL, "RCTL"},
-	{E1000_RDLEN, "RDLEN"},
-	{E1000_RDH, "RDH"},
-	{E1000_RDT, "RDT"},
+	{E1000_RDLEN(0), "RDLEN"},
+	{E1000_RDH(0), "RDH"},
+	{E1000_RDT(0), "RDT"},
 	{E1000_RDTR, "RDTR"},
 	{E1000_RXDCTL(0), "RXDCTL"},
 	{E1000_ERT, "ERT"},
-	{E1000_RDBAL, "RDBAL"},
-	{E1000_RDBAH, "RDBAH"},
+	{E1000_RDBAL(0), "RDBAL"},
+	{E1000_RDBAH(0), "RDBAH"},
 	{E1000_RDFH, "RDFH"},
 	{E1000_RDFT, "RDFT"},
 	{E1000_RDFHS, "RDFHS"},
@@ -126,11 +126,11 @@
 
 	/* Tx Registers */
 	{E1000_TCTL, "TCTL"},
-	{E1000_TDBAL, "TDBAL"},
-	{E1000_TDBAH, "TDBAH"},
-	{E1000_TDLEN, "TDLEN"},
-	{E1000_TDH, "TDH"},
-	{E1000_TDT, "TDT"},
+	{E1000_TDBAL(0), "TDBAL"},
+	{E1000_TDBAH(0), "TDBAH"},
+	{E1000_TDLEN(0), "TDLEN"},
+	{E1000_TDH(0), "TDH"},
+	{E1000_TDT(0), "TDT"},
 	{E1000_TIDV, "TIDV"},
 	{E1000_TXDCTL(0), "TXDCTL"},
 	{E1000_TADV, "TADV"},
@@ -1053,7 +1053,8 @@
 
 	if (!adapter->tx_hang_recheck &&
 	    (adapter->flags2 & FLAG2_DMA_BURST)) {
-		/* May be block on write-back, flush and detect again
+		/*
+		 * May be blocked on write-back, flush and detect again
 		 * flush pending descriptor writebacks to memory
 		 */
 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -2530,33 +2531,31 @@
 }
 
 /**
- * e1000_clean - NAPI Rx polling callback
+ * e1000e_poll - NAPI Rx polling callback
  * @napi: struct associated with this polling callback
- * @budget: amount of packets driver is allowed to process this poll
+ * @weight: number of packets driver is allowed to process this poll
  **/
-static int e1000_clean(struct napi_struct *napi, int budget)
+static int e1000e_poll(struct napi_struct *napi, int weight)
 {
-	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+						     napi);
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *poll_dev = adapter->netdev;
 	int tx_cleaned = 1, work_done = 0;
 
 	adapter = netdev_priv(poll_dev);
 
-	if (adapter->msix_entries &&
-	    !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
-		goto clean_rx;
+	if (!adapter->msix_entries ||
+	    (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
+		tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
 
-	tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
-
-clean_rx:
-	adapter->clean_rx(adapter->rx_ring, &work_done, budget);
+	adapter->clean_rx(adapter->rx_ring, &work_done, weight);
 
 	if (!tx_cleaned)
-		work_done = budget;
+		work_done = weight;
 
-	/* If budget not fully consumed, exit the polling mode */
-	if (work_done < budget) {
+	/* If weight not fully consumed, exit the polling mode */
+	if (work_done < weight) {
 		if (adapter->itr_setting & 3)
 			e1000_set_itr(adapter);
 		napi_complete(napi);
@@ -2800,13 +2799,13 @@
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	tdba = tx_ring->dma;
 	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
-	ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
-	ew32(TDBAH, (tdba >> 32));
-	ew32(TDLEN, tdlen);
-	ew32(TDH, 0);
-	ew32(TDT, 0);
-	tx_ring->head = adapter->hw.hw_addr + E1000_TDH;
-	tx_ring->tail = adapter->hw.hw_addr + E1000_TDT;
+	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
+	ew32(TDBAH(0), (tdba >> 32));
+	ew32(TDLEN(0), tdlen);
+	ew32(TDH(0), 0);
+	ew32(TDT(0), 0);
+	tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
+	tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
 
 	/* Set the Tx Interrupt Delay register */
 	ew32(TIDV, adapter->tx_int_delay);
@@ -3110,13 +3109,13 @@
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
 	rdba = rx_ring->dma;
-	ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
-	ew32(RDBAH, (rdba >> 32));
-	ew32(RDLEN, rdlen);
-	ew32(RDH, 0);
-	ew32(RDT, 0);
-	rx_ring->head = adapter->hw.hw_addr + E1000_RDH;
-	rx_ring->tail = adapter->hw.hw_addr + E1000_RDT;
+	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
+	ew32(RDBAH(0), (rdba >> 32));
+	ew32(RDLEN(0), rdlen);
+	ew32(RDH(0), 0);
+	ew32(RDT(0), 0);
+	rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
+	rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
 
 	/* Enable Receive Checksum Offload for TCP and UDP */
 	rxcsum = er32(RXCSUM);
@@ -6226,7 +6225,7 @@
 	netdev->netdev_ops		= &e1000e_netdev_ops;
 	e1000e_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo		= 5 * HZ;
-	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+	netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
 	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
 
 	netdev->mem_start = mmio_start;
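
The netdev.c changes above replace the single-queue register names (RDH, RDT, TDH, ...) with their per-queue macro forms, always passing queue index 0. A rough standalone sketch of how such parameterized register macros typically behave is below; the base offset and stride are illustrative placeholders, not the driver's actual register map.

#include <stdio.h>

/* Hypothetical per-queue register layout: one copy of each ring
 * register per queue, spaced RX_QUEUE_STRIDE bytes apart. */
#define RX_DESC_HEAD_BASE	0x02810
#define RX_QUEUE_STRIDE		0x100
#define EXAMPLE_RDH(n)		(RX_DESC_HEAD_BASE + (n) * RX_QUEUE_STRIDE)

int main(void)
{
	/* Queue 0 resolves to the legacy single-queue offset, so the
	 * converted ew32(RDH(0), ...) calls touch the same register
	 * the old scalar define did. */
	printf("RDH(0) = 0x%05x, RDH(1) = 0x%05x\n",
	       EXAMPLE_RDH(0), EXAMPLE_RDH(1));
	return 0;
}
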
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ff796e4..feb6eeb 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -166,8 +166,8 @@
  *
  * Default Value: 1 (enabled)
  */
-E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
-                          "the CRC");
+E1000_PARAM(CrcStripping,
+	    "Enable CRC Stripping, disable if your BMC needs the CRC");
 
 struct e1000_option {
 	enum { enable_option, range_option, list_option } type;
@@ -360,8 +360,8 @@
 				adapter->itr = 20000;
 				break;
 			case 4:
-				e_info("%s set to simplified (2000-8000 ints) "
-				       "mode\n", opt.name);
+				e_info("%s set to simplified (2000-8000 ints) mode\n",
+				       opt.name);
 				adapter->itr_setting = 4;
 				break;
 			default:
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 35b4557..bd5ef64 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -718,7 +718,7 @@
 	 *   1 - Enabled
 	 */
 	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
-	if (phy->disable_polarity_correction == 1)
+	if (phy->disable_polarity_correction)
 		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
 
 	/* Enable downshift on BM (disabled by default) */
@@ -1090,7 +1090,7 @@
 	 * If autoneg_advertised is zero, we assume it was not defaulted
 	 * by the calling code so we set to advertise full capability.
 	 */
-	if (phy->autoneg_advertised == 0)
+	if (!phy->autoneg_advertised)
 		phy->autoneg_advertised = phy->autoneg_mask;
 
 	e_dbg("Reconfiguring auto-neg advertisement params\n");
@@ -1596,7 +1596,7 @@
 	ret_val = e1e_rphy(hw, offset, &phy_data);
 
 	if (!ret_val)
-		phy->speed_downgraded = (phy_data & mask);
+		phy->speed_downgraded = !!(phy_data & mask);
 
 	return ret_val;
 }
@@ -1925,8 +1925,8 @@
 	if (ret_val)
 		return ret_val;
 
-	phy->polarity_correction = (phy_data &
-				    M88E1000_PSCR_POLARITY_REVERSAL);
+	phy->polarity_correction = !!(phy_data &
+				      M88E1000_PSCR_POLARITY_REVERSAL);
 
 	ret_val = e1000_check_polarity_m88(hw);
 	if (ret_val)
@@ -1936,7 +1936,7 @@
 	if (ret_val)
 		return ret_val;
 
-	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX);
+	phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
 
 	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
 		ret_val = e1000_get_cable_length(hw);
@@ -1999,7 +1999,7 @@
 	if (ret_val)
 		return ret_val;
 
-	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);
+	phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
 
 	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
 	    IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -2052,8 +2052,7 @@
 	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
 	if (ret_val)
 		return ret_val;
-	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
-	                           ? false : true;
+	phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
 
 	if (phy->polarity_correction) {
 		ret_val = e1000_check_polarity_ife(hw);
@@ -2070,7 +2069,7 @@
 	if (ret_val)
 		return ret_val;
 
-	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;
+	phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
 
 	/* The following parameters are undefined for 10/100 operation. */
 	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
@@ -2979,7 +2978,7 @@
 		if ((hw->phy.type == e1000_phy_82578) &&
 		    (hw->phy.revision >= 1) &&
 		    (hw->phy.addr == 2) &&
-		    ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) {
+		    !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
 			u16 data2 = 0x7EFF;
 			ret_val = e1000_access_phy_debug_regs_hv(hw,
 								 (1 << 6) | 0x3,
@@ -3265,7 +3264,7 @@
 	if (ret_val)
 		return ret_val;
 
-	phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false;
+	phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
 
 	if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
 	    I82577_PHY_STATUS2_SPEED_1000MBPS) {
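
Several of the phy.c hunks above replace direct mask assignments with !!(...) when the destination is a boolean field. A small user-space sketch of why the normalization matters; the mask value is assumed here purely for illustration.

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MDIX_MASK 0x0040	/* stand-in for M88E1000_PSSR_MDIX */

int main(void)
{
	unsigned short phy_data = 0x0040;			/* MDI-X status bit set */
	unsigned char as_u8 = phy_data & EXAMPLE_MDIX_MASK;	/* stores 0x40, not 1   */
	bool as_bool = !!(phy_data & EXAMPLE_MDIX_MASK);	/* stores exactly 1     */

	/* Without the !!, the raw mask result leaks into the field; with a
	 * mask wider than the destination type the value could even truncate
	 * to 0, so the double negation makes the 0/1 intent explicit. */
	printf("as_u8=0x%02x as_bool=%d\n", as_u8, as_bool);
	return 0;
}
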
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 6565c46..4bd16e2 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -35,3 +35,4 @@
 igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
 	    e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
 
+igb-$(CONFIG_IGB_PTP) += igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8e33bdd..3758ad2 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -35,8 +35,8 @@
 #include "e1000_82575.h"
 
 #include <linux/clocksource.h>
-#include <linux/timecompare.h>
 #include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
 #include <linux/bitops.h>
 #include <linux/if_vlan.h>
 
@@ -328,9 +328,6 @@
 
 	/* OS defined structs */
 	struct pci_dev *pdev;
-	struct cyclecounter cycles;
-	struct timecounter clock;
-	struct timecompare compare;
 	struct hwtstamp_config hwtstamp_config;
 
 	spinlock_t stats64_lock;
@@ -364,6 +361,13 @@
 	u32 wvbr;
 	int node;
 	u32 *shadow_vfta;
+
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info caps;
+	struct delayed_work overflow_work;
+	spinlock_t tmreg_lock;
+	struct cyclecounter cc;
+	struct timecounter tc;
 };
 
 #define IGB_FLAG_HAS_MSI           (1 << 0)
@@ -378,7 +382,6 @@
 #define IGB_DMCTLX_DCFLUSH_DIS     0x80000000  /* Disable DMA Coal Flush */
 
 #define IGB_82576_TSYNC_SHIFT 19
-#define IGB_82580_TSYNC_SHIFT 24
 #define IGB_TS_HDR_LEN        16
 enum e1000_state_t {
 	__IGB_TESTING,
@@ -414,7 +417,15 @@
 extern bool igb_has_link(struct igb_adapter *adapter);
 extern void igb_set_ethtool_ops(struct net_device *);
 extern void igb_power_up_link(struct igb_adapter *);
+#ifdef CONFIG_IGB_PTP
+extern void igb_ptp_init(struct igb_adapter *adapter);
+extern void igb_ptp_remove(struct igb_adapter *adapter);
 
+extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+				   struct skb_shared_hwtstamps *hwtstamps,
+				   u64 systim);
+
+#endif
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
 	if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5ec3159..28a37bb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,8 +60,8 @@
 #include "igb.h"
 
 #define MAJ 3
-#define MIN 2
-#define BUILD 10
+#define MIN 4
+#define BUILD 7
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -114,7 +114,6 @@
 static void igb_setup_mrqc(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
-static void igb_init_hw_timer(struct igb_adapter *adapter);
 static int igb_sw_init(struct igb_adapter *);
 static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
@@ -565,33 +564,6 @@
 	return;
 }
 
-
-/**
- * igb_read_clock - read raw cycle counter (to be used by time counter)
- */
-static cycle_t igb_read_clock(const struct cyclecounter *tc)
-{
-	struct igb_adapter *adapter =
-		container_of(tc, struct igb_adapter, cycles);
-	struct e1000_hw *hw = &adapter->hw;
-	u64 stamp = 0;
-	int shift = 0;
-
-	/*
-	 * The timestamp latches on lowest register read. For the 82580
-	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
-	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
-	 */
-	if (hw->mac.type >= e1000_82580) {
-		stamp = rd32(E1000_SYSTIMR) >> 8;
-		shift = IGB_82580_TSYNC_SHIFT;
-	}
-
-	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
-	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
-	return stamp;
-}
-
 /**
  * igb_get_hw_dev - return device
  * used by hardware layer to print debugging information
@@ -2110,9 +2082,11 @@
 	}
 
 #endif
+#ifdef CONFIG_IGB_PTP
 	/* do hw tstamp init after resetting */
-	igb_init_hw_timer(adapter);
+	igb_ptp_init(adapter);
 
+#endif
 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
 	/* print bus type/speed/width info */
 	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2184,7 +2158,10 @@
 	struct e1000_hw *hw = &adapter->hw;
 
 	pm_runtime_get_noresume(&pdev->dev);
+#ifdef CONFIG_IGB_PTP
+	igb_ptp_remove(adapter);
 
+#endif
 	/*
 	 * The watchdog timer may be rescheduled, so explicitly
 	 * disable watchdog from being rescheduled.
@@ -2304,112 +2281,6 @@
 }
 
 /**
- * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
- * @adapter: board private structure to initialize
- *
- * igb_init_hw_timer initializes the function pointer and values for the hw
- * timer found in hardware.
- **/
-static void igb_init_hw_timer(struct igb_adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-
-	switch (hw->mac.type) {
-	case e1000_i350:
-	case e1000_82580:
-		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-		adapter->cycles.read = igb_read_clock;
-		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-		adapter->cycles.mult = 1;
-		/*
-		 * The 82580 timesync updates the system timer every 8ns by 8ns
-		 * and the value cannot be shifted.  Instead we need to shift
-		 * the registers to generate a 64bit timer value.  As a result
-		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
-		 * 24 in order to generate a larger value for synchronization.
-		 */
-		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
-		/* disable system timer temporarily by setting bit 31 */
-		wr32(E1000_TSAUXC, 0x80000000);
-		wrfl();
-
-		/* Set registers so that rollover occurs soon to test this. */
-		wr32(E1000_SYSTIMR, 0x00000000);
-		wr32(E1000_SYSTIML, 0x80000000);
-		wr32(E1000_SYSTIMH, 0x000000FF);
-		wrfl();
-
-		/* enable system timer by clearing bit 31 */
-		wr32(E1000_TSAUXC, 0x0);
-		wrfl();
-
-		timecounter_init(&adapter->clock,
-				 &adapter->cycles,
-				 ktime_to_ns(ktime_get_real()));
-		/*
-		 * Synchronize our NIC clock against system wall clock. NIC
-		 * time stamp reading requires ~3us per sample, each sample
-		 * was pretty stable even under load => only require 10
-		 * samples for each offset comparison.
-		 */
-		memset(&adapter->compare, 0, sizeof(adapter->compare));
-		adapter->compare.source = &adapter->clock;
-		adapter->compare.target = ktime_get_real;
-		adapter->compare.num_samples = 10;
-		timecompare_update(&adapter->compare, 0);
-		break;
-	case e1000_82576:
-		/*
-		 * Initialize hardware timer: we keep it running just in case
-		 * that some program needs it later on.
-		 */
-		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-		adapter->cycles.read = igb_read_clock;
-		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-		adapter->cycles.mult = 1;
-		/**
-		 * Scale the NIC clock cycle by a large factor so that
-		 * relatively small clock corrections can be added or
-		 * subtracted at each clock tick. The drawbacks of a large
-		 * factor are a) that the clock register overflows more quickly
-		 * (not such a big deal) and b) that the increment per tick has
-		 * to fit into 24 bits.  As a result we need to use a shift of
-		 * 19 so we can fit a value of 16 into the TIMINCA register.
-		 */
-		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
-		wr32(E1000_TIMINCA,
-		                (1 << E1000_TIMINCA_16NS_SHIFT) |
-		                (16 << IGB_82576_TSYNC_SHIFT));
-
-		/* Set registers so that rollover occurs soon to test this. */
-		wr32(E1000_SYSTIML, 0x00000000);
-		wr32(E1000_SYSTIMH, 0xFF800000);
-		wrfl();
-
-		timecounter_init(&adapter->clock,
-				 &adapter->cycles,
-				 ktime_to_ns(ktime_get_real()));
-		/*
-		 * Synchronize our NIC clock against system wall clock. NIC
-		 * time stamp reading requires ~3us per sample, each sample
-		 * was pretty stable even under load => only require 10
-		 * samples for each offset comparison.
-		 */
-		memset(&adapter->compare, 0, sizeof(adapter->compare));
-		adapter->compare.source = &adapter->clock;
-		adapter->compare.target = ktime_get_real;
-		adapter->compare.num_samples = 10;
-		timecompare_update(&adapter->compare, 0);
-		break;
-	case e1000_82575:
-		/* 82575 does not support timesync */
-	default:
-		break;
-	}
-
-}
-
-/**
  * igb_sw_init - Initialize general software structures (struct igb_adapter)
  * @adapter: board private structure to initialize
  *
@@ -5718,35 +5589,7 @@
 	return 0;
 }
 
-/**
- * igb_systim_to_hwtstamp - convert system time value to hw timestamp
- * @adapter: board private structure
- * @shhwtstamps: timestamp structure to update
- * @regval: unsigned 64bit system time value.
- *
- * We need to convert the system time value stored in the RX/TXSTMP registers
- * into a hwtstamp which can be used by the upper level timestamping functions
- */
-static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
-                                   struct skb_shared_hwtstamps *shhwtstamps,
-                                   u64 regval)
-{
-	u64 ns;
-
-	/*
-	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
-	 * 24 to match clock shift we setup earlier.
-	 */
-	if (adapter->hw.mac.type >= e1000_82580)
-		regval <<= IGB_82580_TSYNC_SHIFT;
-
-	ns = timecounter_cyc2time(&adapter->clock, regval);
-	timecompare_update(&adapter->compare, ns);
-	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
-	shhwtstamps->hwtstamp = ns_to_ktime(ns);
-	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
-}
-
+#ifdef CONFIG_IGB_PTP
 /**
  * igb_tx_hwtstamp - utility function which checks for TX time stamp
  * @q_vector: pointer to q_vector containing needed info
@@ -5776,6 +5619,7 @@
 	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
 }
 
+#endif
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: pointer to q_vector containing needed info
@@ -5819,9 +5663,11 @@
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
 
+#ifdef CONFIG_IGB_PTP
 		/* retrieve hardware timestamp */
 		igb_tx_hwtstamp(q_vector, tx_buffer);
 
+#endif
 		/* free the skb */
 		dev_kfree_skb_any(tx_buffer->skb);
 		tx_buffer->skb = NULL;
@@ -5993,6 +5839,7 @@
 		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 }
 
+#ifdef CONFIG_IGB_PTP
 static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
 			    union e1000_adv_rx_desc *rx_desc,
 			    struct sk_buff *skb)
@@ -6032,6 +5879,7 @@
 	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 }
 
+#endif
 static void igb_rx_vlan(struct igb_ring *ring,
 			union e1000_adv_rx_desc *rx_desc,
 			struct sk_buff *skb)
@@ -6142,7 +5990,9 @@
 			goto next_desc;
 		}
 
+#ifdef CONFIG_IGB_PTP
 		igb_rx_hwtstamp(q_vector, rx_desc, skb);
+#endif
 		igb_rx_hash(rx_ring, rx_desc, skb);
 		igb_rx_checksum(rx_ring, rx_desc, skb);
 		igb_rx_vlan(rx_ring, rx_desc, skb);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
new file mode 100644
index 0000000..c9b71c5
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -0,0 +1,381 @@
+/*
+ * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
+ *
+ * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "igb.h"
+
+#define INCVALUE_MASK		0x7fffffff
+#define ISGN			0x80000000
+
+/*
+ * The 82580 timesync updates the system timer every 8ns by 8ns,
+ * and this update value cannot be reprogrammed.
+ *
+ * Neither the 82576 nor the 82580 offer registers wide enough to hold
+ * nanoseconds time values for very long. For the 82580, SYSTIM always
+ * counts nanoseconds, but the upper 24 bits are not available. The
+ * frequency is adjusted by changing the 32 bit fractional nanoseconds
+ * register, TIMINCA.
+ *
+ * For the 82576, the SYSTIM register time unit is affected by the
+ * choice of the 24 bit TIMINCA:IV (incvalue) field. Five bits of this
+ * field are needed to provide the nominal 16 nanosecond period,
+ * leaving 19 bits for fractional nanoseconds.
+ *
+ * We scale the NIC clock cycle by a large factor so that relatively
+ * small clock corrections can be added or subtracted at each clock
+ * tick. The drawbacks of a large factor are a) that the clock
+ * register overflows more quickly (not such a big deal) and b) that
+ * the increment per tick has to fit into 24 bits.  As a result we
+ * need to use a shift of 19 so we can fit a value of 16 into the
+ * TIMINCA register.
+ *
+ *
+ *             SYSTIMH            SYSTIML
+ *        +--------------+   +---+---+------+
+ *  82576 |      32      |   | 8 | 5 |  19  |
+ *        +--------------+   +---+---+------+
+ *         \________ 45 bits _______/  fract
+ *
+ *        +----------+---+   +--------------+
+ *  82580 |    24    | 8 |   |      32      |
+ *        +----------+---+   +--------------+
+ *          reserved  \______ 40 bits _____/
+ *
+ *
+ * The 45 bit 82576 SYSTIM overflows every
+ *   2^45 * 10^-9 / 3600 = 9.77 hours.
+ *
+ * The 40 bit 82580 SYSTIM overflows every
+ *   2^40 * 10^-9 /  60  = 18.3 minutes.
+ */
+
+#define IGB_OVERFLOW_PERIOD	(HZ * 60 * 9)
+#define INCPERIOD_82576		(1 << E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK	((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
+#define INCVALUE_82576		(16 << IGB_82576_TSYNC_SHIFT)
+#define IGB_NBITS_82580		40
+
+/*
+ * SYSTIM read access for the 82576
+ */
+
+static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
+{
+	u64 val;
+	u32 lo, hi;
+	struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+	struct e1000_hw *hw = &igb->hw;
+
+	lo = rd32(E1000_SYSTIML);
+	hi = rd32(E1000_SYSTIMH);
+
+	val = ((u64) hi) << 32;
+	val |= lo;
+
+	return val;
+}
+
+/*
+ * SYSTIM read access for the 82580
+ */
+
+static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
+{
+	u64 val;
+	u32 lo, hi, jk;
+	struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+	struct e1000_hw *hw = &igb->hw;
+
+	/*
+	 * The timestamp latches on lowest register read. For the 82580
+	 * the lowest register is SYSTIMR instead of SYSTIML.  However we only
+	 * need to provide nanosecond resolution, so we just ignore it.
+	 */
+	jk = rd32(E1000_SYSTIMR);
+	lo = rd32(E1000_SYSTIML);
+	hi = rd32(E1000_SYSTIMH);
+
+	val = ((u64) hi) << 32;
+	val |= lo;
+
+	return val;
+}
+
+/*
+ * PTP clock operations
+ */
+
+static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	u64 rate;
+	u32 incvalue;
+	int neg_adj = 0;
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+	struct e1000_hw *hw = &igb->hw;
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
+	rate = ppb;
+	rate <<= 14;
+	rate = div_u64(rate, 1953125);
+
+	incvalue = 16 << IGB_82576_TSYNC_SHIFT;
+
+	if (neg_adj)
+		incvalue -= rate;
+	else
+		incvalue += rate;
+
+	wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
+
+	return 0;
+}
+
+static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	u64 rate;
+	u32 inca;
+	int neg_adj = 0;
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+	struct e1000_hw *hw = &igb->hw;
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
+	rate = ppb;
+	rate <<= 26;
+	rate = div_u64(rate, 1953125);
+
+	inca = rate & INCVALUE_MASK;
+	if (neg_adj)
+		inca |= ISGN;
+
+	wr32(E1000_TIMINCA, inca);
+
+	return 0;
+}
+
+static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	s64 now;
+	unsigned long flags;
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+	spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+	now = timecounter_read(&igb->tc);
+	now += delta;
+	timecounter_init(&igb->tc, &igb->cc, now);
+
+	spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+	return 0;
+}
+
+static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	u64 ns;
+	u32 remainder;
+	unsigned long flags;
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+	spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+	ns = timecounter_read(&igb->tc);
+
+	spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+	ts->tv_nsec = remainder;
+
+	return 0;
+}
+
+static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
+{
+	u64 ns;
+	unsigned long flags;
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+	ns = ts->tv_sec * 1000000000ULL;
+	ns += ts->tv_nsec;
+
+	spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+	timecounter_init(&igb->tc, &igb->cc, ns);
+
+	spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+	return 0;
+}
+
+static int ptp_82576_enable(struct ptp_clock_info *ptp,
+			    struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static int ptp_82580_enable(struct ptp_clock_info *ptp,
+			    struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static void igb_overflow_check(struct work_struct *work)
+{
+	struct timespec ts;
+	struct igb_adapter *igb =
+		container_of(work, struct igb_adapter, overflow_work.work);
+
+	igb_gettime(&igb->caps, &ts);
+
+	pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+
+	schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD);
+}
+
+void igb_ptp_init(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	switch (hw->mac.type) {
+	case e1000_i350:
+	case e1000_82580:
+		adapter->caps.owner	= THIS_MODULE;
+		strcpy(adapter->caps.name, "igb-82580");
+		adapter->caps.max_adj	= 62499999;
+		adapter->caps.n_ext_ts	= 0;
+		adapter->caps.pps	= 0;
+		adapter->caps.adjfreq	= ptp_82580_adjfreq;
+		adapter->caps.adjtime	= igb_adjtime;
+		adapter->caps.gettime	= igb_gettime;
+		adapter->caps.settime	= igb_settime;
+		adapter->caps.enable	= ptp_82580_enable;
+		adapter->cc.read	= igb_82580_systim_read;
+		adapter->cc.mask	= CLOCKSOURCE_MASK(IGB_NBITS_82580);
+		adapter->cc.mult	= 1;
+		adapter->cc.shift	= 0;
+		/* Enable the timer functions by clearing bit 31. */
+		wr32(E1000_TSAUXC, 0x0);
+		break;
+
+	case e1000_82576:
+		adapter->caps.owner	= THIS_MODULE;
+		strcpy(adapter->caps.name, "igb-82576");
+		adapter->caps.max_adj	= 1000000000;
+		adapter->caps.n_ext_ts	= 0;
+		adapter->caps.pps	= 0;
+		adapter->caps.adjfreq	= ptp_82576_adjfreq;
+		adapter->caps.adjtime	= igb_adjtime;
+		adapter->caps.gettime	= igb_gettime;
+		adapter->caps.settime	= igb_settime;
+		adapter->caps.enable	= ptp_82576_enable;
+		adapter->cc.read	= igb_82576_systim_read;
+		adapter->cc.mask	= CLOCKSOURCE_MASK(64);
+		adapter->cc.mult	= 1;
+		adapter->cc.shift	= IGB_82576_TSYNC_SHIFT;
+		/* Dial the nominal frequency. */
+		wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+		break;
+
+	default:
+		adapter->ptp_clock = NULL;
+		return;
+	}
+
+	wrfl();
+
+	timecounter_init(&adapter->tc, &adapter->cc,
+			 ktime_to_ns(ktime_get_real()));
+
+	INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check);
+
+	spin_lock_init(&adapter->tmreg_lock);
+
+	schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD);
+
+	adapter->ptp_clock = ptp_clock_register(&adapter->caps);
+	if (IS_ERR(adapter->ptp_clock)) {
+		adapter->ptp_clock = NULL;
+		dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
+	} else
+		dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+			 adapter->netdev->name);
+}
+
+void igb_ptp_remove(struct igb_adapter *adapter)
+{
+	cancel_delayed_work_sync(&adapter->overflow_work);
+
+	if (adapter->ptp_clock) {
+		ptp_clock_unregister(adapter->ptp_clock);
+		dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
+			 adapter->netdev->name);
+	}
+}
+
+/**
+ * igb_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * The 'tmreg_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two (or three) 32 bit registers. The first
+ * read latches the value. Ditto for writing.
+ *
+ * In addition, we have extended the system time with an overflow
+ * counter in software.
+ **/
+void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+			    struct skb_shared_hwtstamps *hwtstamps,
+			    u64 systim)
+{
+	u64 ns;
+	unsigned long flags;
+
+	switch (adapter->hw.mac.type) {
+	case e1000_i350:
+	case e1000_82580:
+	case e1000_82576:
+		break;
+	default:
+		return;
+	}
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+	ns = timecounter_cyc2time(&adapter->tc, systim);
+
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	memset(hwtstamps, 0, sizeof(*hwtstamps));
+	hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
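
The two adjfreq callbacks in igb_ptp.c scale the requested parts-per-billion offset into the device's fractional-nanosecond increment. A user-space sketch of the 82576 arithmetic, with the constants taken from the file above and one worked example value; negative adjustments and the TIMINCA write itself are omitted.

#include <stdint.h>
#include <stdio.h>

#define TSYNC_SHIFT_82576	19				/* fractional-ns bits  */
#define INCVALUE_82576		(16u << TSYNC_SHIFT_82576)	/* nominal 16 ns/tick  */

int main(void)
{
	int32_t ppb = 125000;			/* request a +125 ppm offset */
	uint64_t rate = (uint64_t)ppb << 14;

	/* 1953125 == 10^9 / 2^9, so this computes ppb * 2^23 / 10^9,
	 * i.e. the requested fraction of the nominal 16 << 19 increment. */
	rate /= 1953125;

	printf("incvalue = %u (nominal %u + delta %llu)\n",
	       INCVALUE_82576 + (uint32_t)rate, INCVALUE_82576,
	       (unsigned long long)rate);
	return 0;
}
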
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 85d2e2c..56fd468 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -91,29 +91,6 @@
 	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
 }
 
-/**
- *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
- *  @hw: pointer to hardware structure
- *
- *  Read PCIe configuration space, and get the MSI-X vector count from
- *  the capabilities table.
- **/
-static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
-{
-	struct ixgbe_adapter *adapter = hw->back;
-	u16 msix_count;
-	pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS,
-	                     &msix_count);
-	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
-	/* MSI-X count is zero-based in HW, so increment to give proper value */
-	msix_count++;
-
-	return msix_count;
-}
-
-/**
- */
 static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
@@ -126,7 +103,7 @@
 	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
 	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
 	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
-	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 49aa41f..e598881 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2783,17 +2783,36 @@
  *  Read PCIe configuration space, and get the MSI-X vector count from
  *  the capabilities table.
  **/
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
 {
 	struct ixgbe_adapter *adapter = hw->back;
-	u16 msix_count;
-	pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
-	                     &msix_count);
+	u16 msix_count = 1;
+	u16 max_msix_count;
+	u16 pcie_offset;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
+		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
+		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+		break;
+	default:
+		return msix_count;
+	}
+
+	pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
 
-	/* MSI-X count is zero-based in HW, so increment to give proper value */
+	/* MSI-X count is zero-based in HW */
 	msix_count++;
 
+	if (msix_count > max_msix_count)
+		msix_count = max_msix_count;
+
 	return msix_count;
 }
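
The rewritten ixgbe_get_pcie_msix_count_generic() reads the MSI-X table-size field, corrects for its zero-based encoding, and now clamps the result to a per-MAC maximum. A standalone sketch of that arithmetic follows; the raw capability word is passed in directly instead of being read with pci_read_config_word(), and the limit value mirrors the 82599 define added in ixgbe_type.h.

#include <stdint.h>
#include <stdio.h>

#define MSIX_TBL_SZ_MASK	0x7FF	/* table-size field of the capability */
#define MAX_VECTORS_82599	0x40

static uint16_t msix_count_from_caps(uint16_t caps_word, uint16_t max_vectors)
{
	/* MSI-X count is zero-based in hardware, so add one. */
	uint16_t count = (caps_word & MSIX_TBL_SZ_MASK) + 1;

	return count > max_vectors ? max_vectors : count;
}

int main(void)
{
	/* 0x3F encodes 64 vectors, which the 82599 limit leaves untouched. */
	printf("%u\n", msix_count_from_caps(0x003F, MAX_VECTORS_82599));
	/* A bogus all-ones field would otherwise report 2048 vectors. */
	printf("%u\n", msix_count_from_caps(0x07FF, MAX_VECTORS_82599));
	return 0;
}
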
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 204f062..d6d3432 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -31,7 +31,7 @@
 #include "ixgbe_type.h"
 #include "ixgbe.h"
 
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 027d7a7..ed1b47d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -622,6 +622,16 @@
 		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
 			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
 
+#ifdef IXGBE_FCOE
+		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
+			struct ixgbe_ring_feature *f;
+			f = &adapter->ring_feature[RING_F_FCOE];
+			if ((rxr_idx >= f->mask) &&
+			    (rxr_idx < f->mask + f->indices))
+				set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
+		}
+
+#endif /* IXGBE_FCOE */
 		/* apply Rx specific ring traits */
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = rxr_idx;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e26b1f..7c4325ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2904,33 +2904,6 @@
 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
 }
 
-/**
- *  ixgbe_set_uta - Set unicast filter table address
- *  @adapter: board private structure
- *
- *  The unicast table address is a register array of 32-bit registers.
- *  The table is meant to be used in a way similar to how the MTA is used
- *  however due to certain limitations in the hardware it is necessary to
- *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
- *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
- **/
-static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	int i;
-
-	/* The UTA table only exists on 82599 hardware and newer */
-	if (hw->mac.type < ixgbe_mac_82599EB)
-		return;
-
-	/* we only need to do this if VMDq is enabled */
-	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
-		return;
-
-	for (i = 0; i < 128; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
-}
-
 #define IXGBE_MAX_RX_DESC_POLL 10
 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 				       struct ixgbe_ring *ring)
@@ -3154,14 +3127,6 @@
 			set_ring_rsc_enabled(rx_ring);
 		else
 			clear_ring_rsc_enabled(rx_ring);
-#ifdef IXGBE_FCOE
-		if (netdev->features & NETIF_F_FCOE_MTU) {
-			struct ixgbe_ring_feature *f;
-			f = &adapter->ring_feature[RING_F_FCOE];
-			if ((i >= f->mask) && (i < f->mask + f->indices))
-				set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state);
-		}
-#endif /* IXGBE_FCOE */
 	}
 }
 
@@ -3224,8 +3189,6 @@
 	/* Program registers for the distribution of queues */
 	ixgbe_setup_mrqc(adapter);
 
-	ixgbe_set_uta(adapter);
-
 	/* set_rx_buffer_len must be called before ring initialization */
 	ixgbe_set_rx_buffer_len(adapter);
 
@@ -3462,16 +3425,17 @@
 		}
 		ixgbe_vlan_filter_enable(adapter);
 		hw->addr_ctrl.user_set_promisc = false;
-		/*
-		 * Write addresses to available RAR registers, if there is not
-		 * sufficient space to store all the addresses then enable
-		 * unicast promiscuous mode
-		 */
-		count = ixgbe_write_uc_addr_list(netdev);
-		if (count < 0) {
-			fctrl |= IXGBE_FCTRL_UPE;
-			vmolr |= IXGBE_VMOLR_ROPE;
-		}
+	}
+
+	/*
+	 * Write addresses to available RAR registers, if there is not
+	 * sufficient space to store all the addresses then enable
+	 * unicast promiscuous mode
+	 */
+	count = ixgbe_write_uc_addr_list(netdev);
+	if (count < 0) {
+		fctrl |= IXGBE_FCTRL_UPE;
+		vmolr |= IXGBE_VMOLR_ROPE;
 	}
 
 	if (adapter->num_vfs) {
@@ -4836,7 +4800,9 @@
 
 	pci_wake_from_d3(pdev, false);
 
+	rtnl_lock();
 	err = ixgbe_init_interrupt_scheme(adapter);
+	rtnl_unlock();
 	if (err) {
 		e_dev_err("Cannot initialize interrupts for device\n");
 		return err;
@@ -4893,6 +4859,16 @@
 	if (wufc) {
 		ixgbe_set_rx_mode(netdev);
 
+		/*
+		 * Enable the optics for both multispeed fiber and
+		 * 82599 SFP+ fiber, since WoL is supported on them.
+		 */
+		if (hw->mac.ops.enable_tx_laser &&
+		    (hw->phy.multispeed_fiber ||
+		    (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
+		     hw->mac.type == ixgbe_mac_82599EB)))
+			hw->mac.ops.enable_tx_laser(hw);
+
 		/* turn on all-multi mode if wake on multicast is enabled */
 		if (wufc & IXGBE_WUFC_MC) {
 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -6681,6 +6657,74 @@
 	return 0;
 }
 
+static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
+			     struct net_device *dev,
+			     unsigned char *addr,
+			     u16 flags)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err = -EOPNOTSUPP;
+
+	if (ndm->ndm_state & NUD_PERMANENT) {
+		pr_info("%s: FDB only supports static addresses\n",
+			ixgbe_driver_name);
+		return -EINVAL;
+	}
+
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		if (is_unicast_ether_addr(addr))
+			err = dev_uc_add_excl(dev, addr);
+		else if (is_multicast_ether_addr(addr))
+			err = dev_mc_add_excl(dev, addr);
+		else
+			err = -EINVAL;
+	}
+
+	/* Only return duplicate errors if NLM_F_EXCL is set */
+	if (err == -EEXIST && !(flags & NLM_F_EXCL))
+		err = 0;
+
+	return err;
+}
+
+static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
+			     struct net_device *dev,
+			     unsigned char *addr)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err = -EOPNOTSUPP;
+
+	if (ndm->ndm_state & NUD_PERMANENT) {
+		pr_info("%s: FDB only supports static addresses\n",
+			ixgbe_driver_name);
+		return -EINVAL;
+	}
+
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		if (is_unicast_ether_addr(addr))
+			err = dev_uc_del(dev, addr);
+		else if (is_multicast_ether_addr(addr))
+			err = dev_mc_del(dev, addr);
+		else
+			err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
+			      struct netlink_callback *cb,
+			      struct net_device *dev,
+			      int idx)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+	return idx;
+}
+
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
@@ -6717,6 +6761,9 @@
 #endif /* IXGBE_FCOE */
 	.ndo_set_features = ixgbe_set_features,
 	.ndo_fix_features = ixgbe_fix_features,
+	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
+	.ndo_fdb_del		= ixgbe_ndo_fdb_del,
+	.ndo_fdb_dump		= ixgbe_ndo_fdb_dump,
 };
 
 static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index bf9f82f..2411770 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1582,13 +1582,21 @@
  **/
 static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
 {
-	*i2cctl |= IXGBE_I2C_CLK_OUT;
+	u32 i = 0;
+	u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
+	u32 i2cctl_r = 0;
 
-	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
-	IXGBE_WRITE_FLUSH(hw);
+	for (i = 0; i < timeout; i++) {
+		*i2cctl |= IXGBE_I2C_CLK_OUT;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+		/* SCL rise time (1000ns) */
+		udelay(IXGBE_I2C_T_RISE);
 
-	/* SCL rise time (1000ns) */
-	udelay(IXGBE_I2C_T_RISE);
+		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+		if (i2cctl_r & IXGBE_I2C_CLK_IN)
+			break;
+	}
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8636e83..4acd9e6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -110,6 +110,7 @@
 #define IXGBE_I2C_CLK_OUT   0x00000002
 #define IXGBE_I2C_DATA_IN   0x00000004
 #define IXGBE_I2C_DATA_OUT  0x00000008
+#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT	500
 
 /* Interrupt Registers */
 #define IXGBE_EICR      0x00800
@@ -1681,7 +1682,9 @@
 #define IXGBE_DEVICE_CAPS       0x2C
 #define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
 #define IXGBE_PCIE_MSIX_82599_CAPS  0x72
+#define IXGBE_MAX_MSIX_VECTORS_82599	0x40
 #define IXGBE_PCIE_MSIX_82598_CAPS  0x62
+#define IXGBE_MAX_MSIX_VECTORS_82598	0x13
 
 /* MSI-X capability fields masks */
 #define IXGBE_PCIE_MSIX_TBL_SZ_MASK     0x7FF
@@ -2813,6 +2816,7 @@
 	u16                             wwnn_prefix;
 	/* prefix for World Wide Port Name (WWPN) */
 	u16                             wwpn_prefix;
+	u16				max_msix_vectors;
 #define IXGBE_MAX_MTA			128
 	u32				mta_shadow[IXGBE_MAX_MTA];
 	s32                             mc_filter_type;
@@ -2823,7 +2827,6 @@
 	u32				rx_pb_size;
 	u32                             max_tx_queues;
 	u32                             max_rx_queues;
-	u32                             max_msix_vectors;
 	u32                             orig_autoc;
 	u32                             orig_autoc2;
 	bool                            orig_link_settings_stored;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5e1ca0f..c8950da 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1665,6 +1665,7 @@
 	.get_strings		= mv643xx_eth_get_strings,
 	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
 	.get_sset_count		= mv643xx_eth_get_sset_count,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index efec6b6..1db023b 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1456,6 +1456,7 @@
 	.set_settings = pxa168_set_settings,
 	.get_drvinfo = pxa168_get_drvinfo,
 	.get_link = ethtool_op_get_link,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops pxa168_eth_netdev_ops = {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index c9b504e..7732474 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4816,14 +4816,14 @@
 
 	init_waitqueue_head(&hw->msi_wait);
 
-	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
-
 	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
 	if (err) {
 		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
 		return err;
 	}
 
+	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
+
 	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
 	sky2_read8(hw, B0_CTST);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 1bb9353..5f027f9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -11,6 +11,18 @@
 	  This driver supports Mellanox Technologies ConnectX Ethernet
 	  devices.
 
+config MLX4_EN_DCB
+	bool "Data Center Bridging (DCB) Support"
+	default y
+	depends on MLX4_EN && DCB
+	---help---
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
+	  If set to N, you will not be able to configure QoS and rate-limit
+	  attributes. This option depends on the kernel's DCB support.
+
+	  If unsure, set to Y
+
 config MLX4_CORE
 	tristate
 	depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 4a40ab9..293127d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -7,3 +7,4 @@
 
 mlx4_en-y := 	en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
 		en_resources.o en_netdev.o en_selftest.o
+mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 00b8127..908a460 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -124,11 +124,7 @@
 	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
 	cq->mcq.event = mlx4_en_cq_event;
 
-	if (cq->is_tx) {
-		init_timer(&cq->timer);
-		cq->timer.function = mlx4_en_poll_tx_cq;
-		cq->timer.data = (unsigned long) cq;
-	} else {
+	if (!cq->is_tx) {
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
 		napi_enable(&cq->napi);
 	}
@@ -151,16 +147,12 @@
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-
-	if (cq->is_tx)
-		del_timer(&cq->timer);
-	else {
+	if (!cq->is_tx) {
 		napi_disable(&cq->napi);
 		netif_napi_del(&cq->napi);
 	}
 
-	mlx4_cq_free(mdev->dev, &cq->mcq);
+	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
 }
 
 /* Set rx cq moderation parameters */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
new file mode 100644
index 0000000..5d367958
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/dcbnl.h>
+#include <linux/math64.h>
+
+#include "mlx4_en.h"
+
+static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
+				   struct ieee_ets *ets)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct ieee_ets *my_ets = &priv->ets;
+
+	/* No IEEE ETS settings available */
+	if (!my_ets)
+		return -EINVAL;
+
+	ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+	ets->cbs = my_ets->cbs;
+	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+
+	return 0;
+}
+
+static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
+{
+	int i;
+	int total_ets_bw = 0;
+	int has_ets_tc = 0;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
+			en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
+					i, ets->prio_tc[i]);
+			return -EINVAL;
+		}
+
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			has_ets_tc = 1;
+			total_ets_bw += ets->tc_tx_bw[i];
+			break;
+		default:
+			en_err(priv, "TC[%d]: Not supported TSA: %d\n",
+					i, ets->tc_tsa[i]);
+			return -ENOTSUPP;
+		}
+	}
+
+	if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
+		en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
+				total_ets_bw);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
+		struct ieee_ets *ets, u16 *ratelimit)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int num_strict = 0;
+	int i;
+	__u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
+	__u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
+
+	ets = ets ?: &priv->ets;
+	ratelimit = ratelimit ?: priv->maxrate;
+
+	/* higher TC means higher priority => lower pg */
+	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			pg[i] = num_strict++;
+			tc_tx_bw[i] = MLX4_EN_BW_MAX;
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			pg[i] = MLX4_EN_TC_ETS;
+			tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
+			break;
+		}
+	}
+
+	return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
+			ratelimit);
+}
+
+static int
+mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	err = mlx4_en_ets_validate(priv, ets);
+	if (err)
+		return err;
+
+	err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
+	if (err)
+		return err;
+
+	err = mlx4_en_config_port_scheduler(priv, ets, NULL);
+	if (err)
+		return err;
+
+	memcpy(&priv->ets, ets, sizeof(priv->ets));
+
+	return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
+		struct ieee_pfc *pfc)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+	pfc->pfc_en = priv->prof->tx_ppp;
+
+	return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
+		struct ieee_pfc *pfc)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
+			pfc->pfc_cap,
+			pfc->pfc_en,
+			pfc->mbc,
+			pfc->delay);
+
+	priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
+	priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
+
+	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+				    priv->rx_skb_size + ETH_FCS_LEN,
+				    priv->prof->tx_pause,
+				    priv->prof->tx_ppp,
+				    priv->prof->rx_pause,
+				    priv->prof->rx_ppp);
+	if (err)
+		en_err(priv, "Failed setting pause params\n");
+
+	return err;
+}
+
+static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
+{
+	return DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+	    (mode & DCB_CAP_DCBX_VER_CEE) ||
+	    !(mode & DCB_CAP_DCBX_VER_IEEE) ||
+	    !(mode & DCB_CAP_DCBX_HOST))
+		return 1;
+
+	return 0;
+}
+
+#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
+static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
+				   struct ieee_maxrate *maxrate)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int i;
+
+	if (!priv->maxrate)
+		return -EINVAL;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		maxrate->tc_maxrate[i] =
+			priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
+
+	return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
+		struct ieee_maxrate *maxrate)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	u16 tmp[IEEE_8021QAZ_MAX_TCS];
+	int i, err;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		/* Convert from Kbps into HW units, rounding result up.
+		 * Setting it to 0 means unlimited BW.
+		 */
+		tmp[i] = div_u64(maxrate->tc_maxrate[i] +
+				 MLX4_RATELIMIT_UNITS_IN_KB - 1,
+				 MLX4_RATELIMIT_UNITS_IN_KB);
+	}
+
+	err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
+	if (err)
+		return err;
+
+	memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
+
+	return 0;
+}
+
+const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
+	.ieee_getets	= mlx4_en_dcbnl_ieee_getets,
+	.ieee_setets	= mlx4_en_dcbnl_ieee_setets,
+	.ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
+	.ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
+	.ieee_getpfc	= mlx4_en_dcbnl_ieee_getpfc,
+	.ieee_setpfc	= mlx4_en_dcbnl_ieee_setpfc,
+
+	.getdcbx	= mlx4_en_dcbnl_getdcbx,
+	.setdcbx	= mlx4_en_dcbnl_setdcbx,
+};
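
mlx4_en_dcbnl_ieee_setmaxrate() converts each per-TC rate from Kbps into the hardware's 100000-Kbps units with a round-up division, keeping 0 as "unlimited". A small user-space sketch of that conversion, reusing the MLX4_RATELIMIT_UNITS_IN_KB value from the file above:

#include <stdint.h>
#include <stdio.h>

#define RATELIMIT_UNIT_KB 100000	/* HW rate-limit granularity in Kbps */

/* Round-up division as done in the setmaxrate hook; an input of 0 means
 * "unlimited" and stays 0 on output. */
static uint16_t kbps_to_hw_units(uint64_t kbps)
{
	return (uint16_t)((kbps + RATELIMIT_UNIT_KB - 1) / RATELIMIT_UNIT_KB);
}

int main(void)
{
	printf("%u\n", kbps_to_hw_units(0));		/* 0        -> 0 (unlimited) */
	printf("%u\n", kbps_to_hw_units(1));		/* 1 Kbps   -> 1 unit        */
	printf("%u\n", kbps_to_hw_units(250000));	/* 250 Mbps -> 3 units       */
	return 0;
}
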
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 70346fd..72901ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -83,7 +83,7 @@
 #define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
 
 static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
-	"Interupt Test",
+	"Interrupt Test",
 	"Link Test",
 	"Speed Test",
 	"Register Test",
@@ -359,8 +359,8 @@
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
-	coal->tx_coalesce_usecs = 0;
-	coal->tx_max_coalesced_frames = 0;
+	coal->tx_coalesce_usecs = priv->tx_usecs;
+	coal->tx_max_coalesced_frames = priv->tx_frames;
 	coal->rx_coalesce_usecs = priv->rx_usecs;
 	coal->rx_max_coalesced_frames = priv->rx_frames;
 
@@ -388,6 +388,21 @@
 				MLX4_EN_RX_COAL_TIME :
 				coal->rx_coalesce_usecs;
 
+	/* Setting TX coalescing parameters */
+	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
+	    coal->tx_max_coalesced_frames != priv->tx_frames) {
+		priv->tx_usecs = coal->tx_coalesce_usecs;
+		priv->tx_frames = coal->tx_max_coalesced_frames;
+		for (i = 0; i < priv->tx_ring_num; i++) {
+			priv->tx_cq[i].moder_cnt = priv->tx_frames;
+			priv->tx_cq[i].moder_time = priv->tx_usecs;
+			if (mlx4_en_set_cq_moder(priv, &priv->tx_cq[i])) {
+				en_warn(priv, "Failed changing moderation "
+					      "for TX cq %d\n", i);
+			}
+		}
+	}
+
 	/* Set adaptive coalescing params */
 	priv->pkt_rate_low = coal->pkt_rate_low;
 	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 2097a7d..346fdb2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -114,7 +114,7 @@
 		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
 		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
 		params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
-			(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+			MLX4_EN_NUM_PPP_RINGS;
 		params->prof[i].rss_rings = 0;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 31b455a..eaa8fad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,6 +45,14 @@
 #include "mlx4_en.h"
 #include "en_port.h"
 
+static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
+{
+	if (up != MLX4_EN_NUM_UP)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -421,6 +429,8 @@
 	 */
 	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
 	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
+	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
+	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
 	en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
 			   "rx_frames:%d rx_usecs:%d\n",
 		 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
@@ -437,8 +447,8 @@
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		cq = &priv->tx_cq[i];
-		cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
-		cq->moder_time = MLX4_EN_TX_COAL_TIME;
+		cq->moder_cnt = priv->tx_frames;
+		cq->moder_time = priv->tx_usecs;
 	}
 
 	/* Reset auto-moderation params */
@@ -650,12 +660,18 @@
 
 		/* Configure ring */
 		tx_ring = &priv->tx_ring[i];
-		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
+		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
+				max(0, i - MLX4_EN_NUM_TX_RINGS));
 		if (err) {
 			en_err(priv, "Failed allocating Tx ring\n");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
+		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+
+		/* Arm CQ for TX completions */
+		mlx4_en_arm_cq(priv, cq);
+
 		/* Set initial ownership of all Tx TXBBs to SW (1) */
 		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
 			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
@@ -797,12 +813,15 @@
 						 watchdog_task);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
+	int i;
 
 	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
 
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		mlx4_en_stop_port(dev);
+		for (i = 0; i < priv->tx_ring_num; i++)
+			netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
 		if (mlx4_en_start_port(dev))
 			en_err(priv, "Failed restarting port %d\n", priv->port);
 	}
@@ -966,6 +985,7 @@
 	mutex_unlock(&mdev->state_lock);
 
 	mlx4_en_free_resources(priv);
+
 	free_netdev(dev);
 }
 
@@ -1036,6 +1056,7 @@
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
 	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_setup_tc		= mlx4_en_setup_tc,
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1079,6 +1100,10 @@
 	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
 	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
 	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+#ifdef CONFIG_MLX4_EN_DCB
+	if (!mlx4_is_slave(priv->mdev->dev))
+		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
+#endif
 
 	/* Query for default mac and max mtu */
 	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
@@ -1113,6 +1138,15 @@
 	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
+	netdev_set_num_tc(dev, MLX4_EN_NUM_UP);
+
+	/* First 9 rings are for UP 0 */
+	netdev_set_tc_queue(dev, 0, MLX4_EN_NUM_TX_RINGS + 1, 0);
+
+	/* Partition Tx queues evenly amongst UPs 1-7 */
+	for (i = 1; i < MLX4_EN_NUM_UP; i++)
+		netdev_set_tc_queue(dev, i, 1, MLX4_EN_NUM_TX_RINGS + i);
+
 	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
 
 	/* Set default MAC */
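For reference, the queue layout that the netdev_set_tc_queue() calls above describe (using the MLX4_EN_NUM_TX_RINGS = 8 and MLX4_EN_NUM_UP = 8 values added elsewhere in this patch) works out as sketched below; the helper is only an illustration of the arithmetic, not driver code.

/*
 * UP 0    -> TX queues 0..8  (the eight default rings plus queue 8)
 * UP 1..7 -> TX queue 8 + UP (one dedicated ring per user priority)
 *
 * mlx4_en_select_queue() (see en_tx.c below) returns 8 + PCP for
 * VLAN-tagged frames, which falls inside these ranges.
 */
static inline unsigned int example_up_first_queue(unsigned int up)
{
	return up ? MLX4_EN_NUM_TX_RINGS + up : 0;
}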
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 6934fd7..745090b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,6 +39,8 @@
 #define SET_PORT_PROMISC_SHIFT	31
 #define SET_PORT_MC_PROMISC_SHIFT	30
 
+#define MLX4_EN_NUM_TC		8
+
 #define VLAN_FLTR_SIZE	128
 struct mlx4_set_vlan_fltr_mbox {
 	__be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index bcbc54c..10c24c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -39,7 +39,7 @@
 
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 			     int is_tx, int rss, int qpn, int cqn,
-			     struct mlx4_qp_context *context)
+			     int user_prio, struct mlx4_qp_context *context)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 
@@ -57,6 +57,10 @@
 	context->local_qpn = cpu_to_be32(qpn);
 	context->pri_path.ackto = 1 & 0x07;
 	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
+	if (user_prio >= 0) {
+		context->pri_path.sched_queue |= user_prio << 3;
+		context->pri_path.feup = 1 << 6;
+	}
 	context->pri_path.counter_index = 0xff;
 	context->cqn_send = cpu_to_be32(cqn);
 	context->cqn_recv = cpu_to_be32(cqn);
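To make the new priority encoding concrete, here is a worked example of the sched_queue byte built just above; the port and priority values are chosen purely for illustration.

/*
 * port = 1, user_prio = 5:
 *
 *   0x83 | (1 - 1) << 6 | 5 << 3  =  0x83 | 0x00 | 0x28  =  0xab
 *
 * Bits 5:3 of pri_path.sched_queue now carry the user priority, and the
 * feup bit (1 << 6) appears to tell the firmware to honor it.  Callers
 * that do not want a forced priority (the RX QPs below) pass
 * user_prio = -1 and keep the previous behaviour.
 */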
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9adbd53..d49a7ac 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -823,7 +823,7 @@
 
 	memset(context, 0, sizeof *context);
 	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
-				qpn, ring->cqn, context);
+				qpn, ring->cqn, -1, context);
 	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
 
 	/* Cancel FCS removal if FW allows */
@@ -890,7 +890,7 @@
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-				priv->rx_ring[0].cqn, &context);
+				priv->rx_ring[0].cqn, -1, &context);
 
 	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
 		rss_rings = priv->rx_ring_num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1796824..9a38483 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -67,8 +67,6 @@
 
 	inline_thold = min(inline_thold, MAX_INLINE);
 
-	spin_lock_init(&ring->comp_lock);
-
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = vmalloc(tmp);
 	if (!ring->tx_info)
@@ -156,7 +154,7 @@
 
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
-			     int cq)
+			     int cq, int user_prio)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
@@ -174,7 +172,7 @@
 	ring->doorbell_qpn = ring->qp.qpn << 8;
 
 	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
-				ring->cqn, &ring->context);
+				ring->cqn, user_prio, &ring->context);
 	if (ring->bf_enabled)
 		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
 
@@ -317,6 +315,8 @@
 	int size = cq->size;
 	u32 size_mask = ring->size_mask;
 	struct mlx4_cqe *buf = cq->buf;
+	u32 packets = 0;
+	u32 bytes = 0;
 
 	if (!priv->port_up)
 		return;
@@ -345,6 +345,8 @@
 					priv, ring, ring_index,
 					!!((ring->cons + txbbs_skipped) &
 							ring->size));
+			packets++;
+			bytes += ring->tx_info[ring_index].nr_bytes;
 		} while (ring_index != new_index);
 
 		++cons_index;
@@ -361,13 +363,14 @@
 	mlx4_cq_set_ci(mcq);
 	wmb();
 	ring->cons += txbbs_skipped;
+	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
 
 	/* Wakeup Tx queue if this ring stopped it */
 	if (unlikely(ring->blocked)) {
 		if ((u32) (ring->prod - ring->cons) <=
 		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
 			ring->blocked = 0;
-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
+			netif_tx_wake_queue(ring->tx_queue);
 			priv->port_stats.wake_queue++;
 		}
 	}
@@ -377,41 +380,12 @@
 {
 	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
 
-	if (!spin_trylock(&ring->comp_lock))
-		return;
 	mlx4_en_process_tx_cq(cq->dev, cq);
-	mod_timer(&cq->timer, jiffies + 1);
-	spin_unlock(&ring->comp_lock);
+	mlx4_en_arm_cq(priv, cq);
 }
 
 
-void mlx4_en_poll_tx_cq(unsigned long data)
-{
-	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
-	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-	u32 inflight;
-
-	INC_PERF_COUNTER(priv->pstats.tx_poll);
-
-	if (!spin_trylock_irq(&ring->comp_lock)) {
-		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-		return;
-	}
-	mlx4_en_process_tx_cq(cq->dev, cq);
-	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
-
-	/* If there are still packets in flight and the timer has not already
-	 * been scheduled by the Tx routine then schedule it here to guarantee
-	 * completion processing of these packets */
-	if (inflight && priv->port_up)
-		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-
-	spin_unlock_irq(&ring->comp_lock);
-}
-
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 						      struct mlx4_en_tx_ring *ring,
 						      u32 index,
@@ -440,25 +414,6 @@
 	return ring->buf + index * TXBB_SIZE;
 }
 
-static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
-{
-	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
-	unsigned long flags;
-
-	/* If we don't have a pending timer, set one up to catch our recent
-	   post in case the interface becomes idle */
-	if (!timer_pending(&cq->timer))
-		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-
-	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
-	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
-			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock_irqrestore(&ring->comp_lock, flags);
-		}
-}
-
 static int is_inline(struct sk_buff *skb, void **pfrag)
 {
 	void *ptr;
@@ -570,13 +525,9 @@
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	struct mlx4_en_priv *priv = netdev_priv(dev);
 	u16 vlan_tag = 0;
 
-	/* If we support per priority flow control and the packet contains
-	 * a vlan tag, send the packet to the TX ring assigned to that priority
-	 */
-	if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
+	if (vlan_tx_tag_present(skb)) {
 		vlan_tag = vlan_tx_tag_get(skb);
 		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
 	}
@@ -594,7 +545,6 @@
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_tx_ring *ring;
-	struct mlx4_en_cq *cq;
 	struct mlx4_en_tx_desc *tx_desc;
 	struct mlx4_wqe_data_seg *data;
 	struct skb_frag_struct *frag;
@@ -638,13 +588,10 @@
 	if (unlikely(((int)(ring->prod - ring->cons)) >
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 		/* every full Tx ring stops queue */
-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
+		netif_tx_stop_queue(ring->tx_queue);
 		ring->blocked = 1;
 		priv->port_stats.queue_stopped++;
 
-		/* Use interrupts to find out when queue opened */
-		cq = &priv->tx_cq[tx_ind];
-		mlx4_en_arm_cq(priv, cq);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -707,7 +654,7 @@
 		priv->port_stats.tso_packets++;
 		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
 			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
-		ring->bytes += skb->len + (i - 1) * lso_header_size;
+		tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
 		ring->packets += i;
 	} else {
 		/* Normal (Non LSO) packet */
@@ -715,10 +662,12 @@
 			((ring->prod & ring->size) ?
 			 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
 		data = &tx_desc->data;
-		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
+		tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 		ring->packets++;
 
 	}
+	ring->bytes += tx_info->nr_bytes;
+	netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
 	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
 
@@ -792,9 +741,6 @@
 		iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
 	}
 
-	/* Poll CQ here */
-	mlx4_en_xmit_poll(priv, tx_ind);
-
 	return NETDEV_TX_OK;
 
 tx_drop:
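The netdev_tx_sent_queue()/netdev_tx_completed_queue() calls added above hook this TX path into byte queue limits (BQL). A minimal sketch of the pattern, with hypothetical function names, looks like this:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit path: account bytes as descriptors are posted to the ring. */
static void example_tx_post(struct netdev_queue *txq, struct sk_buff *skb)
{
	netdev_tx_sent_queue(txq, skb->len);
}

/* Completion path: report finished work so BQL can size the in-flight
 * byte budget and throttle or release the queue accordingly. */
static void example_tx_complete(struct netdev_queue *txq,
				unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(txq, pkts, bytes);
}

/* After a ring restart (see the watchdog hunk in en_netdev.c) the counters
 * must be cleared, or BQL would wait for completions that never arrive. */
static void example_tx_restart(struct netdev_queue *txq)
{
	netdev_tx_reset_queue(txq);
}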
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 2a0ff2c..cd56f1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -53,6 +53,26 @@
 #define DRV_VERSION	"1.1"
 #define DRV_RELDATE	"Dec, 2011"
 
+#define MLX4_NUM_UP		8
+#define MLX4_NUM_TC		8
+#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
+#define MLX4_RATELIMIT_DEFAULT 0xffff
+
+struct mlx4_set_port_prio2tc_context {
+	u8 prio2tc[4];
+};
+
+struct mlx4_port_scheduler_tc_cfg_be {
+	__be16 pg;
+	__be16 bw_precentage;
+	__be16 max_bw_units; /* 3: 100 Mbps, 4: 1 Gbps, other values reserved */
+	__be16 max_bw_value;
+};
+
+struct mlx4_set_port_scheduler_context {
+	struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
+};
+
 enum {
 	MLX4_HCR_BASE		= 0x80680,
 	MLX4_HCR_SIZE		= 0x0001c,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d69fee4..5d87637 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -40,6 +40,9 @@
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#ifdef CONFIG_MLX4_EN_DCB
+#include <linux/dcbnl.h>
+#endif
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
@@ -111,6 +114,7 @@
 #define MLX4_EN_NUM_TX_RINGS		8
 #define MLX4_EN_NUM_PPP_RINGS		8
 #define MAX_TX_RINGS			(MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
+#define MLX4_EN_NUM_UP			8
 #define MLX4_EN_DEF_TX_RING_SIZE	512
 #define MLX4_EN_DEF_RX_RING_SIZE  	1024
 
@@ -118,7 +122,7 @@
 #define MLX4_EN_RX_COAL_TARGET	44
 #define MLX4_EN_RX_COAL_TIME	0x10
 
-#define MLX4_EN_TX_COAL_PKTS	5
+#define MLX4_EN_TX_COAL_PKTS	16
 #define MLX4_EN_TX_COAL_TIME	0x80
 
 #define MLX4_EN_RX_RATE_LOW		400000
@@ -196,6 +200,7 @@
 struct mlx4_en_tx_info {
 	struct sk_buff *skb;
 	u32 nr_txbb;
+	u32 nr_bytes;
 	u8 linear;
 	u8 data_offset;
 	u8 inl;
@@ -251,9 +256,9 @@
 	unsigned long bytes;
 	unsigned long packets;
 	unsigned long tx_csum;
-	spinlock_t comp_lock;
 	struct mlx4_bf bf;
 	bool bf_enabled;
+	struct netdev_queue *tx_queue;
 };
 
 struct mlx4_en_rx_desc {
@@ -304,8 +309,6 @@
 	spinlock_t              lock;
 	struct net_device      *dev;
 	struct napi_struct	napi;
-	/* Per-core Tx cq processing support */
-	struct timer_list timer;
 	int size;
 	int buf_size;
 	unsigned vector;
@@ -411,6 +414,15 @@
 
 };
 
+#ifdef CONFIG_MLX4_EN_DCB
+/* Minimal TC BW - setting to 0 will block traffic */
+#define MLX4_EN_BW_MIN 1
+#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
+
+#define MLX4_EN_TC_ETS 7
+
+#endif
+
 struct mlx4_en_priv {
 	struct mlx4_en_dev *mdev;
 	struct mlx4_en_port_profile *prof;
@@ -484,6 +496,11 @@
 	int vids[128];
 	bool wol;
 	struct device *ddev;
+
+#ifdef CONFIG_MLX4_EN_DCB
+	struct ieee_ets ets;
+	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
+#endif
 };
 
 enum mlx4_en_wol {
@@ -512,7 +529,6 @@
 int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
-void mlx4_en_poll_tx_cq(unsigned long data);
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -522,7 +538,7 @@
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
-			     int cq);
+			     int cq, int user_prio);
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 				struct mlx4_en_tx_ring *ring);
 
@@ -540,8 +556,8 @@
 			  int budget);
 int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
-			     int is_tx, int rss, int qpn, int cqn,
-			     struct mlx4_qp_context *context);
+		int is_tx, int rss, int qpn, int cqn, int user_prio,
+		struct mlx4_qp_context *context);
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
 int mlx4_en_map_buffer(struct mlx4_buf *buf);
 void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
@@ -558,6 +574,10 @@
 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
 int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
 
+#ifdef CONFIG_MLX4_EN_DCB
+extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
+#endif
+
 #define MLX4_EN_NUM_SELF_TEST	5
 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
 u64 mlx4_en_mac_to_u64(u8 *addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 77535ff..55b12e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -834,6 +834,68 @@
 }
 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
 
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_prio2tc_context *context;
+	int err;
+	u32 in_mod;
+	int i;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+	memset(context, 0, sizeof *context);
+
+	for (i = 0; i < MLX4_NUM_UP; i += 2)
+		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
+
+	in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
+
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+		u8 *pg, u16 *ratelimit)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_scheduler_context *context;
+	int err;
+	u32 in_mod;
+	int i;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+	memset(context, 0, sizeof *context);
+
+	for (i = 0; i < MLX4_NUM_TC; i++) {
+		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
+		u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
+			MLX4_RATELIMIT_DEFAULT;
+
+		tc->pg = htons(pg[i]);
+		tc->bw_precentage = htons(tc_tx_bw[i]);
+
+		tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
+		tc->max_bw_value = htons(r);
+	}
+
+	in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+
 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
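The prio2tc loop in mlx4_SET_PORT_PRIO2TC() above packs two user priorities into each mailbox byte, high nibble first. A worked example with an illustrative mapping:

/*
 * Mapping UPs {0,1,2,3,4,5,6,7} to TCs {0,0,1,1,2,2,3,3}:
 *
 *   context->prio2tc[0] = 0 << 4 | 0 = 0x00   (UPs 0 and 1)
 *   context->prio2tc[1] = 1 << 4 | 1 = 0x11   (UPs 2 and 3)
 *   context->prio2tc[2] = 2 << 4 | 2 = 0x22   (UPs 4 and 5)
 *   context->prio2tc[3] = 3 << 4 | 3 = 0x33   (UPs 6 and 7)
 */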
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f84dd2d..24fb049 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1262,7 +1262,7 @@
 		.owner	= THIS_MODULE,
 	},
 	.probe		= ks8842_probe,
-	.remove		= ks8842_remove,
+	.remove		= __devexit_p(ks8842_remove),
 };
 
 module_platform_driver(ks8842_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index c722aa6..f8dda00 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -889,16 +889,17 @@
 	netif_stop_queue(dev);
 
 	mutex_lock(&ks->lock);
+	/* turn off the IRQs and ack any outstanding */
+	ks8851_wrreg16(ks, KS_IER, 0x0000);
+	ks8851_wrreg16(ks, KS_ISR, 0xffff);
+	mutex_unlock(&ks->lock);
 
 	/* stop any outstanding work */
 	flush_work(&ks->irq_work);
 	flush_work(&ks->tx_work);
 	flush_work(&ks->rxctrl_work);
 
-	/* turn off the IRQs and ack any outstanding */
-	ks8851_wrreg16(ks, KS_IER, 0x0000);
-	ks8851_wrreg16(ks, KS_ISR, 0xffff);
-
+	mutex_lock(&ks->lock);
 	/* shutdown RX process */
 	ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
 
@@ -907,6 +908,7 @@
 
 	/* set powermode to soft power down to save power */
 	ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
+	mutex_unlock(&ks->lock);
 
 	/* ensure any queued tx buffers are dumped */
 	while (!skb_queue_empty(&ks->txq)) {
@@ -918,7 +920,6 @@
 		dev_kfree_skb(txb);
 	}
 
-	mutex_unlock(&ks->lock);
 	return 0;
 }
 
@@ -1418,6 +1419,7 @@
 	struct net_device *ndev;
 	struct ks8851_net *ks;
 	int ret;
+	unsigned cider;
 
 	ndev = alloc_etherdev(sizeof(struct ks8851_net));
 	if (!ndev)
@@ -1484,8 +1486,8 @@
 	ks8851_soft_reset(ks, GRR_GSR);
 
 	/* simple check for a valid chip being connected to the bus */
-
-	if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
+	cider = ks8851_rdreg16(ks, KS_CIDER);
+	if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
 		dev_err(&spi->dev, "failed to read device ID\n");
 		ret = -ENODEV;
 		goto err_id;
@@ -1516,15 +1518,14 @@
 	}
 
 	netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
-		    CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
-		    ndev->dev_addr, ndev->irq,
+		    CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq,
 		    ks->rc_ccr & CCR_EEPROM ? "has" : "no");
 
 	return 0;
 
 
 err_netdev:
-	free_irq(ndev->irq, ndev);
+	free_irq(ndev->irq, ks);
 
 err_id:
 err_irq:
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index b8104d9..5ffde23 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -40,7 +40,7 @@
 #define	DRV_NAME	"ks8851_mll"
 
 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
-#define MAX_RECV_FRAMES			32
+#define MAX_RECV_FRAMES			255
 #define MAX_BUF_SIZE			2048
 #define TX_BUF_SIZE			2000
 #define RX_BUF_SIZE			2000
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ef723b1..eaf9ff0 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5675,7 +5675,7 @@
 		memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
 	}
 
-	memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
+	memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
 
 	interrupt = hw_block_intr(hw);
 
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 27273ae..90153fc 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4033,7 +4033,6 @@
 
 	netdev->netdev_ops = &myri10ge_netdev_ops;
 	netdev->mtu = myri10ge_initial_mtu;
-	netdev->base_addr = mgp->iomem_base;
 	netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
 	netdev->features = netdev->hw_features;
 
@@ -4047,12 +4046,10 @@
 		netdev->vlan_features &= ~NETIF_F_TSO;
 
 	/* make sure we can get an irq, and that MSI can be
-	 * setup (if available).  Also ensure netdev->irq
-	 * is set to correct value if MSI is enabled */
+	 * setup (if available). */
 	status = myri10ge_request_irq(mgp);
 	if (status != 0)
 		goto abort_with_firmware;
-	netdev->irq = pdev->irq;
 	myri10ge_free_irq(mgp);
 
 	/* Save configuration space to be restored if the
@@ -4077,7 +4074,7 @@
 	else
 		dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
 			 mgp->msi_enabled ? "MSI" : "xPIC",
-			 netdev->irq, mgp->tx_boundary, mgp->fw_name,
+			 pdev->irq, mgp->tx_boundary, mgp->fw_name,
 			 (mgp->wc_enabled ? "Enabled" : "Disabled"));
 
 	board_number++;
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index d38e48d..5b61d12 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -547,6 +547,7 @@
 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
 	dma_addr_t tx_dma[TX_RING_SIZE];
 	struct net_device *dev;
+	void __iomem *ioaddr;
 	struct napi_struct napi;
 	/* Media monitoring timer */
 	struct timer_list timer;
@@ -699,7 +700,9 @@
 
 static inline void __iomem *ns_ioaddr(struct net_device *dev)
 {
-	return (void __iomem *) dev->base_addr;
+	struct netdev_private *np = netdev_priv(dev);
+
+	return np->ioaddr;
 }
 
 static inline void natsemi_irq_enable(struct net_device *dev)
@@ -863,10 +866,9 @@
 	/* Store MAC Address in perm_addr */
 	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
 
-	dev->base_addr = (unsigned long __force) ioaddr;
-	dev->irq = irq;
-
 	np = netdev_priv(dev);
+	np->ioaddr = ioaddr;
+
 	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
 	np->dev = dev;
 
@@ -914,9 +916,6 @@
 	}
 
 	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
-	if (dev->mem_start)
-		option = dev->mem_start;
-
 	/* The lower four bits are the media type. */
 	if (option) {
 		if (option & 0x200)
@@ -1532,20 +1531,21 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem * ioaddr = ns_ioaddr(dev);
+	const int irq = np->pci_dev->irq;
 	int i;
 
 	/* Reset the chip, just in case. */
 	natsemi_reset(dev);
 
-	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (i) return i;
 
 	if (netif_msg_ifup(np))
 		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-			dev->name, dev->irq);
+			dev->name, irq);
 	i = alloc_ring(dev);
 	if (i < 0) {
-		free_irq(dev->irq, dev);
+		free_irq(irq, dev);
 		return i;
 	}
 	napi_enable(&np->napi);
@@ -1794,6 +1794,7 @@
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem * ioaddr = ns_ioaddr(dev);
 	int next_tick = NATSEMI_TIMER_FREQ;
+	const int irq = np->pci_dev->irq;
 
 	if (netif_msg_timer(np)) {
 		/* DO NOT read the IntrStatus register,
@@ -1817,14 +1818,14 @@
 				if (netif_msg_drv(np))
 					printk(KERN_NOTICE "%s: possible phy reset: "
 						"re-initializing\n", dev->name);
-				disable_irq(dev->irq);
+				disable_irq(irq);
 				spin_lock_irq(&np->lock);
 				natsemi_stop_rxtx(dev);
 				dump_ring(dev);
 				reinit_ring(dev);
 				init_registers(dev);
 				spin_unlock_irq(&np->lock);
-				enable_irq(dev->irq);
+				enable_irq(irq);
 			} else {
 				/* hurry back */
 				next_tick = HZ;
@@ -1841,10 +1842,10 @@
 		spin_unlock_irq(&np->lock);
 	}
 	if (np->oom) {
-		disable_irq(dev->irq);
+		disable_irq(irq);
 		np->oom = 0;
 		refill_rx(dev);
-		enable_irq(dev->irq);
+		enable_irq(irq);
 		if (!np->oom) {
 			writel(RxOn, ioaddr + ChipCmd);
 		} else {
@@ -1885,8 +1886,9 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem * ioaddr = ns_ioaddr(dev);
+	const int irq = np->pci_dev->irq;
 
-	disable_irq(dev->irq);
+	disable_irq(irq);
 	spin_lock_irq(&np->lock);
 	if (!np->hands_off) {
 		if (netif_msg_tx_err(np))
@@ -1905,7 +1907,7 @@
 			dev->name);
 	}
 	spin_unlock_irq(&np->lock);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
 	dev->stats.tx_errors++;
@@ -2470,9 +2472,12 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void natsemi_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	intr_handler(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct netdev_private *np = netdev_priv(dev);
+	const int irq = np->pci_dev->irq;
+
+	disable_irq(irq);
+	intr_handler(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -2523,8 +2528,9 @@
 	if (netif_running(dev)) {
 		struct netdev_private *np = netdev_priv(dev);
 		void __iomem * ioaddr = ns_ioaddr(dev);
+		const int irq = np->pci_dev->irq;
 
-		disable_irq(dev->irq);
+		disable_irq(irq);
 		spin_lock(&np->lock);
 		/* stop engines */
 		natsemi_stop_rxtx(dev);
@@ -2537,7 +2543,7 @@
 		/* restart engines */
 		writel(RxOn | TxOn, ioaddr + ChipCmd);
 		spin_unlock(&np->lock);
-		enable_irq(dev->irq);
+		enable_irq(irq);
 	}
 	return 0;
 }
@@ -3135,6 +3141,7 @@
 {
 	void __iomem * ioaddr = ns_ioaddr(dev);
 	struct netdev_private *np = netdev_priv(dev);
+	const int irq = np->pci_dev->irq;
 
 	if (netif_msg_ifdown(np))
 		printk(KERN_DEBUG
@@ -3156,14 +3163,14 @@
 	 */
 
 	del_timer_sync(&np->timer);
-	disable_irq(dev->irq);
+	disable_irq(irq);
 	spin_lock_irq(&np->lock);
 	natsemi_irq_disable(dev);
 	np->hands_off = 1;
 	spin_unlock_irq(&np->lock);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 
-	free_irq(dev->irq, dev);
+	free_irq(irq, dev);
 
 	/* Interrupt disabled, interrupt handler released,
 	 * queue stopped, timer deleted, rtnl_lock held
@@ -3256,9 +3263,11 @@
 
 	rtnl_lock();
 	if (netif_running (dev)) {
+		const int irq = np->pci_dev->irq;
+
 		del_timer_sync(&np->timer);
 
-		disable_irq(dev->irq);
+		disable_irq(irq);
 		spin_lock_irq(&np->lock);
 
 		natsemi_irq_disable(dev);
@@ -3267,7 +3276,7 @@
 		netif_stop_queue(dev);
 
 		spin_unlock_irq(&np->lock);
-		enable_irq(dev->irq);
+		enable_irq(irq);
 
 		napi_disable(&np->napi);
 
@@ -3307,6 +3316,8 @@
 	if (netif_device_present(dev))
 		goto out;
 	if (netif_running(dev)) {
+		const int irq = np->pci_dev->irq;
+
 		BUG_ON(!np->hands_off);
 		ret = pci_enable_device(pdev);
 		if (ret < 0) {
@@ -3320,13 +3331,13 @@
 
 		natsemi_reset(dev);
 		init_ring(dev);
-		disable_irq(dev->irq);
+		disable_irq(irq);
 		spin_lock_irq(&np->lock);
 		np->hands_off = 0;
 		init_registers(dev);
 		netif_device_attach(dev);
 		spin_unlock_irq(&np->lock);
-		enable_irq(dev->irq);
+		enable_irq(irq);
 
 		mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
 	}
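natsemi is one of several drivers in this series converted away from the legacy dev->irq and dev->base_addr fields. The common shape of the conversion, with purely illustrative names, is roughly:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct example_priv {
	struct pci_dev *pci_dev;
	void __iomem *ioaddr;		/* replaces dev->base_addr */
};

static irqreturn_t example_intr(int irq, void *dev_id);

static int example_open(struct net_device *dev)
{
	struct example_priv *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;	/* replaces dev->irq */

	return request_irq(irq, example_intr, IRQF_SHARED, dev->name, dev);
}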
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6338ef8..bb36758 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2846,6 +2846,7 @@
 static void s2io_netpoll(struct net_device *dev)
 {
 	struct s2io_nic *nic = netdev_priv(dev);
+	const int irq = nic->pdev->irq;
 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
 	int i;
@@ -2855,7 +2856,7 @@
 	if (pci_channel_offline(nic->pdev))
 		return;
 
-	disable_irq(dev->irq);
+	disable_irq(irq);
 
 	writeq(val64, &bar0->rx_traffic_int);
 	writeq(val64, &bar0->tx_traffic_int);
@@ -2884,7 +2885,7 @@
 			break;
 		}
 	}
-	enable_irq(dev->irq);
+	enable_irq(irq);
 }
 #endif
 
@@ -3897,9 +3898,7 @@
 
 static void remove_inta_isr(struct s2io_nic *sp)
 {
-	struct net_device *dev = sp->dev;
-
-	free_irq(sp->pdev->irq, dev);
+	free_irq(sp->pdev->irq, sp->dev);
 }
 
 /* ********************************************************* *
@@ -7046,7 +7045,7 @@
 		}
 	}
 	if (sp->config.intr_type == INTA) {
-		err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
+		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
 				  sp->name, dev);
 		if (err) {
 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
@@ -7908,9 +7907,6 @@
 		goto bar1_remap_failed;
 	}
 
-	dev->irq = pdev->irq;
-	dev->base_addr = (unsigned long)sp->bar0;
-
 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
 	for (j = 0; j < MAX_TX_FIFOS; j++) {
 		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index ef76725..51387c3 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1882,25 +1882,24 @@
  */
 static void vxge_netpoll(struct net_device *dev)
 {
-	struct __vxge_hw_device *hldev;
-	struct vxgedev *vdev;
-
-	vdev = netdev_priv(dev);
-	hldev = pci_get_drvdata(vdev->pdev);
+	struct vxgedev *vdev = netdev_priv(dev);
+	struct pci_dev *pdev = vdev->pdev;
+	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
+	const int irq = pdev->irq;
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-	if (pci_channel_offline(vdev->pdev))
+	if (pci_channel_offline(pdev))
 		return;
 
-	disable_irq(dev->irq);
+	disable_irq(irq);
 	vxge_hw_device_clear_tx_rx(hldev);
 
 	vxge_hw_device_clear_tx_rx(hldev);
 	VXGE_COMPLETE_ALL_RX(vdev);
 	VXGE_COMPLETE_ALL_TX(vdev);
 
-	enable_irq(dev->irq);
+	enable_irq(irq);
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d  Exiting...", __func__, __LINE__);
@@ -2860,12 +2859,12 @@
 		vdev->config.rx_pause_enable);
 
 	if (vdev->vp_reset_timer.function == NULL)
-		vxge_os_timer(vdev->vp_reset_timer,
-			vxge_poll_vp_reset, vdev, (HZ/2));
+		vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev,
+			      HZ / 2);
 
 	/* There is no need to check for RxD leak and RxD lookup on Titan1A */
 	if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
-		vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+		vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
 			      HZ / 2);
 
 	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -3424,9 +3423,6 @@
 	ndev->features |= ndev->hw_features |
 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 
-	/*  Driver entry points */
-	ndev->irq = vdev->pdev->irq;
-	ndev->base_addr = (unsigned long) hldev->bar0;
 
 	ndev->netdev_ops = &vxge_netdev_ops;
 
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index f52a42d..35f3e75 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -416,12 +416,15 @@
 	static int p = val; \
 	module_param(p, int, 0)
 
-#define vxge_os_timer(timer, handle, arg, exp) do { \
-		init_timer(&timer); \
-		timer.function = handle; \
-		timer.data = (unsigned long) arg; \
-		mod_timer(&timer, (jiffies + exp)); \
-	} while (0);
+static inline
+void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
+		   struct vxgedev *vdev, unsigned long timeout)
+{
+	init_timer(timer);
+	timer->function = func;
+	timer->data = (unsigned long)vdev;
+	mod_timer(timer, jiffies + timeout);
+}
 
 void vxge_initialize_ethtool_ops(struct net_device *ndev);
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
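For completeness, callbacks handed to the new vxge_os_timer() helper keep the classic timer_list shape and receive the vxgedev pointer back through the data cookie; a sketch with a hypothetical callback name:

static void example_poll(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;

	/* ... periodic work ..., then re-arm with the same HZ/2 period the
	 * callers above use. */
	mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
}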
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index aca1304..d93a088 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3942,13 +3942,11 @@
 		ret = pci_enable_msi(np->pci_dev);
 		if (ret == 0) {
 			np->msi_flags |= NV_MSI_ENABLED;
-			dev->irq = np->pci_dev->irq;
 			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
 				netdev_info(dev, "request_irq failed %d\n",
 					    ret);
 				pci_disable_msi(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_ENABLED;
-				dev->irq = np->pci_dev->irq;
 				goto out_err;
 			}
 
@@ -5649,9 +5647,6 @@
 	np->base = ioremap(addr, np->register_size);
 	if (!np->base)
 		goto out_relreg;
-	dev->base_addr = (unsigned long)np->base;
-
-	dev->irq = pci_dev->irq;
 
 	np->rx_ring_size = RX_RING_DEFAULT;
 	np->tx_ring_size = TX_RING_DEFAULT;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 6dfc26d..d3469d8 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -990,10 +990,10 @@
 			ndev->stats.rx_errors++;
 		} else {
 			/* Packet is good */
-			skb = dev_alloc_skb(len + 8);
-			if (!skb)
+			skb = dev_alloc_skb(len);
+			if (!skb) {
 				ndev->stats.rx_dropped++;
-			else {
+			} else {
 				prdbuf = skb_put(skb, len);
 
 				/* Copy packet from buffer */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index dd14915..9f3dbc4 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -660,6 +660,7 @@
 extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
 extern u64 pch_rx_snap_read(struct pci_dev *pdev);
 extern u64 pch_tx_snap_read(struct pci_dev *pdev);
+extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
 #endif
 
 /* pch_gbe_param.c */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 8035e5f..89c6bcf 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -101,18 +101,19 @@
 
 #ifdef CONFIG_PCH_PTP
 /* Macros for ieee1588 */
-#define TICKS_NS_SHIFT  5
-
 /* 0x40 Time Synchronization Channel Control Register Bits */
 #define MASTER_MODE   (1<<0)
-#define SLAVE_MODE    (0<<0)
+#define SLAVE_MODE    (0)
 #define V2_MODE       (1<<31)
-#define CAP_MODE0     (0<<16)
+#define CAP_MODE0     (0)
 #define CAP_MODE2     (1<<17)
 
 /* 0x44 Time Synchronization Channel Event Register Bits */
 #define TX_SNAPSHOT_LOCKED (1<<0)
 #define RX_SNAPSHOT_LOCKED (1<<1)
+
+#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
+#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
 #endif
 
 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
@@ -133,10 +134,8 @@
 	u16 *hi, *id;
 	u32 lo;
 
-	if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) &&
-		(sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
+	if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
 		return 0;
-	}
 
 	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
 
@@ -153,8 +152,8 @@
 		seqid  == *id);
 }
 
-static void pch_rx_timestamp(
-			struct pch_gbe_adapter *adapter, struct sk_buff *skb)
+static void
+pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 {
 	struct skb_shared_hwtstamps *shhwtstamps;
 	struct pci_dev *pdev;
@@ -183,7 +182,6 @@
 		goto out;
 
 	ns = pch_rx_snap_read(pdev);
-	ns <<= TICKS_NS_SHIFT;
 
 	shhwtstamps = skb_hwtstamps(skb);
 	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
@@ -192,8 +190,8 @@
 	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
 }
 
-static void pch_tx_timestamp(
-			struct pch_gbe_adapter *adapter, struct sk_buff *skb)
+static void
+pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 {
 	struct skb_shared_hwtstamps shhwtstamps;
 	struct pci_dev *pdev;
@@ -202,17 +200,16 @@
 	u32 cnt, val;
 
 	shtx = skb_shinfo(skb);
-	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en))
-		shtx->tx_flags |= SKBTX_IN_PROGRESS;
-	else
+	if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
 		return;
 
+	shtx->tx_flags |= SKBTX_IN_PROGRESS;
+
 	/* Get ieee1588's dev information */
 	pdev = adapter->ptp_pdev;
 
 	/*
 	 * This really stinks, but we have to poll for the Tx time stamp.
-	 * Usually, the time stamp is ready after 4 to 6 microseconds.
 	 */
 	for (cnt = 0; cnt < 100; cnt++) {
 		val = pch_ch_event_read(pdev);
@@ -226,7 +223,6 @@
 	}
 
 	ns = pch_tx_snap_read(pdev);
-	ns <<= TICKS_NS_SHIFT;
 
 	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 	shhwtstamps.hwtstamp = ns_to_ktime(ns);
@@ -240,6 +236,7 @@
 	struct hwtstamp_config cfg;
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 	struct pci_dev *pdev;
+	u8 station[20];
 
 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
 		return -EFAULT;
@@ -267,15 +264,23 @@
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 		adapter->hwts_rx_en = 0;
-		pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0));
+		pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 		adapter->hwts_rx_en = 1;
-		pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0));
+		pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
 		break;
-	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 		adapter->hwts_rx_en = 1;
-		pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2));
+		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
+		strcpy(station, PTP_L4_MULTICAST_SA);
+		pch_set_station_address(station, pdev);
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+		adapter->hwts_rx_en = 1;
+		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
+		strcpy(station, PTP_L2_MULTICAST_SA);
+		pch_set_station_address(station, pdev);
 		break;
 	default:
 		return -ERANGE;
@@ -387,31 +392,85 @@
 }
 
 /**
+ * pch_gbe_mac_save_mac_addr_regs - Save MAC address registers
+ * @hw:		Pointer to the HW structure
+ * @mac_adr:	Pointer to the saved MAC address register contents
+ * @index:	MAC address array register index
+ */
+static void
+pch_gbe_mac_save_mac_addr_regs(struct pch_gbe_hw *hw,
+			struct pch_gbe_regs_mac_adr *mac_adr, u32 index)
+{
+	mac_adr->high = ioread32(&hw->reg->mac_adr[index].high);
+	mac_adr->low = ioread32(&hw->reg->mac_adr[index].low);
+}
+
+/**
+ * pch_gbe_mac_store_mac_addr_regs - Store MAC address registers
+ * @hw:		Pointer to the HW structure
+ * @mac_adr:	Pointer to the MAC address register contents to restore
+ * @index:	MAC address array register index
+ */
+static void
+pch_gbe_mac_store_mac_addr_regs(struct pch_gbe_hw *hw,
+			struct pch_gbe_regs_mac_adr *mac_adr, u32 index)
+{
+	u32 adrmask;
+
+	adrmask = ioread32(&hw->reg->ADDR_MASK);
+	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
+	/* wait busy */
+	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+	/* Set the MAC address to the MAC address xA/xB register */
+	iowrite32(mac_adr->high, &hw->reg->mac_adr[index].high);
+	iowrite32(mac_adr->low, &hw->reg->mac_adr[index].low);
+	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
+	/* wait busy */
+	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+}
+
+#define MAC_ADDR_LIST_NUM 16
+/**
  * pch_gbe_mac_reset_hw - Reset hardware
  * @hw:	Pointer to the HW structure
  */
 static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
 {
+	struct pch_gbe_regs_mac_adr mac_addr_list[MAC_ADDR_LIST_NUM];
+	int i;
+
 	/* Read the MAC address and store it in the private data */
 	pch_gbe_mac_read_mac_addr(hw);
+	/* Read other MAC addresses */
+	for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
+		pch_gbe_mac_save_mac_addr_regs(hw, &mac_addr_list[i], i);
 	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
 #ifdef PCH_GBE_MAC_IFOP_RGMII
 	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
 #endif
 	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
-	/* Setup the receive address */
+	/* Setup the receive addresses */
 	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+	for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
+		pch_gbe_mac_store_mac_addr_regs(hw, &mac_addr_list[i], i);
 	return;
 }
 
 static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
 {
-	/* Read the MAC address. and store to the private data */
+	struct pch_gbe_regs_mac_adr mac_addr_list[MAC_ADDR_LIST_NUM];
+	int i;
+
+	/* Read the MAC addresses and store them in the private data */
 	pch_gbe_mac_read_mac_addr(hw);
+	for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
+		pch_gbe_mac_save_mac_addr_regs(hw, &mac_addr_list[i], i);
 	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
 	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
-	/* Setup the MAC address */
+	/* Setup the MAC addresses */
 	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+	for (i = 1; i < MAC_ADDR_LIST_NUM; i++)
+		pch_gbe_mac_store_mac_addr_regs(hw, &mac_addr_list[i], i);
 	return;
 }
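The new HWTSTAMP_FILTER_PTP_V2_L4_EVENT/HWTSTAMP_FILTER_PTP_V2_L2_EVENT cases above are selected from userspace through the standard SIOCSHWTSTAMP ioctl. A hedged sketch of such a request (the helper name and the "eth0" example are assumptions):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_l2_timestamping(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);	/* e.g. "eth0" */
	ifr.ifr_data = (void *)&cfg;

	/* fd is any open socket bound to the stack, e.g. the PTP event
	 * socket used by the application. */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}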
 
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 0d29f5f..c236715 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -683,8 +683,6 @@
 	}
 
 	hmp->base = ioaddr;
-	dev->base_addr = (unsigned long)ioaddr;
-	dev->irq = irq;
 	pci_set_drvdata(pdev, dev);
 
 	hmp->chip_id = chip_id;
@@ -859,14 +857,11 @@
 	u32 rx_int_var, tx_int_var;
 	u16 fifo_info;
 
-	i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev);
+	i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED,
+			dev->name, dev);
 	if (i)
 		return i;
 
-	if (hamachi_debug > 1)
-		printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
-			   dev->name, dev->irq);
-
 	hamachi_init_ring(dev);
 
 #if ADDRLEN == 64
@@ -1705,7 +1700,7 @@
 	}
 #endif /* __i386__ debugging only */
 
-	free_irq(dev->irq, dev);
+	free_irq(hmp->pci_dev->irq, dev);
 
 	del_timer_sync(&hmp->timer);
 
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 7757b80..04e622f 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -427,9 +427,6 @@
 	/* Reset the chip. */
 	iowrite32(0x80000000, ioaddr + DMACtrl);
 
-	dev->base_addr = (unsigned long)ioaddr;
-	dev->irq = irq;
-
 	pci_set_drvdata(pdev, dev);
 	spin_lock_init(&np->lock);
 
@@ -569,25 +566,20 @@
 static int yellowfin_open(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
+	const int irq = yp->pci_dev->irq;
 	void __iomem *ioaddr = yp->base;
-	int i, ret;
+	int i, rc;
 
 	/* Reset the chip. */
 	iowrite32(0x80000000, ioaddr + DMACtrl);
 
-	ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
-	if (ret)
-		return ret;
+	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+	if (rc)
+		return rc;
 
-	if (yellowfin_debug > 1)
-		netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
-			      __func__, dev->irq);
-
-	ret = yellowfin_init_ring(dev);
-	if (ret) {
-		free_irq(dev->irq, dev);
-		return ret;
-	}
+	rc = yellowfin_init_ring(dev);
+	if (rc < 0)
+		goto err_free_irq;
 
 	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -647,8 +639,12 @@
 	yp->timer.data = (unsigned long)dev;
 	yp->timer.function = yellowfin_timer;				/* timer handler */
 	add_timer(&yp->timer);
+out:
+	return rc;
 
-	return 0;
+err_free_irq:
+	free_irq(irq, dev);
+	goto out;
 }
 
 static void yellowfin_timer(unsigned long data)
@@ -1251,7 +1247,7 @@
 	}
 #endif /* __i386__ debugging only */
 
-	free_irq(dev->irq, dev);
+	free_irq(yp->pci_dev->irq, dev);
 
 	/* Free all the skbuffs in the Rx queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 49343ec..09d8d33 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3845,7 +3845,7 @@
 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
 			WAKE_MCAST | WAKE_BCAST)) {
 		netif_err(qdev, ifdown, qdev->ndev,
-			  "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
+			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
 			  qdev->wol);
 		return -EINVAL;
 	}
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index b96e192..4de7364 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -4,7 +4,7 @@
  * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
  * Copyright (C) 2007
  *	Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
- *	Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -74,9 +74,13 @@
 #define MT_ICR		0x0C	/* TX interrupt control */
 #define MR_ICR		0x10	/* RX interrupt control */
 #define MTPR		0x14	/* TX poll command register */
+#define  TM2TX		0x0001	/* Trigger MAC to transmit */
 #define MR_BSR		0x18	/* RX buffer size */
 #define MR_DCR		0x1A	/* RX descriptor control */
 #define MLSR		0x1C	/* Last status */
+#define  TX_FIFO_UNDR	0x0200	/* TX FIFO under-run */
+#define  TX_EXCEEDC	0x2000	/* Transmit exceed collision */
+#define  TX_LATEC	0x4000	/* Transmit late collision */
 #define MMDIO		0x20	/* MDIO control register */
 #define  MDIO_WRITE	0x4000	/* MDIO write */
 #define  MDIO_READ	0x2000	/* MDIO read */
@@ -124,6 +128,9 @@
 #define MID_3M		0x82	/* MID3 Medium */
 #define MID_3H		0x84	/* MID3 High */
 #define PHY_CC		0x88	/* PHY status change configuration register */
+#define  SCEN		0x8000	/* PHY status change enable */
+#define  PHYAD_SHIFT	8	/* PHY address shift */
+#define  TMRDIV_SHIFT	0	/* Timer divider shift */
 #define PHY_ST		0x8A	/* PHY status register */
 #define MAC_SM		0xAC	/* MAC status machine */
 #define  MAC_SM_RST	0x0002	/* MAC status machine reset */
@@ -137,6 +144,8 @@
 #define MBCR_DEFAULT	0x012A	/* MAC Bus Control Register */
 #define MCAST_MAX	3	/* Max number multicast addresses to filter */
 
+#define MAC_DEF_TIMEOUT	2048	/* Default MAC read/write operation timeout */
+
 /* Descriptor status */
 #define DSC_OWNER_MAC	0x8000	/* MAC is the owner of this descriptor */
 #define DSC_RX_OK	0x4000	/* RX was successful */
@@ -187,7 +196,7 @@
 	dma_addr_t rx_ring_dma;
 	dma_addr_t tx_ring_dma;
 	u16	tx_free_desc;
-	u16	mcr0, mcr1;
+	u16	mcr0;
 	struct net_device *dev;
 	struct mii_bus *mii_bus;
 	struct napi_struct napi;
@@ -204,7 +213,7 @@
 /* Read a word data from PHY Chip */
 static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
 {
-	int limit = 2048;
+	int limit = MAC_DEF_TIMEOUT;
 	u16 cmd;
 
 	iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
@@ -222,7 +231,7 @@
 static void r6040_phy_write(void __iomem *ioaddr,
 					int phy_addr, int reg, u16 val)
 {
-	int limit = 2048;
+	int limit = MAC_DEF_TIMEOUT;
 	u16 cmd;
 
 	iowrite16(val, ioaddr + MMWD);
@@ -358,27 +367,35 @@
 	return rc;
 }
 
-static void r6040_init_mac_regs(struct net_device *dev)
+static void r6040_reset_mac(struct r6040_private *lp)
 {
-	struct r6040_private *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
-	int limit = 2048;
+	int limit = MAC_DEF_TIMEOUT;
 	u16 cmd;
 
-	/* Mask Off Interrupt */
-	iowrite16(MSK_INT, ioaddr + MIER);
-
-	/* Reset RDC MAC */
 	iowrite16(MAC_RST, ioaddr + MCR1);
 	while (limit--) {
 		cmd = ioread16(ioaddr + MCR1);
 		if (cmd & MAC_RST)
 			break;
 	}
+
 	/* Reset internal state machine */
 	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
 	iowrite16(0, ioaddr + MAC_SM);
 	mdelay(5);
+}
+
+static void r6040_init_mac_regs(struct net_device *dev)
+{
+	struct r6040_private *lp = netdev_priv(dev);
+	void __iomem *ioaddr = lp->base;
+
+	/* Mask Off Interrupt */
+	iowrite16(MSK_INT, ioaddr + MIER);
+
+	/* Reset RDC MAC */
+	r6040_reset_mac(lp);
 
 	/* MAC Bus Control Register */
 	iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
@@ -407,7 +424,7 @@
 	/* Let TX poll the descriptors
 	 * we may have been called by r6040_tx_timeout which has left
 	 * some unsent tx buffers */
-	iowrite16(0x01, ioaddr + MTPR);
+	iowrite16(TM2TX, ioaddr + MTPR);
 }
 
 static void r6040_tx_timeout(struct net_device *dev)
@@ -445,18 +462,13 @@
 {
 	struct r6040_private *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
-	int limit = 2048;
 	u16 *adrp;
-	u16 cmd;
 
 	/* Stop MAC */
 	iowrite16(MSK_INT, ioaddr + MIER);	/* Mask Off Interrupt */
-	iowrite16(MAC_RST, ioaddr + MCR1);	/* Reset RDC MAC */
-	while (limit--) {
-		cmd = ioread16(ioaddr + MCR1);
-		if (cmd & MAC_RST)
-			break;
-	}
+
+	/* Reset RDC MAC */
+	r6040_reset_mac(lp);
 
 	/* Restore MAC Address to MIDx */
 	adrp = (u16 *) dev->dev_addr;
@@ -599,9 +611,9 @@
 		/* Check for errors */
 		err = ioread16(ioaddr + MLSR);
 
-		if (err & 0x0200)
-			dev->stats.rx_fifo_errors++;
-		if (err & (0x2000 | 0x4000))
+		if (err & TX_FIFO_UNDR)
+			dev->stats.tx_fifo_errors++;
+		if (err & (TX_EXCEEDC | TX_LATEC))
 			dev->stats.tx_carrier_errors++;
 
 		if (descptr->status & DSC_OWNER_MAC)
@@ -736,11 +748,7 @@
 	u16 *adrp;
 
 	/* Reset MAC */
-	iowrite16(MAC_RST, ioaddr + MCR1);
-	/* Reset internal state machine */
-	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
-	iowrite16(0, ioaddr + MAC_SM);
-	mdelay(5);
+	r6040_reset_mac(lp);
 
 	/* Restore MAC Address */
 	adrp = (u16 *) dev->dev_addr;
@@ -840,7 +848,7 @@
 	skb_tx_timestamp(skb);
 
 	/* Trigger the MAC to check the TX descriptor */
-	iowrite16(0x01, ioaddr + MTPR);
+	iowrite16(TM2TX, ioaddr + MTPR);
 	lp->tx_insert_ptr = descptr->vndescp;
 
 	/* If no tx resource, stop */
@@ -973,6 +981,7 @@
 	.get_settings		= netdev_get_settings,
 	.set_settings		= netdev_set_settings,
 	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops r6040_netdev_ops = {
@@ -1126,10 +1135,15 @@
 		err = -EIO;
 		goto err_out_free_res;
 	}
+
 	/* If PHY status change register is still set to zero it means the
-	 * bootloader didn't initialize it */
+	 * bootloader didn't initialize it, so we set it to:
+	 * - enable phy status change
+	 * - enable all phy addresses
+	 * - set to lowest timer divider */
 	if (ioread16(ioaddr + PHY_CC) == 0)
-		iowrite16(0x9f07, ioaddr + PHY_CC);
+		iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
+				7 << TMRDIV_SHIFT, ioaddr + PHY_CC);
 
 	/* Init system & device */
 	lp->base = ioaddr;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index abc7907..5eef290 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -635,9 +635,12 @@
  */
 static void cp_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	cp_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct cp_private *cp = netdev_priv(dev);
+	const int irq = cp->pdev->irq;
+
+	disable_irq(irq);
+	cp_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -958,6 +961,11 @@
 	cpw8(Cmd, RxOn | TxOn);
 }
 
+static void cp_enable_irq(struct cp_private *cp)
+{
+	cpw16_f(IntrMask, cp_intr_mask);
+}
+
 static void cp_init_hw (struct cp_private *cp)
 {
 	struct net_device *dev = cp->dev;
@@ -997,8 +1005,6 @@
 
 	cpw16(MultiIntr, 0);
 
-	cpw16_f(IntrMask, cp_intr_mask);
-
 	cpw8_f(Cfg9346, Cfg9346_Lock);
 }
 
@@ -1114,6 +1120,7 @@
 static int cp_open (struct net_device *dev)
 {
 	struct cp_private *cp = netdev_priv(dev);
+	const int irq = cp->pdev->irq;
 	int rc;
 
 	netif_dbg(cp, ifup, dev, "enabling interface\n");
@@ -1126,10 +1133,12 @@
 
 	cp_init_hw(cp);
 
-	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
+	rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
 	if (rc)
 		goto err_out_hw;
 
+	cp_enable_irq(cp);
+
 	netif_carrier_off(dev);
 	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
 	netif_start_queue(dev);
@@ -1161,7 +1170,7 @@
 
 	spin_unlock_irqrestore(&cp->lock, flags);
 
-	free_irq(dev->irq, dev);
+	free_irq(cp->pdev->irq, dev);
 
 	cp_free_rings(cp);
 	return 0;
@@ -1909,7 +1918,6 @@
 		       (unsigned long long)pciaddr);
 		goto err_out_res;
 	}
-	dev->base_addr = (unsigned long) regs;
 	cp->regs = regs;
 
 	cp_stop_hw(cp);
@@ -1937,14 +1945,12 @@
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 		NETIF_F_HIGHDMA;
 
-	dev->irq = pdev->irq;
-
 	rc = register_netdev(dev);
 	if (rc)
 		goto err_out_iomap;
 
-	netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
-		    dev->base_addr, dev->dev_addr, dev->irq);
+	netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
+		    regs, dev->dev_addr, pdev->irq);
 
 	pci_set_drvdata(pdev, dev);
 
@@ -2031,6 +2037,7 @@
 	/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
 	cp_init_rings_index (cp);
 	cp_init_hw (cp);
+	cp_enable_irq(cp);
 	netif_start_queue (dev);
 
 	spin_lock_irqsave (&cp->lock, flags);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index df7fd8d..03df076 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -148,9 +148,9 @@
 
 /* Whether to use MMIO or PIO. Default to MMIO. */
 #ifdef CONFIG_8139TOO_PIO
-static int use_io = 1;
+static bool use_io = true;
 #else
-static int use_io = 0;
+static bool use_io = false;
 #endif
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
@@ -620,7 +620,7 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
-module_param(use_io, int, 0);
+module_param(use_io, bool, 0);
 MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
 module_param(multicast_filter_limit, int, 0);
 module_param_array(media, int, NULL, 0);
@@ -750,15 +750,22 @@
 
 static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
 {
+	struct device *d = &pdev->dev;
 	void __iomem *ioaddr;
 	struct net_device *dev;
 	struct rtl8139_private *tp;
 	u8 tmp8;
 	int rc, disable_dev_on_err = 0;
-	unsigned int i;
-	unsigned long pio_start, pio_end, pio_flags, pio_len;
-	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+	unsigned int i, bar;
+	unsigned long io_len;
 	u32 version;
+	static const struct {
+		unsigned long mask;
+		char *type;
+	} res[] = {
+		{ IORESOURCE_IO,  "PIO" },
+		{ IORESOURCE_MEM, "MMIO" }
+	};
 
 	assert (pdev != NULL);
 
@@ -777,78 +784,45 @@
 	if (rc)
 		goto err_out;
 
-	pio_start = pci_resource_start (pdev, 0);
-	pio_end = pci_resource_end (pdev, 0);
-	pio_flags = pci_resource_flags (pdev, 0);
-	pio_len = pci_resource_len (pdev, 0);
-
-	mmio_start = pci_resource_start (pdev, 1);
-	mmio_end = pci_resource_end (pdev, 1);
-	mmio_flags = pci_resource_flags (pdev, 1);
-	mmio_len = pci_resource_len (pdev, 1);
-
-	/* set this immediately, we need to know before
-	 * we talk to the chip directly */
-	pr_debug("PIO region size == 0x%02lX\n", pio_len);
-	pr_debug("MMIO region size == 0x%02lX\n", mmio_len);
-
-retry:
-	if (use_io) {
-		/* make sure PCI base addr 0 is PIO */
-		if (!(pio_flags & IORESOURCE_IO)) {
-			dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
-			rc = -ENODEV;
-			goto err_out;
-		}
-		/* check for weird/broken PCI region reporting */
-		if (pio_len < RTL_MIN_IO_SIZE) {
-			dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
-			rc = -ENODEV;
-			goto err_out;
-		}
-	} else {
-		/* make sure PCI base addr 1 is MMIO */
-		if (!(mmio_flags & IORESOURCE_MEM)) {
-			dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
-			rc = -ENODEV;
-			goto err_out;
-		}
-		if (mmio_len < RTL_MIN_IO_SIZE) {
-			dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
-			rc = -ENODEV;
-			goto err_out;
-		}
-	}
-
 	rc = pci_request_regions (pdev, DRV_NAME);
 	if (rc)
 		goto err_out;
 	disable_dev_on_err = 1;
 
-	/* enable PCI bus-mastering */
 	pci_set_master (pdev);
 
-	if (use_io) {
-		ioaddr = pci_iomap(pdev, 0, 0);
-		if (!ioaddr) {
-			dev_err(&pdev->dev, "cannot map PIO, aborting\n");
-			rc = -EIO;
-			goto err_out;
-		}
-		dev->base_addr = pio_start;
-		tp->regs_len = pio_len;
-	} else {
-		/* ioremap MMIO region */
-		ioaddr = pci_iomap(pdev, 1, 0);
-		if (ioaddr == NULL) {
-			dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n");
-			pci_release_regions(pdev);
-			use_io = 1;
+retry:
+	/* PIO bar register comes first. */
+	bar = !use_io;
+
+	io_len = pci_resource_len(pdev, bar);
+
+	dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len);
+
+	if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) {
+		dev_err(d, "region #%d not a %s resource, aborting\n", bar,
+			res[bar].type);
+		rc = -ENODEV;
+		goto err_out;
+	}
+	if (io_len < RTL_MIN_IO_SIZE) {
+		dev_err(d, "Invalid PCI %s region size(s), aborting\n",
+			res[bar].type);
+		rc = -ENODEV;
+		goto err_out;
+	}
+
+	ioaddr = pci_iomap(pdev, bar, 0);
+	if (!ioaddr) {
+		dev_err(d, "cannot map %s\n", res[bar].type);
+		if (!use_io) {
+			use_io = true;
 			goto retry;
 		}
-		dev->base_addr = (long) ioaddr;
-		tp->regs_len = mmio_len;
+		rc = -ENODEV;
+		goto err_out;
 	}
+	tp->regs_len = io_len;
 	tp->mmio_addr = ioaddr;
 
 	/* Bring old chips out of low-power mode. */
@@ -1035,8 +1009,6 @@
 	dev->hw_features |= NETIF_F_RXALL;
 	dev->hw_features |= NETIF_F_RXFCS;
 
-	dev->irq = pdev->irq;
-
 	/* tp zeroed and aligned in alloc_etherdev */
 	tp = netdev_priv(dev);
 
@@ -1062,9 +1034,9 @@
 
 	pci_set_drvdata (pdev, dev);
 
-	netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+	netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n",
 		    board_info[ent->driver_data].name,
-		    dev->base_addr, dev->dev_addr, dev->irq);
+		    ioaddr, dev->dev_addr, pdev->irq);
 
 	netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
 		   rtl_chip_info[tp->chipset].name);
@@ -1339,10 +1311,11 @@
 static int rtl8139_open (struct net_device *dev)
 {
 	struct rtl8139_private *tp = netdev_priv(dev);
-	int retval;
 	void __iomem *ioaddr = tp->mmio_addr;
+	const int irq = tp->pci_dev->irq;
+	int retval;
 
-	retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
+	retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
 	if (retval)
 		return retval;
 
@@ -1351,7 +1324,7 @@
 	tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
 					   &tp->rx_ring_dma, GFP_KERNEL);
 	if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
-		free_irq(dev->irq, dev);
+		free_irq(irq, dev);
 
 		if (tp->tx_bufs)
 			dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
@@ -1377,7 +1350,7 @@
 		  "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
 		  __func__,
 		  (unsigned long long)pci_resource_start (tp->pci_dev, 1),
-		  dev->irq, RTL_R8 (MediaStatus),
+		  irq, RTL_R8 (MediaStatus),
 		  tp->mii.full_duplex ? "full" : "half");
 
 	rtl8139_start_thread(tp);
@@ -2240,9 +2213,12 @@
  */
 static void rtl8139_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	rtl8139_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct rtl8139_private *tp = netdev_priv(dev);
+	const int irq = tp->pci_dev->irq;
+
+	disable_irq(irq);
+	rtl8139_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -2295,7 +2271,7 @@
 
 	spin_unlock_irqrestore (&tp->lock, flags);
 
-	free_irq (dev->irq, dev);
+	free_irq(tp->pci_dev->irq, dev);
 
 	rtl8139_tx_clear (tp);
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f545093..00628d8 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -44,6 +44,8 @@
 #define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
 #define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
 #define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
+#define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
+#define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
 
 #ifdef RTL8169_DEBUG
 #define assert(expr) \
@@ -133,6 +135,8 @@
 	RTL_GIGA_MAC_VER_34,
 	RTL_GIGA_MAC_VER_35,
 	RTL_GIGA_MAC_VER_36,
+	RTL_GIGA_MAC_VER_37,
+	RTL_GIGA_MAC_VER_38,
 	RTL_GIGA_MAC_NONE   = 0xff,
 };
 
@@ -245,6 +249,12 @@
 	[RTL_GIGA_MAC_VER_36] =
 		_R("RTL8168f/8111f",	RTL_TD_1, FIRMWARE_8168F_2,
 							JUMBO_9K, false),
+	[RTL_GIGA_MAC_VER_37] =
+		_R("RTL8402",		RTL_TD_1, FIRMWARE_8402_1,
+							JUMBO_1K, true),
+	[RTL_GIGA_MAC_VER_38] =
+		_R("RTL8411",		RTL_TD_1, FIRMWARE_8411_1,
+							JUMBO_9K, false),
 };
 #undef _R
 
@@ -315,6 +325,8 @@
 	Config0		= 0x51,
 	Config1		= 0x52,
 	Config2		= 0x53,
+#define PME_SIGNAL			(1 << 5)	/* 8168c and later */
+
 	Config3		= 0x54,
 	Config4		= 0x55,
 	Config5		= 0x56,
@@ -355,6 +367,9 @@
 #define	CSIAR_BYTE_ENABLE		0x0f
 #define	CSIAR_BYTE_ENABLE_SHIFT		12
 #define	CSIAR_ADDR_MASK			0x0fff
+#define CSIAR_FUNC_CARD			0x00000000
+#define CSIAR_FUNC_SDIO			0x00010000
+#define CSIAR_FUNC_NIC			0x00020000
 	PMCH			= 0x6f,
 	EPHYAR			= 0x80,
 #define	EPHYAR_FLAG			0x80000000
@@ -716,6 +731,11 @@
 		void (*disable)(struct rtl8169_private *);
 	} jumbo_ops;
 
+	struct csi_ops {
+		void (*write)(void __iomem *, int, int);
+		u32 (*read)(void __iomem *, int);
+	} csi_ops;
+
 	int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
 	int (*get_settings)(struct net_device *, struct ethtool_cmd *);
 	void (*phy_reset_enable)(struct rtl8169_private *tp);
@@ -768,6 +788,8 @@
 MODULE_FIRMWARE(FIRMWARE_8105E_1);
 MODULE_FIRMWARE(FIRMWARE_8168F_1);
 MODULE_FIRMWARE(FIRMWARE_8168F_2);
+MODULE_FIRMWARE(FIRMWARE_8402_1);
+MODULE_FIRMWARE(FIRMWARE_8411_1);
 
 static void rtl_lock_work(struct rtl8169_private *tp)
 {
@@ -1078,40 +1100,6 @@
 	return value;
 }
 
-static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
-{
-	unsigned int i;
-
-	RTL_W32(CSIDR, value);
-	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
-		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
-
-	for (i = 0; i < 100; i++) {
-		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
-			break;
-		udelay(10);
-	}
-}
-
-static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
-{
-	u32 value = ~0x00;
-	unsigned int i;
-
-	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
-		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
-
-	for (i = 0; i < 100; i++) {
-		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
-			value = RTL_R32(CSIDR);
-			break;
-		}
-		udelay(10);
-	}
-
-	return value;
-}
-
 static
 void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
 {
@@ -1281,7 +1269,8 @@
 	if (!netif_running(dev))
 		return;
 
-	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
 		if (RTL_R8(PHYstatus) & _1000bpsF) {
 			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
 				      0x00000011, ERIAR_EXGMAC);
@@ -1316,6 +1305,16 @@
 			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
 				      0x0000003f, ERIAR_EXGMAC);
 		}
+	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
+		if (RTL_R8(PHYstatus) & _10bps) {
+			rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
+				      0x4d02, ERIAR_EXGMAC);
+			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
+				      0x0060, ERIAR_EXGMAC);
+		} else {
+			rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
+				      0x0000, ERIAR_EXGMAC);
+		}
 	}
 }
 
@@ -1396,7 +1395,6 @@
 		u16 reg;
 		u8  mask;
 	} cfg[] = {
-		{ WAKE_ANY,   Config1, PMEnable },
 		{ WAKE_PHY,   Config3, LinkUp },
 		{ WAKE_MAGIC, Config3, MagicPacket },
 		{ WAKE_UCAST, Config5, UWF },
@@ -1404,16 +1402,32 @@
 		{ WAKE_MCAST, Config5, MWF },
 		{ WAKE_ANY,   Config5, LanWake }
 	};
+	u8 options;
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 
 	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
-		u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
 		if (wolopts & cfg[i].opt)
 			options |= cfg[i].mask;
 		RTL_W8(cfg[i].reg, options);
 	}
 
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
+		options = RTL_R8(Config1) & ~PMEnable;
+		if (wolopts)
+			options |= PMEnable;
+		RTL_W8(Config1, options);
+		break;
+	default:
+		options = RTL_R8(Config2) & ~PME_SIGNAL;
+		if (wolopts)
+			options |= PME_SIGNAL;
+		RTL_W8(Config2, options);
+		break;
+	}
+
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 }
 
@@ -1853,6 +1867,7 @@
 	.get_strings		= rtl8169_get_strings,
 	.get_sset_count		= rtl8169_get_sset_count,
 	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1876,6 +1891,7 @@
 		int mac_version;
 	} mac_info[] = {
 		/* 8168F family. */
+		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
 		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
 		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },
 
@@ -1913,6 +1929,7 @@
 		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },
 
 		/* 8101 family. */
+		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
 		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
 		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
 		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
@@ -3013,6 +3030,28 @@
 	rtl_writephy(tp, 0x1f, 0x0000);
 }
 
+static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
+{
+	/* For 4-corner performance improvement */
+	rtl_writephy(tp, 0x1f, 0x0005);
+	rtl_writephy(tp, 0x05, 0x8b80);
+	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* PHY auto speed down */
+	rtl_writephy(tp, 0x1f, 0x0007);
+	rtl_writephy(tp, 0x1e, 0x002d);
+	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+	/* Improve 10M EEE waveform */
+	rtl_writephy(tp, 0x1f, 0x0005);
+	rtl_writephy(tp, 0x05, 0x8b86);
+	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+}
+
 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
 {
 	static const struct phy_reg phy_reg_init[] = {
@@ -3054,24 +3093,7 @@
 
 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 
-	/* For 4-corner performance improve */
-	rtl_writephy(tp, 0x1f, 0x0005);
-	rtl_writephy(tp, 0x05, 0x8b80);
-	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
-	rtl_writephy(tp, 0x1f, 0x0000);
-
-	/* PHY auto speed down */
-	rtl_writephy(tp, 0x1f, 0x0007);
-	rtl_writephy(tp, 0x1e, 0x002d);
-	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
-	rtl_writephy(tp, 0x1f, 0x0000);
-	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
-
-	/* Improve 10M EEE waveform */
-	rtl_writephy(tp, 0x1f, 0x0005);
-	rtl_writephy(tp, 0x05, 0x8b86);
-	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
-	rtl_writephy(tp, 0x1f, 0x0000);
+	rtl8168f_hw_phy_config(tp);
 
 	/* Improve 2-pair detection performance */
 	rtl_writephy(tp, 0x1f, 0x0005);
@@ -3084,23 +3106,104 @@
 {
 	rtl_apply_firmware(tp);
 
-	/* For 4-corner performance improve */
+	rtl8168f_hw_phy_config(tp);
+}
+
+static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	static const struct phy_reg phy_reg_init[] = {
+		/* Channel estimation fine tune */
+		{ 0x1f, 0x0003 },
+		{ 0x09, 0xa20f },
+		{ 0x1f, 0x0000 },
+
+		/* Modify green table for giga & fnet */
+		{ 0x1f, 0x0005 },
+		{ 0x05, 0x8b55 },
+		{ 0x06, 0x0000 },
+		{ 0x05, 0x8b5e },
+		{ 0x06, 0x0000 },
+		{ 0x05, 0x8b67 },
+		{ 0x06, 0x0000 },
+		{ 0x05, 0x8b70 },
+		{ 0x06, 0x0000 },
+		{ 0x1f, 0x0000 },
+		{ 0x1f, 0x0007 },
+		{ 0x1e, 0x0078 },
+		{ 0x17, 0x0000 },
+		{ 0x19, 0x00aa },
+		{ 0x1f, 0x0000 },
+
+		/* Modify green table for 10M */
+		{ 0x1f, 0x0005 },
+		{ 0x05, 0x8b79 },
+		{ 0x06, 0xaa00 },
+		{ 0x1f, 0x0000 },
+
+		/* Disable hiimpedance detection (RTCT) */
+		{ 0x1f, 0x0003 },
+		{ 0x01, 0x328a },
+		{ 0x1f, 0x0000 }
+	};
+
+	rtl_apply_firmware(tp);
+
+	rtl8168f_hw_phy_config(tp);
+
+	/* Improve 2-pair detection performance */
 	rtl_writephy(tp, 0x1f, 0x0005);
-	rtl_writephy(tp, 0x05, 0x8b80);
-	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
+	rtl_writephy(tp, 0x05, 0x8b85);
+	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
 	rtl_writephy(tp, 0x1f, 0x0000);
 
-	/* PHY auto speed down */
+	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+	/* Modify green table for giga */
+	rtl_writephy(tp, 0x1f, 0x0005);
+	rtl_writephy(tp, 0x05, 0x8b54);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
+	rtl_writephy(tp, 0x05, 0x8b5d);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
+	rtl_writephy(tp, 0x05, 0x8a7c);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+	rtl_writephy(tp, 0x05, 0x8a7f);
+	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
+	rtl_writephy(tp, 0x05, 0x8a82);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+	rtl_writephy(tp, 0x05, 0x8a85);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+	rtl_writephy(tp, 0x05, 0x8a88);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* uc same-seed solution */
+	rtl_writephy(tp, 0x1f, 0x0005);
+	rtl_writephy(tp, 0x05, 0x8b85);
+	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* eee setting */
+	rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
+	rtl_writephy(tp, 0x1f, 0x0005);
+	rtl_writephy(tp, 0x05, 0x8b85);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
+	rtl_writephy(tp, 0x1f, 0x0004);
 	rtl_writephy(tp, 0x1f, 0x0007);
-	rtl_writephy(tp, 0x1e, 0x002d);
-	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+	rtl_writephy(tp, 0x1e, 0x0020);
+	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
 	rtl_writephy(tp, 0x1f, 0x0000);
-	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+	rtl_writephy(tp, 0x0d, 0x0007);
+	rtl_writephy(tp, 0x0e, 0x003c);
+	rtl_writephy(tp, 0x0d, 0x4007);
+	rtl_writephy(tp, 0x0e, 0x0000);
+	rtl_writephy(tp, 0x0d, 0x0000);
 
-	/* Improve 10M EEE waveform */
-	rtl_writephy(tp, 0x1f, 0x0005);
-	rtl_writephy(tp, 0x05, 0x8b86);
-	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+	/* Green feature */
+	rtl_writephy(tp, 0x1f, 0x0003);
+	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
+	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
 	rtl_writephy(tp, 0x1f, 0x0000);
 }
 
@@ -3147,6 +3250,25 @@
 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 }
 
+static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	/* Disable ALDPS before setting firmware */
+	rtl_writephy(tp, 0x1f, 0x0000);
+	rtl_writephy(tp, 0x18, 0x0310);
+	msleep(20);
+
+	rtl_apply_firmware(tp);
+
+	/* EEE setting */
+	rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+	rtl_writephy(tp, 0x1f, 0x0004);
+	rtl_writephy(tp, 0x10, 0x401f);
+	rtl_writephy(tp, 0x19, 0x7030);
+	rtl_writephy(tp, 0x1f, 0x0000);
+}
+
 static void rtl_hw_phy_config(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -3235,6 +3357,14 @@
 		rtl8168f_2_hw_phy_config(tp);
 		break;
 
+	case RTL_GIGA_MAC_VER_37:
+		rtl8402_hw_phy_config(tp);
+		break;
+
+	case RTL_GIGA_MAC_VER_38:
+		rtl8411_hw_phy_config(tp);
+		break;
+
 	default:
 		break;
 	}
@@ -3472,6 +3602,8 @@
 	case RTL_GIGA_MAC_VER_32:
 	case RTL_GIGA_MAC_VER_33:
 	case RTL_GIGA_MAC_VER_34:
+	case RTL_GIGA_MAC_VER_37:
+	case RTL_GIGA_MAC_VER_38:
 		RTL_W32(RxConfig, RTL_R32(RxConfig) |
 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
 		break;
@@ -3507,15 +3639,45 @@
 
 static void r810x_pll_power_down(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+
 	if (rtl_wol_pll_power_down(tp))
 		return;
 
 	r810x_phy_power_down(tp);
+
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_07:
+	case RTL_GIGA_MAC_VER_08:
+	case RTL_GIGA_MAC_VER_09:
+	case RTL_GIGA_MAC_VER_10:
+	case RTL_GIGA_MAC_VER_13:
+	case RTL_GIGA_MAC_VER_16:
+		break;
+	default:
+		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
+		break;
+	}
 }
 
 static void r810x_pll_power_up(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+
 	r810x_phy_power_up(tp);
+
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_07:
+	case RTL_GIGA_MAC_VER_08:
+	case RTL_GIGA_MAC_VER_09:
+	case RTL_GIGA_MAC_VER_10:
+	case RTL_GIGA_MAC_VER_13:
+	case RTL_GIGA_MAC_VER_16:
+		break;
+	default:
+		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
+		break;
+	}
 }
 
 static void r8168_phy_power_up(struct rtl8169_private *tp)
@@ -3619,13 +3781,6 @@
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 
-	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
-	    r8168dp_check_dash(tp)) {
-		return;
-	}
-
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_25:
 	case RTL_GIGA_MAC_VER_26:
@@ -3670,6 +3825,7 @@
 	case RTL_GIGA_MAC_VER_16:
 	case RTL_GIGA_MAC_VER_29:
 	case RTL_GIGA_MAC_VER_30:
+	case RTL_GIGA_MAC_VER_37:
 		ops->down	= r810x_pll_power_down;
 		ops->up		= r810x_pll_power_up;
 		break;
@@ -3694,6 +3850,7 @@
 	case RTL_GIGA_MAC_VER_34:
 	case RTL_GIGA_MAC_VER_35:
 	case RTL_GIGA_MAC_VER_36:
+	case RTL_GIGA_MAC_VER_38:
 		ops->down	= r8168_pll_power_down;
 		ops->up		= r8168_pll_power_up;
 		break;
@@ -3979,7 +4136,9 @@
 			udelay(20);
 	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
 	           tp->mac_version == RTL_GIGA_MAC_VER_35 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_36) {
+	           tp->mac_version == RTL_GIGA_MAC_VER_36 ||
+	           tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+	           tp->mac_version == RTL_GIGA_MAC_VER_38) {
 		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
 		while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
 			udelay(100);
@@ -4185,22 +4344,141 @@
 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
 }
 
-static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
+static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
+{
+	if (tp->csi_ops.write)
+		tp->csi_ops.write(tp->mmio_addr, addr, value);
+}
+
+static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
+{
+	if (tp->csi_ops.read)
+		return tp->csi_ops.read(tp->mmio_addr, addr);
+	else
+		return ~0;
+}
+
+static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
 {
 	u32 csi;
 
-	csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
-	rtl_csi_write(ioaddr, 0x070c, csi | bits);
+	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
+	rtl_csi_write(tp, 0x070c, csi | bits);
 }
 
-static void rtl_csi_access_enable_1(void __iomem *ioaddr)
+static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable(ioaddr, 0x17000000);
+	rtl_csi_access_enable(tp, 0x17000000);
 }
 
-static void rtl_csi_access_enable_2(void __iomem *ioaddr)
+static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable(ioaddr, 0x27000000);
+	rtl_csi_access_enable(tp, 0x27000000);
+}
+
+static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
+{
+	unsigned int i;
+
+	RTL_W32(CSIDR, value);
+	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+	for (i = 0; i < 100; i++) {
+		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
+			break;
+		udelay(10);
+	}
+}
+
+static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
+{
+	u32 value = ~0x00;
+	unsigned int i;
+
+	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
+		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+	for (i = 0; i < 100; i++) {
+		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
+			value = RTL_R32(CSIDR);
+			break;
+		}
+		udelay(10);
+	}
+
+	return value;
+}
+
+static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
+{
+	unsigned int i;
+
+	RTL_W32(CSIDR, value);
+	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
+		CSIAR_FUNC_NIC);
+
+	for (i = 0; i < 100; i++) {
+		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
+			break;
+		udelay(10);
+	}
+}
+
+static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
+{
+	u32 value = ~0x00;
+	unsigned int i;
+
+	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
+		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+	for (i = 0; i < 100; i++) {
+		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
+			value = RTL_R32(CSIDR);
+			break;
+		}
+		udelay(10);
+	}
+
+	return value;
+}
+
+static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
+{
+	struct csi_ops *ops = &tp->csi_ops;
+
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_01:
+	case RTL_GIGA_MAC_VER_02:
+	case RTL_GIGA_MAC_VER_03:
+	case RTL_GIGA_MAC_VER_04:
+	case RTL_GIGA_MAC_VER_05:
+	case RTL_GIGA_MAC_VER_06:
+	case RTL_GIGA_MAC_VER_10:
+	case RTL_GIGA_MAC_VER_11:
+	case RTL_GIGA_MAC_VER_12:
+	case RTL_GIGA_MAC_VER_13:
+	case RTL_GIGA_MAC_VER_14:
+	case RTL_GIGA_MAC_VER_15:
+	case RTL_GIGA_MAC_VER_16:
+	case RTL_GIGA_MAC_VER_17:
+		ops->write	= NULL;
+		ops->read	= NULL;
+		break;
+
+	case RTL_GIGA_MAC_VER_37:
+	case RTL_GIGA_MAC_VER_38:
+		ops->write	= r8402_csi_write;
+		ops->read	= r8402_csi_read;
+		break;
+
+	default:
+		ops->write	= r8169_csi_write;
+		ops->read	= r8169_csi_read;
+		break;
+	}
 }
 
 struct ephy_info {
@@ -4257,8 +4535,11 @@
 	PktCntrDisable | \
 	Mac_dbgo_sel)
 
-static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
@@ -4267,17 +4548,22 @@
 		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
 }
 
-static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
 {
-	rtl_hw_start_8168bb(ioaddr, pdev);
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	rtl_hw_start_8168bb(tp);
 
 	RTL_W8(MaxTxPacketSize, TxPacketMax);
 
 	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
 }
 
-static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
+static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
 	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
 
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
@@ -4289,8 +4575,9 @@
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
 
-static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168cp[] = {
 		{ 0x01, 0,	0x0001 },
 		{ 0x02, 0x0800,	0x1000 },
@@ -4299,16 +4586,19 @@
 		{ 0x07, 0,	0x2000 }
 	};
 
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
 	rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
 
-	__rtl_hw_start_8168cp(ioaddr, pdev);
+	__rtl_hw_start_8168cp(tp);
 }
 
-static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_2(ioaddr);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	rtl_csi_access_enable_2(tp);
 
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
@@ -4317,9 +4607,12 @@
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
 
-static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_2(ioaddr);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	rtl_csi_access_enable_2(tp);
 
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
@@ -4333,52 +4626,57 @@
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
 
-static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168c_1[] = {
 		{ 0x02, 0x0800,	0x1000 },
 		{ 0x03, 0,	0x0002 },
 		{ 0x06, 0x0080,	0x0000 }
 	};
 
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
 	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
 
 	rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
 
-	__rtl_hw_start_8168cp(ioaddr, pdev);
+	__rtl_hw_start_8168cp(tp);
 }
 
-static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168c_2[] = {
 		{ 0x01, 0,	0x0001 },
 		{ 0x03, 0x0400,	0x0220 }
 	};
 
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
 	rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
 
-	__rtl_hw_start_8168cp(ioaddr, pdev);
+	__rtl_hw_start_8168cp(tp);
 }
 
-static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
 {
-	rtl_hw_start_8168c_2(ioaddr, pdev);
+	rtl_hw_start_8168c_2(tp);
 }
 
-static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
-	__rtl_hw_start_8168cp(ioaddr, pdev);
+	__rtl_hw_start_8168cp(tp);
 }
 
-static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168d(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_2(ioaddr);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	rtl_csi_access_enable_2(tp);
 
 	rtl_disable_clock_request(pdev);
 
@@ -4389,9 +4687,12 @@
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
 
-static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_1(ioaddr);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	rtl_csi_access_enable_1(tp);
 
 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
@@ -4400,8 +4701,10 @@
 	rtl_disable_clock_request(pdev);
 }
 
-static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8168d_4[] = {
 		{ 0x0b, ~0,	0x48 },
 		{ 0x19, 0x20,	0x50 },
@@ -4409,7 +4712,7 @@
 	};
 	int i;
 
-	rtl_csi_access_enable_1(ioaddr);
+	rtl_csi_access_enable_1(tp);
 
 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
@@ -4426,8 +4729,10 @@
 	rtl_enable_clock_request(pdev);
 }
 
-static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8168e_1[] = {
 		{ 0x00, 0x0200,	0x0100 },
 		{ 0x00, 0x0000,	0x0004 },
@@ -4444,7 +4749,7 @@
 		{ 0x0a, 0x0000,	0x0040 }
 	};
 
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
 	rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
 
@@ -4461,14 +4766,16 @@
 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 }
 
-static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8168e_2[] = {
 		{ 0x09, 0x0000,	0x0080 },
 		{ 0x19, 0x0000,	0x0224 }
 	};
 
-	rtl_csi_access_enable_1(ioaddr);
+	rtl_csi_access_enable_1(tp);
 
 	rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
 
@@ -4499,18 +4806,12 @@
 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 }
 
-static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 {
-	static const struct ephy_info e_info_8168f_1[] = {
-		{ 0x06, 0x00c0,	0x0020 },
-		{ 0x08, 0x0001,	0x0002 },
-		{ 0x09, 0x0000,	0x0080 },
-		{ 0x19, 0x0000,	0x0224 }
-	};
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
 
-	rtl_csi_access_enable_1(ioaddr);
-
-	rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+	rtl_csi_access_enable_2(tp);
 
 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
@@ -4524,8 +4825,6 @@
 	rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
 	rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
 	rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
-	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
-		     ERIAR_EXGMAC);
 
 	RTL_W8(MaxTxPacketSize, EarlySize);
 
@@ -4533,20 +4832,54 @@
 
 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
-
-	/* Adjust EEE LED frequency */
-	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
-
 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 }
 
+static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	static const struct ephy_info e_info_8168f_1[] = {
+		{ 0x06, 0x00c0,	0x0020 },
+		{ 0x08, 0x0001,	0x0002 },
+		{ 0x09, 0x0000,	0x0080 },
+		{ 0x19, 0x0000,	0x0224 }
+	};
+
+	rtl_hw_start_8168f(tp);
+
+	rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+
+	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
+		     ERIAR_EXGMAC);
+
+	/* Adjust EEE LED frequency */
+	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+}
+
+static void rtl_hw_start_8411(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	static const struct ephy_info e_info_8168f_1[] = {
+		{ 0x06, 0x00c0,	0x0020 },
+		{ 0x0f, 0xffff,	0x5200 },
+		{ 0x1e, 0x0000,	0x4000 },
+		{ 0x19, 0x0000,	0x0224 }
+	};
+
+	rtl_hw_start_8168f(tp);
+
+	rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+
+	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000,
+		     ERIAR_EXGMAC);
+}
+
 static void rtl_hw_start_8168(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 
@@ -4577,67 +4910,71 @@
 
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_11:
-		rtl_hw_start_8168bb(ioaddr, pdev);
+		rtl_hw_start_8168bb(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_12:
 	case RTL_GIGA_MAC_VER_17:
-		rtl_hw_start_8168bef(ioaddr, pdev);
+		rtl_hw_start_8168bef(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_18:
-		rtl_hw_start_8168cp_1(ioaddr, pdev);
+		rtl_hw_start_8168cp_1(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_19:
-		rtl_hw_start_8168c_1(ioaddr, pdev);
+		rtl_hw_start_8168c_1(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_20:
-		rtl_hw_start_8168c_2(ioaddr, pdev);
+		rtl_hw_start_8168c_2(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_21:
-		rtl_hw_start_8168c_3(ioaddr, pdev);
+		rtl_hw_start_8168c_3(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_22:
-		rtl_hw_start_8168c_4(ioaddr, pdev);
+		rtl_hw_start_8168c_4(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_23:
-		rtl_hw_start_8168cp_2(ioaddr, pdev);
+		rtl_hw_start_8168cp_2(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_24:
-		rtl_hw_start_8168cp_3(ioaddr, pdev);
+		rtl_hw_start_8168cp_3(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_25:
 	case RTL_GIGA_MAC_VER_26:
 	case RTL_GIGA_MAC_VER_27:
-		rtl_hw_start_8168d(ioaddr, pdev);
+		rtl_hw_start_8168d(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_28:
-		rtl_hw_start_8168d_4(ioaddr, pdev);
+		rtl_hw_start_8168d_4(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_31:
-		rtl_hw_start_8168dp(ioaddr, pdev);
+		rtl_hw_start_8168dp(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_32:
 	case RTL_GIGA_MAC_VER_33:
-		rtl_hw_start_8168e_1(ioaddr, pdev);
+		rtl_hw_start_8168e_1(tp);
 		break;
 	case RTL_GIGA_MAC_VER_34:
-		rtl_hw_start_8168e_2(ioaddr, pdev);
+		rtl_hw_start_8168e_2(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_35:
 	case RTL_GIGA_MAC_VER_36:
-		rtl_hw_start_8168f_1(ioaddr, pdev);
+		rtl_hw_start_8168f_1(tp);
+		break;
+
+	case RTL_GIGA_MAC_VER_38:
+		rtl_hw_start_8411(tp);
 		break;
 
 	default:
@@ -4664,8 +5001,10 @@
 	PktCntrDisable | \
 	Mac_dbgo_sel)
 
-static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8102e_1[] = {
 		{ 0x01,	0, 0x6e65 },
 		{ 0x02,	0, 0x091f },
@@ -4678,7 +5017,7 @@
 	};
 	u8 cfg1;
 
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
 	RTL_W8(DBG_REG, FIX_NAK_1);
 
@@ -4695,9 +5034,12 @@
 	rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
 }
 
-static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_2(ioaddr);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	rtl_csi_access_enable_2(tp);
 
 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
@@ -4705,15 +5047,16 @@
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 }
 
-static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
 {
-	rtl_hw_start_8102e_2(ioaddr, pdev);
+	rtl_hw_start_8102e_2(tp);
 
-	rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
+	rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
 }
 
-static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8105e_1[] = {
 		{ 0x07,	0, 0x4000 },
 		{ 0x19,	0, 0x0200 },
@@ -4737,12 +5080,44 @@
 	rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
 }
 
-static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
 {
-	rtl_hw_start_8105e_1(ioaddr, pdev);
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	rtl_hw_start_8105e_1(tp);
 	rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
 }
 
+static void rtl_hw_start_8402(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	static const struct ephy_info e_info_8402[] = {
+		{ 0x19,	0xffff, 0xff64 },
+		{ 0x1e,	0, 0x4000 }
+	};
+
+	rtl_csi_access_enable_2(tp);
+
+	/* Force LAN exit from ASPM if Rx/Tx are not idle */
+	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+	rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
+
+	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+	rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
+	rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
+	rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+	rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+	rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+	rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
+		     ERIAR_EXGMAC);
+}
+
 static void rtl_hw_start_8101(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -4766,22 +5141,26 @@
 
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_07:
-		rtl_hw_start_8102e_1(ioaddr, pdev);
+		rtl_hw_start_8102e_1(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_08:
-		rtl_hw_start_8102e_3(ioaddr, pdev);
+		rtl_hw_start_8102e_3(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_09:
-		rtl_hw_start_8102e_2(ioaddr, pdev);
+		rtl_hw_start_8102e_2(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_29:
-		rtl_hw_start_8105e_1(ioaddr, pdev);
+		rtl_hw_start_8105e_1(tp);
 		break;
 	case RTL_GIGA_MAC_VER_30:
-		rtl_hw_start_8105e_2(ioaddr, pdev);
+		rtl_hw_start_8105e_2(tp);
+		break;
+
+	case RTL_GIGA_MAC_VER_37:
+		rtl_hw_start_8402(tp);
 		break;
 	}
 
@@ -6178,6 +6557,7 @@
 	rtl_init_mdio_ops(tp);
 	rtl_init_pll_power_ops(tp);
 	rtl_init_jumbo_ops(tp);
+	rtl_init_csi_ops(tp);
 
 	rtl8169_print_mac_version(tp);
 
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 3fb2355..46df3a0 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -4,11 +4,11 @@
 
 config SH_ETH
 	tristate "Renesas SuperH Ethernet support"
-	depends on SUPERH && \
+	depends on (SUPERH || ARCH_SHMOBILE) && \
 		(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
 		 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
 		 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
-		 CPU_SUBTYPE_SH7757)
+		 CPU_SUBTYPE_SH7757 || ARCH_R8A7740)
 	select CRC32
 	select NET_CORE
 	select MII
@@ -17,4 +17,5 @@
 	---help---
 	  Renesas SuperH Ethernet device driver.
 	  This driver supports the following CPUs:
-		- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763 and SH7757.
+		- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
+		  and R8A7740.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d63e09b..be3c221 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -386,6 +386,114 @@
 		sh_eth_write(ndev, 0x0, CSMR);
 }
 
+#elif defined(CONFIG_ARCH_R8A7740)
+#define SH_ETH_HAS_TSU	1
+static void sh_eth_chip_reset(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long mii;
+
+	/* reset device */
+	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
+	mdelay(1);
+
+	switch (mdp->phy_interface) {
+	case PHY_INTERFACE_MODE_GMII:
+		mii = 2;
+		break;
+	case PHY_INTERFACE_MODE_MII:
+		mii = 1;
+		break;
+	case PHY_INTERFACE_MODE_RMII:
+	default:
+		mii = 0;
+		break;
+	}
+	sh_eth_write(ndev, mii, RMII_MII);
+}
+
+static void sh_eth_reset(struct net_device *ndev)
+{
+	int cnt = 100;
+
+	sh_eth_write(ndev, EDSR_ENALL, EDSR);
+	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
+	while (cnt > 0) {
+		if (!(sh_eth_read(ndev, EDMR) & 0x3))
+			break;
+		mdelay(1);
+		cnt--;
+	}
+	if (cnt == 0)
+		printk(KERN_ERR "Device reset fail\n");
+
+	/* Table Init */
+	sh_eth_write(ndev, 0x0, TDLAR);
+	sh_eth_write(ndev, 0x0, TDFAR);
+	sh_eth_write(ndev, 0x0, TDFXR);
+	sh_eth_write(ndev, 0x0, TDFFR);
+	sh_eth_write(ndev, 0x0, RDLAR);
+	sh_eth_write(ndev, 0x0, RDFAR);
+	sh_eth_write(ndev, 0x0, RDFXR);
+	sh_eth_write(ndev, 0x0, RDFFR);
+}
+
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	if (mdp->duplex) /* Full */
+		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+	else		/* Half */
+		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	switch (mdp->speed) {
+	case 10: /* 10BASE */
+		sh_eth_write(ndev, GECMR_10, GECMR);
+		break;
+	case 100:/* 100BASE */
+		sh_eth_write(ndev, GECMR_100, GECMR);
+		break;
+	case 1000: /* 1000BASE */
+		sh_eth_write(ndev, GECMR_1000, GECMR);
+		break;
+	default:
+		break;
+	}
+}
+
+/* R8A7740 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+	.chip_reset	= sh_eth_chip_reset,
+	.set_duplex	= sh_eth_set_duplex,
+	.set_rate	= sh_eth_set_rate,
+
+	.ecsr_value	= ECSR_ICD | ECSR_MPD,
+	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+	.tx_check	= EESR_TC1 | EESR_FTC,
+	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+			  EESR_ECI,
+	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+			  EESR_TFE,
+
+	.apr		= 1,
+	.mpr		= 1,
+	.tpauser	= 1,
+	.bculr		= 1,
+	.hw_swap	= 1,
+	.no_trimd	= 1,
+	.no_ade		= 1,
+	.tsu		= 1,
+};
+
 #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
 #define SH_ETH_RESET_DEFAULT	1
 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
@@ -443,7 +551,7 @@
 }
 #endif
 
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static void sh_eth_set_receive_align(struct sk_buff *skb)
 {
 	int reserve;
@@ -919,6 +1027,10 @@
 		desc_status = edmac_to_cpu(mdp, rxdesc->status);
 		pkt_len = rxdesc->frame_length;
 
+#if defined(CONFIG_ARCH_R8A7740)
+		desc_status >>= 16;
+#endif
+
 		if (--boguscnt < 0)
 			break;
 
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 0fa14afc..57b8e1f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -372,7 +372,7 @@
 };
 
 /* Driver's parameters */
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 #define SH4_SKB_RX_ALIGN	32
 #else
 #define SH2_SH3_SKB_RX_ALIGN	2
@@ -381,7 +381,8 @@
 /*
  * Register's bits
  */
-#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
+#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\
+    defined(CONFIG_ARCH_R8A7740)
 /* EDSR */
 enum EDSR_BIT {
 	EDSR_ENT = 0x01, EDSR_ENR = 0x02,
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 1895605..8e9fda0 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -937,7 +937,7 @@
 	do {
 		unsigned long flags;
 		spin_lock_irqsave(&pd->lock, flags);
-		for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++)
+		for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
 			pd->stats[i] =
 				pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
 		s6gmac_stats_collect(pd, &statinf[0][0]);
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index a284d64..32e5566 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -39,9 +39,7 @@
 #define SC92031_NAME "sc92031"
 
 /* BAR 0 is MMIO, BAR 1 is PIO */
-#ifndef SC92031_USE_BAR
-#define SC92031_USE_BAR 0
-#endif
+#define SC92031_USE_PIO	0
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
 static int multicast_filter_limit = 64;
@@ -366,7 +364,7 @@
 	mmiowb();
 
 	/* wait for any concurrent interrupt/tasklet to finish */
-	synchronize_irq(dev->irq);
+	synchronize_irq(priv->pdev->irq);
 	tasklet_disable(&priv->tasklet);
 }
 
@@ -1114,10 +1112,13 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void sc92031_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
+	struct sc92031_priv *priv = netdev_priv(dev);
+	const int irq = priv->pdev->irq;
+
+	disable_irq(irq);
+	if (sc92031_interrupt(irq, dev) != IRQ_NONE)
 		sc92031_tasklet((unsigned long)dev);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 }
 #endif
 
@@ -1402,7 +1403,6 @@
 	struct net_device *dev;
 	struct sc92031_priv *priv;
 	u32 mac0, mac1;
-	unsigned long base_addr;
 
 	err = pci_enable_device(pdev);
 	if (unlikely(err < 0))
@@ -1422,7 +1422,7 @@
 	if (unlikely(err < 0))
 		goto out_request_regions;
 
-	port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
+	port_base = pci_iomap(pdev, SC92031_USE_PIO, 0);
 	if (unlikely(!port_base)) {
 		err = -EIO;
 		goto out_iomap;
@@ -1437,14 +1437,6 @@
 	pci_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-#if SC92031_USE_BAR == 0
-	dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
-	dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
-#elif SC92031_USE_BAR == 1
-	dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
-#endif
-	dev->irq = pdev->irq;
-
 	/* faked with skb_copy_and_csum_dev */
 	dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -1478,13 +1470,9 @@
 	if (err < 0)
 		goto out_register_netdev;
 
-#if SC92031_USE_BAR == 0
-	base_addr = dev->mem_start;
-#elif SC92031_USE_BAR == 1
-	base_addr = dev->base_addr;
-#endif
 	printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
-			base_addr, dev->dev_addr, dev->irq);
+	       (long)pci_resource_start(pdev, SC92031_USE_PIO), dev->dev_addr,
+	       pdev->irq);
 
 	return 0;
 
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index a9deda8..4613591 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -729,7 +729,7 @@
  * The interrupt handler does all of the Rx thread work and cleans up after
  * the Tx thread.
  */
-static irqreturn_t sis190_interrupt(int irq, void *__dev)
+static irqreturn_t sis190_irq(int irq, void *__dev)
 {
 	struct net_device *dev = __dev;
 	struct sis190_private *tp = netdev_priv(dev);
@@ -772,11 +772,11 @@
 static void sis190_netpoll(struct net_device *dev)
 {
 	struct sis190_private *tp = netdev_priv(dev);
-	struct pci_dev *pdev = tp->pci_dev;
+	const int irq = tp->pci_dev->irq;
 
-	disable_irq(pdev->irq);
-	sis190_interrupt(pdev->irq, dev);
-	enable_irq(pdev->irq);
+	disable_irq(irq);
+	sis190_irq(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -1085,7 +1085,7 @@
 
 	sis190_request_timer(dev);
 
-	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
+	rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
 	if (rc < 0)
 		goto err_release_timer_2;
 
@@ -1097,11 +1097,9 @@
 	sis190_delete_timer(dev);
 	sis190_rx_clear(tp);
 err_free_rx_1:
-	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
-		tp->rx_dma);
+	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
 err_free_tx_0:
-	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
-		tp->tx_dma);
+	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
 	goto out;
 }
 
@@ -1141,7 +1139,7 @@
 
 		spin_unlock_irq(&tp->lock);
 
-		synchronize_irq(dev->irq);
+		synchronize_irq(tp->pci_dev->irq);
 
 		if (!poll_locked)
 			poll_locked++;
@@ -1161,7 +1159,7 @@
 
 	sis190_down(dev);
 
-	free_irq(dev->irq, dev);
+	free_irq(pdev->irq, dev);
 
 	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
 	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
@@ -1884,8 +1882,6 @@
 	dev->netdev_ops = &sis190_netdev_ops;
 
 	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
-	dev->irq = pdev->irq;
-	dev->base_addr = (unsigned long) 0xdead;
 	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
 
 	spin_lock_init(&tp->lock);
@@ -1902,7 +1898,7 @@
 		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
 			    pci_name(pdev),
 			    sis_chip_info[ent->driver_data].name,
-			    ioaddr, dev->irq, dev->dev_addr);
+			    ioaddr, pdev->irq, dev->dev_addr);
 		netdev_info(dev, "%s mode.\n",
 			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
 	}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 5ccf02e..203d9c6 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -168,6 +168,8 @@
 	unsigned int cur_phy;
 	struct mii_if_info mii_info;
 
+	void __iomem	*ioaddr;
+
 	struct timer_list timer; /* Link status detection timer. */
 	u8 autong_complete; /* 1: auto-negotiate complete  */
 
@@ -201,13 +203,18 @@
 MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
 MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");
 
+#define sw32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define sw8(reg, val)	iowrite8(val, ioaddr + (reg))
+#define sr32(reg)	ioread32(ioaddr + (reg))
+#define sr16(reg)	ioread16(ioaddr + (reg))
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void sis900_poll(struct net_device *dev);
 #endif
 static int sis900_open(struct net_device *net_dev);
 static int sis900_mii_probe (struct net_device * net_dev);
 static void sis900_init_rxfilter (struct net_device * net_dev);
-static u16 read_eeprom(long ioaddr, int location);
+static u16 read_eeprom(void __iomem *ioaddr, int location);
 static int mdio_read(struct net_device *net_dev, int phy_id, int location);
 static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
 static void sis900_timer(unsigned long data);
@@ -231,7 +238,7 @@
 static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy);
 static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
 static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
-static void sis900_set_mode (long ioaddr, int speed, int duplex);
+static void sis900_set_mode(struct sis900_private *, int speed, int duplex);
 static const struct ethtool_ops sis900_ethtool_ops;
 
 /**
@@ -246,7 +253,8 @@
 
 static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
 {
-	long ioaddr = pci_resource_start(pci_dev, 0);
+	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u16 signature;
 	int i;
 
@@ -325,29 +333,30 @@
 static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
 					struct net_device *net_dev)
 {
-	long ioaddr = net_dev->base_addr;
+	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u32 rfcrSave;
 	u32 i;
 
-	rfcrSave = inl(rfcr + ioaddr);
+	rfcrSave = sr32(rfcr);
 
-	outl(rfcrSave | RELOAD, ioaddr + cr);
-	outl(0, ioaddr + cr);
+	sw32(cr, rfcrSave | RELOAD);
+	sw32(cr, 0);
 
 	/* disable packet filtering before setting filter */
-	outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+	sw32(rfcr, rfcrSave & ~RFEN);
 
 	/* load MAC addr to filter data register */
 	for (i = 0 ; i < 3 ; i++) {
-		outl((i << RFADDR_shift), ioaddr + rfcr);
-		*( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
+		sw32(rfcr, (i << RFADDR_shift));
+		*( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
 	}
 
 	/* Store MAC Address in perm_addr */
 	memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
 
 	/* enable packet filtering */
-	outl(rfcrSave | RFEN, rfcr + ioaddr);
+	sw32(rfcr, rfcrSave | RFEN);
 
 	return 1;
 }
@@ -371,31 +380,30 @@
 static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
 					struct net_device *net_dev)
 {
-	long ioaddr = net_dev->base_addr;
-	long ee_addr = ioaddr + mear;
-	u32 waittime = 0;
-	int i;
+	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
+	int wait, rc = 0;
 
-	outl(EEREQ, ee_addr);
-	while(waittime < 2000) {
-		if(inl(ee_addr) & EEGNT) {
+	sw32(mear, EEREQ);
+	for (wait = 0; wait < 2000; wait++) {
+		if (sr32(mear) & EEGNT) {
+			u16 *mac = (u16 *)net_dev->dev_addr;
+			int i;
 
 			/* get MAC address from EEPROM */
 			for (i = 0; i < 3; i++)
-			        ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+			        mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
 
 			/* Store MAC Address in perm_addr */
 			memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
 
-			outl(EEDONE, ee_addr);
-			return 1;
-		} else {
-			udelay(1);
-			waittime ++;
+			rc = 1;
+			break;
 		}
+		udelay(1);
 	}
-	outl(EEDONE, ee_addr);
-	return 0;
+	sw32(mear, EEDONE);
+	return rc;
 }
 
 static const struct net_device_ops sis900_netdev_ops = {
@@ -433,7 +441,7 @@
 	struct pci_dev *dev;
 	dma_addr_t ring_dma;
 	void *ring_space;
-	long ioaddr;
+	void __iomem *ioaddr;
 	int i, ret;
 	const char *card_name = card_names[pci_id->driver_data];
 	const char *dev_name = pci_name(pci_dev);
@@ -464,14 +472,17 @@
 	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
 
 	/* We do a request_region() to register /proc/ioports info. */
-	ioaddr = pci_resource_start(pci_dev, 0);
 	ret = pci_request_regions(pci_dev, "sis900");
 	if (ret)
 		goto err_out;
 
+	/* IO region. */
+	ioaddr = pci_iomap(pci_dev, 0, 0);
+	if (!ioaddr)
+		goto err_out_cleardev;
+
 	sis_priv = netdev_priv(net_dev);
-	net_dev->base_addr = ioaddr;
-	net_dev->irq = pci_dev->irq;
+	sis_priv->ioaddr = ioaddr;
 	sis_priv->pci_dev = pci_dev;
 	spin_lock_init(&sis_priv->lock);
 
@@ -480,7 +491,7 @@
 	ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
 	if (!ring_space) {
 		ret = -ENOMEM;
-		goto err_out_cleardev;
+		goto err_out_unmap;
 	}
 	sis_priv->tx_ring = ring_space;
 	sis_priv->tx_ring_dma = ring_dma;
@@ -534,7 +545,7 @@
 
 	/* 630ET : set the mii access mode as software-mode */
 	if (sis_priv->chipset_rev == SIS630ET_900_REV)
-		outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr);
+		sw32(cr, ACCESSMODE | sr32(cr));
 
 	/* probe for mii transceiver */
 	if (sis900_mii_probe(net_dev) == 0) {
@@ -556,25 +567,27 @@
 		goto err_unmap_rx;
 
 	/* print some information about our NIC */
-	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
-	       net_dev->name, card_name, ioaddr, net_dev->irq,
+	printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n",
+	       net_dev->name, card_name, ioaddr, pci_dev->irq,
 	       net_dev->dev_addr);
 
 	/* Detect Wake on Lan support */
-	ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27;
+	ret = (sr32(CFGPMC) & PMESP) >> 27;
 	if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
 		printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
 
 	return 0;
 
- err_unmap_rx:
+err_unmap_rx:
 	pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
 		sis_priv->rx_ring_dma);
- err_unmap_tx:
+err_unmap_tx:
 	pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
 		sis_priv->tx_ring_dma);
- err_out_cleardev:
- 	pci_set_drvdata(pci_dev, NULL);
+err_out_unmap:
+	pci_iounmap(pci_dev, ioaddr);
+err_out_cleardev:
+	pci_set_drvdata(pci_dev, NULL);
 	pci_release_regions(pci_dev);
  err_out:
 	free_netdev(net_dev);
@@ -798,7 +811,7 @@
 
 
 /* Delay between EEPROM clock transitions. */
-#define eeprom_delay()  inl(ee_addr)
+#define eeprom_delay()	sr32(mear)
 
 /**
  *	read_eeprom - Read Serial EEPROM
@@ -809,41 +822,41 @@
  *	Note that location is in word (16 bits) unit
  */
 
-static u16 __devinit read_eeprom(long ioaddr, int location)
+static u16 __devinit read_eeprom(void __iomem *ioaddr, int location)
 {
+	u32 read_cmd = location | EEread;
 	int i;
 	u16 retval = 0;
-	long ee_addr = ioaddr + mear;
-	u32 read_cmd = location | EEread;
 
-	outl(0, ee_addr);
+	sw32(mear, 0);
 	eeprom_delay();
-	outl(EECS, ee_addr);
+	sw32(mear, EECS);
 	eeprom_delay();
 
 	/* Shift the read command (9) bits out. */
 	for (i = 8; i >= 0; i--) {
 		u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
-		outl(dataval, ee_addr);
+
+		sw32(mear, dataval);
 		eeprom_delay();
-		outl(dataval | EECLK, ee_addr);
+		sw32(mear, dataval | EECLK);
 		eeprom_delay();
 	}
-	outl(EECS, ee_addr);
+	sw32(mear, EECS);
 	eeprom_delay();
 
 	/* read the 16-bits data in */
 	for (i = 16; i > 0; i--) {
-		outl(EECS, ee_addr);
+		sw32(mear, EECS);
 		eeprom_delay();
-		outl(EECS | EECLK, ee_addr);
+		sw32(mear, EECS | EECLK);
 		eeprom_delay();
-		retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0);
+		retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0);
 		eeprom_delay();
 	}
 
 	/* Terminate the EEPROM access. */
-	outl(0, ee_addr);
+	sw32(mear, 0);
 	eeprom_delay();
 
 	return retval;
@@ -852,24 +865,27 @@
 /* Read and write the MII management registers using software-generated
    serial MDIO protocol. Note that the command bits and data bits are
    sent out separately */
-#define mdio_delay()    inl(mdio_addr)
+#define mdio_delay()	sr32(mear)
 
-static void mdio_idle(long mdio_addr)
+static void mdio_idle(struct sis900_private *sp)
 {
-	outl(MDIO | MDDIR, mdio_addr);
+	void __iomem *ioaddr = sp->ioaddr;
+
+	sw32(mear, MDIO | MDDIR);
 	mdio_delay();
-	outl(MDIO | MDDIR | MDC, mdio_addr);
+	sw32(mear, MDIO | MDDIR | MDC);
 }
 
-/* Syncronize the MII management interface by shifting 32 one bits out. */
-static void mdio_reset(long mdio_addr)
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_reset(struct sis900_private *sp)
 {
+	void __iomem *ioaddr = sp->ioaddr;
 	int i;
 
 	for (i = 31; i >= 0; i--) {
-		outl(MDDIR | MDIO, mdio_addr);
+		sw32(mear, MDDIR | MDIO);
 		mdio_delay();
-		outl(MDDIR | MDIO | MDC, mdio_addr);
+		sw32(mear, MDDIR | MDIO | MDC);
 		mdio_delay();
 	}
 }
@@ -887,31 +903,33 @@
 
 static int mdio_read(struct net_device *net_dev, int phy_id, int location)
 {
-	long mdio_addr = net_dev->base_addr + mear;
 	int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+	struct sis900_private *sp = netdev_priv(net_dev);
+	void __iomem *ioaddr = sp->ioaddr;
 	u16 retval = 0;
 	int i;
 
-	mdio_reset(mdio_addr);
-	mdio_idle(mdio_addr);
+	mdio_reset(sp);
+	mdio_idle(sp);
 
 	for (i = 15; i >= 0; i--) {
 		int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
-		outl(dataval, mdio_addr);
+
+		sw32(mear, dataval);
 		mdio_delay();
-		outl(dataval | MDC, mdio_addr);
+		sw32(mear, dataval | MDC);
 		mdio_delay();
 	}
 
 	/* Read the 16 data bits. */
 	for (i = 16; i > 0; i--) {
-		outl(0, mdio_addr);
+		sw32(mear, 0);
 		mdio_delay();
-		retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0);
-		outl(MDC, mdio_addr);
+		retval = (retval << 1) | ((sr32(mear) & MDIO) ? 1 : 0);
+		sw32(mear, MDC);
 		mdio_delay();
 	}
-	outl(0x00, mdio_addr);
+	sw32(mear, 0x00);
 
 	return retval;
 }
@@ -931,19 +949,21 @@
 static void mdio_write(struct net_device *net_dev, int phy_id, int location,
 			int value)
 {
-	long mdio_addr = net_dev->base_addr + mear;
 	int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+	struct sis900_private *sp = netdev_priv(net_dev);
+	void __iomem *ioaddr = sp->ioaddr;
 	int i;
 
-	mdio_reset(mdio_addr);
-	mdio_idle(mdio_addr);
+	mdio_reset(sp);
+	mdio_idle(sp);
 
 	/* Shift the command bits out. */
 	for (i = 15; i >= 0; i--) {
 		int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
-		outb(dataval, mdio_addr);
+
+		sw8(mear, dataval);
 		mdio_delay();
-		outb(dataval | MDC, mdio_addr);
+		sw8(mear, dataval | MDC);
 		mdio_delay();
 	}
 	mdio_delay();
@@ -951,21 +971,22 @@
 	/* Shift the value bits out. */
 	for (i = 15; i >= 0; i--) {
 		int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
-		outl(dataval, mdio_addr);
+
+		sw32(mear, dataval);
 		mdio_delay();
-		outl(dataval | MDC, mdio_addr);
+		sw32(mear, dataval | MDC);
 		mdio_delay();
 	}
 	mdio_delay();
 
 	/* Clear out extra bits. */
 	for (i = 2; i > 0; i--) {
-		outb(0, mdio_addr);
+		sw8(mear, 0);
 		mdio_delay();
-		outb(MDC, mdio_addr);
+		sw8(mear, MDC);
 		mdio_delay();
 	}
-	outl(0x00, mdio_addr);
+	sw32(mear, 0x00);
 }
 
 
@@ -1000,9 +1021,12 @@
 */
 static void sis900_poll(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	sis900_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct sis900_private *sp = netdev_priv(dev);
+	const int irq = sp->pci_dev->irq;
+
+	disable_irq(irq);
+	sis900_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -1018,7 +1042,7 @@
 sis900_open(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	int ret;
 
 	/* Soft reset the chip. */
@@ -1027,8 +1051,8 @@
 	/* Equalizer workaround Rule */
 	sis630_set_eq(net_dev, sis_priv->chipset_rev);
 
-	ret = request_irq(net_dev->irq, sis900_interrupt, IRQF_SHARED,
-						net_dev->name, net_dev);
+	ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED,
+			  net_dev->name, net_dev);
 	if (ret)
 		return ret;
 
@@ -1042,12 +1066,12 @@
 	netif_start_queue(net_dev);
 
 	/* Workaround for EDB */
-	sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+	sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
 	/* Enable all known interrupts by setting the interrupt mask. */
-	outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
-	outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
-	outl(IE, ioaddr + ier);
+	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+	sw32(cr, RxENA | sr32(cr));
+	sw32(ier, IE);
 
 	sis900_check_mode(net_dev, sis_priv->mii);
 
@@ -1074,31 +1098,30 @@
 sis900_init_rxfilter (struct net_device * net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u32 rfcrSave;
 	u32 i;
 
-	rfcrSave = inl(rfcr + ioaddr);
+	rfcrSave = sr32(rfcr);
 
 	/* disable packet filtering before setting filter */
-	outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+	sw32(rfcr, rfcrSave & ~RFEN);
 
 	/* load MAC addr to filter data register */
 	for (i = 0 ; i < 3 ; i++) {
-		u32 w;
+		u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
 
-		w = (u32) *((u16 *)(net_dev->dev_addr)+i);
-		outl((i << RFADDR_shift), ioaddr + rfcr);
-		outl(w, ioaddr + rfdr);
+		sw32(rfcr, i << RFADDR_shift);
+		sw32(rfdr, w);
 
 		if (netif_msg_hw(sis_priv)) {
 			printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n",
-			       net_dev->name, i, inl(ioaddr + rfdr));
+			       net_dev->name, i, sr32(rfdr));
 		}
 	}
 
 	/* enable packet filtering */
-	outl(rfcrSave | RFEN, rfcr + ioaddr);
+	sw32(rfcr, rfcrSave | RFEN);
 }
 
 /**
@@ -1112,7 +1135,7 @@
 sis900_init_tx_ring(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	int i;
 
 	sis_priv->tx_full = 0;
@@ -1128,10 +1151,10 @@
 	}
 
 	/* load Transmit Descriptor Register */
-	outl(sis_priv->tx_ring_dma, ioaddr + txdp);
+	sw32(txdp, sis_priv->tx_ring_dma);
 	if (netif_msg_hw(sis_priv))
 		printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
-		       net_dev->name, inl(ioaddr + txdp));
+		       net_dev->name, sr32(txdp));
 }
 
 /**
@@ -1146,7 +1169,7 @@
 sis900_init_rx_ring(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	int i;
 
 	sis_priv->cur_rx = 0;
@@ -1181,10 +1204,10 @@
 	sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
 
 	/* load Receive Descriptor Register */
-	outl(sis_priv->rx_ring_dma, ioaddr + rxdp);
+	sw32(rxdp, sis_priv->rx_ring_dma);
 	if (netif_msg_hw(sis_priv))
 		printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
-		       net_dev->name, inl(ioaddr + rxdp));
+		       net_dev->name, sr32(rxdp));
 }
 
 /**
@@ -1298,7 +1321,7 @@
 
 		sis900_read_mode(net_dev, &speed, &duplex);
 		if (duplex){
-			sis900_set_mode(net_dev->base_addr, speed, duplex);
+			sis900_set_mode(sis_priv, speed, duplex);
 			sis630_set_eq(net_dev, sis_priv->chipset_rev);
 			netif_start_queue(net_dev);
 		}
@@ -1359,25 +1382,25 @@
 static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	int speed, duplex;
 
 	if (mii_phy->phy_types == LAN) {
-		outl(~EXD & inl(ioaddr + cfg), ioaddr + cfg);
+		sw32(cfg, ~EXD & sr32(cfg));
 		sis900_set_capability(net_dev , mii_phy);
 		sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
 	} else {
-		outl(EXD | inl(ioaddr + cfg), ioaddr + cfg);
+		sw32(cfg, EXD | sr32(cfg));
 		speed = HW_SPEED_HOME;
 		duplex = FDX_CAPABLE_HALF_SELECTED;
-		sis900_set_mode(ioaddr, speed, duplex);
+		sis900_set_mode(sis_priv, speed, duplex);
 		sis_priv->autong_complete = 1;
 	}
 }
 
 /**
  *	sis900_set_mode - Set the media mode of mac register.
- *	@ioaddr: the address of the device
+ *	@sp:     the device private data
  *	@speed : the transmit speed to be determined
  *	@duplex: the duplex mode to be determined
  *
@@ -1388,11 +1411,12 @@
  *	double words.
  */
 
-static void sis900_set_mode (long ioaddr, int speed, int duplex)
+static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
 {
+	void __iomem *ioaddr = sp->ioaddr;
 	u32 tx_flags = 0, rx_flags = 0;
 
-	if (inl(ioaddr + cfg) & EDB_MASTER_EN) {
+	if (sr32(cfg) & EDB_MASTER_EN) {
 		tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
 					(TX_FILL_THRESH << TxFILLT_shift);
 		rx_flags = DMA_BURST_64 << RxMXDMA_shift;
@@ -1420,8 +1444,8 @@
 	rx_flags |= RxAJAB;
 #endif
 
-	outl (tx_flags, ioaddr + txcfg);
-	outl (rx_flags, ioaddr + rxcfg);
+	sw32(txcfg, tx_flags);
+	sw32(rxcfg, rx_flags);
 }
 
 /**
@@ -1528,16 +1552,17 @@
 static void sis900_tx_timeout(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	unsigned long flags;
 	int i;
 
-	if(netif_msg_tx_err(sis_priv))
+	if (netif_msg_tx_err(sis_priv)) {
 		printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
-	       		net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
+			net_dev->name, sr32(cr), sr32(isr));
+	}
 
 	/* Disable interrupts by clearing the interrupt mask. */
-	outl(0x0000, ioaddr + imr);
+	sw32(imr, 0x0000);
 
 	/* use spinlock to prevent interrupt handler accessing buffer ring */
 	spin_lock_irqsave(&sis_priv->lock, flags);
@@ -1566,10 +1591,10 @@
 	net_dev->trans_start = jiffies; /* prevent tx timeout */
 
 	/* load Transmit Descriptor Register */
-	outl(sis_priv->tx_ring_dma, ioaddr + txdp);
+	sw32(txdp, sis_priv->tx_ring_dma);
 
 	/* Enable all known interrupts by setting the interrupt mask. */
-	outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
 }
 
 /**
@@ -1586,7 +1611,7 @@
 sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	unsigned int  entry;
 	unsigned long flags;
 	unsigned int  index_cur_tx, index_dirty_tx;
@@ -1608,7 +1633,7 @@
 	sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
 		skb->data, skb->len, PCI_DMA_TODEVICE);
 	sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
-	outl(TxENA | inl(ioaddr + cr), ioaddr + cr);
+	sw32(cr, TxENA | sr32(cr));
 
 	sis_priv->cur_tx ++;
 	index_cur_tx = sis_priv->cur_tx;
@@ -1654,14 +1679,14 @@
 	struct net_device *net_dev = dev_instance;
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
 	int boguscnt = max_interrupt_work;
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u32 status;
 	unsigned int handled = 0;
 
 	spin_lock (&sis_priv->lock);
 
 	do {
-		status = inl(ioaddr + isr);
+		status = sr32(isr);
 
 		if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
 			/* nothing intresting happened */
@@ -1696,7 +1721,7 @@
 	if(netif_msg_intr(sis_priv))
 		printk(KERN_DEBUG "%s: exiting interrupt, "
 		       "interrupt status = 0x%#8.8x.\n",
-		       net_dev->name, inl(ioaddr + isr));
+		       net_dev->name, sr32(isr));
 
 	spin_unlock (&sis_priv->lock);
 	return IRQ_RETVAL(handled);
@@ -1715,7 +1740,7 @@
 static int sis900_rx(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
 	u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
 	int rx_work_limit;
@@ -1847,7 +1872,7 @@
 		}
 	}
 	/* re-enable the potentially idle receive state matchine */
-	outl(RxENA | inl(ioaddr + cr), ioaddr + cr );
+	sw32(cr, RxENA | sr32(cr));
 
 	return 0;
 }
@@ -1932,31 +1957,31 @@
 
 static int sis900_close(struct net_device *net_dev)
 {
-	long ioaddr = net_dev->base_addr;
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	struct pci_dev *pdev = sis_priv->pci_dev;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	struct sk_buff *skb;
 	int i;
 
 	netif_stop_queue(net_dev);
 
 	/* Disable interrupts by clearing the interrupt mask. */
-	outl(0x0000, ioaddr + imr);
-	outl(0x0000, ioaddr + ier);
+	sw32(imr, 0x0000);
+	sw32(ier, 0x0000);
 
 	/* Stop the chip's Tx and Rx Status Machine */
-	outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+	sw32(cr, RxDIS | TxDIS | sr32(cr));
 
 	del_timer(&sis_priv->timer);
 
-	free_irq(net_dev->irq, net_dev);
+	free_irq(pdev->irq, net_dev);
 
 	/* Free Tx and RX skbuff */
 	for (i = 0; i < NUM_RX_DESC; i++) {
 		skb = sis_priv->rx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(sis_priv->pci_dev,
-				sis_priv->rx_ring[i].bufptr,
-				RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr,
+					 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(skb);
 			sis_priv->rx_skbuff[i] = NULL;
 		}
@@ -1964,9 +1989,8 @@
 	for (i = 0; i < NUM_TX_DESC; i++) {
 		skb = sis_priv->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(sis_priv->pci_dev,
-				sis_priv->tx_ring[i].bufptr, skb->len,
-				PCI_DMA_TODEVICE);
+			pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr,
+					 skb->len, PCI_DMA_TODEVICE);
 			dev_kfree_skb(skb);
 			sis_priv->tx_skbuff[i] = NULL;
 		}
@@ -2055,14 +2079,14 @@
 static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long pmctrl_addr = net_dev->base_addr + pmctrl;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u32 cfgpmcsr = 0, pmctrl_bits = 0;
 
 	if (wol->wolopts == 0) {
 		pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
 		cfgpmcsr &= ~PME_EN;
 		pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
-		outl(pmctrl_bits, pmctrl_addr);
+		sw32(pmctrl, pmctrl_bits);
 		if (netif_msg_wol(sis_priv))
 			printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
 		return 0;
@@ -2077,7 +2101,7 @@
 	if (wol->wolopts & WAKE_PHY)
 		pmctrl_bits |= LINKON;
 
-	outl(pmctrl_bits, pmctrl_addr);
+	sw32(pmctrl, pmctrl_bits);
 
 	pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
 	cfgpmcsr |= PME_EN;
@@ -2090,10 +2114,11 @@
 
 static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
 {
-	long pmctrl_addr = net_dev->base_addr + pmctrl;
+	struct sis900_private *sp = netdev_priv(net_dev);
+	void __iomem *ioaddr = sp->ioaddr;
 	u32 pmctrl_bits;
 
-	pmctrl_bits = inl(pmctrl_addr);
+	pmctrl_bits = sr32(pmctrl);
 	if (pmctrl_bits & MAGICPKT)
 		wol->wolopts |= WAKE_MAGIC;
 	if (pmctrl_bits & LINKON)
@@ -2279,8 +2304,8 @@
 
 static void set_rx_mode(struct net_device *net_dev)
 {
-	long ioaddr = net_dev->base_addr;
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u16 mc_filter[16] = {0};	/* 256/128 bits multicast hash table */
 	int i, table_entries;
 	u32 rx_mode;
@@ -2322,24 +2347,24 @@
 	/* update Multicast Hash Table in Receive Filter */
 	for (i = 0; i < table_entries; i++) {
                 /* why plus 0x04 ??, That makes the correct value for hash table. */
-		outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr);
-		outl(mc_filter[i], ioaddr + rfdr);
+		sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift);
+		sw32(rfdr, mc_filter[i]);
 	}
 
-	outl(RFEN | rx_mode, ioaddr + rfcr);
+	sw32(rfcr, RFEN | rx_mode);
 
 	/* sis900 is capable of looping back packets at MAC level for
 	 * debugging purpose */
 	if (net_dev->flags & IFF_LOOPBACK) {
 		u32 cr_saved;
 		/* We must disable Tx/Rx before setting loopback mode */
-		cr_saved = inl(ioaddr + cr);
-		outl(cr_saved | TxDIS | RxDIS, ioaddr + cr);
+		cr_saved = sr32(cr);
+		sw32(cr, cr_saved | TxDIS | RxDIS);
 		/* enable loopback */
-		outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg);
-		outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg);
+		sw32(txcfg, sr32(txcfg) | TxMLB);
+		sw32(rxcfg, sr32(rxcfg) | RxATX);
 		/* restore cr */
-		outl(cr_saved, ioaddr + cr);
+		sw32(cr, cr_saved);
 	}
 }
 
@@ -2355,26 +2380,25 @@
 static void sis900_reset(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
-	int i = 0;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u32 status = TxRCMP | RxRCMP;
+	int i;
 
-	outl(0, ioaddr + ier);
-	outl(0, ioaddr + imr);
-	outl(0, ioaddr + rfcr);
+	sw32(ier, 0);
+	sw32(imr, 0);
+	sw32(rfcr, 0);
 
-	outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr);
+	sw32(cr, RxRESET | TxRESET | RESET | sr32(cr));
 
 	/* Check that the chip has finished the reset. */
-	while (status && (i++ < 1000)) {
-		status ^= (inl(isr + ioaddr) & status);
-	}
+	for (i = 0; status && (i < 1000); i++)
+		status ^= sr32(isr) & status;
 
-	if( (sis_priv->chipset_rev >= SIS635A_900_REV) ||
-			(sis_priv->chipset_rev == SIS900B_900_REV) )
-		outl(PESEL | RND_CNT, ioaddr + cfg);
+	if (sis_priv->chipset_rev >= SIS635A_900_REV ||
+	    sis_priv->chipset_rev == SIS900B_900_REV)
+		sw32(cfg, PESEL | RND_CNT);
 	else
-		outl(PESEL, ioaddr + cfg);
+		sw32(cfg, PESEL);
 }
 
 /**
@@ -2388,10 +2412,12 @@
 {
 	struct net_device *net_dev = pci_get_drvdata(pci_dev);
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	struct mii_phy *phy = NULL;
+
+	unregister_netdev(net_dev);
 
 	while (sis_priv->first_mii) {
-		phy = sis_priv->first_mii;
+		struct mii_phy *phy = sis_priv->first_mii;
+
 		sis_priv->first_mii = phy->next;
 		kfree(phy);
 	}
@@ -2400,7 +2426,7 @@
 		sis_priv->rx_ring_dma);
 	pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
 		sis_priv->tx_ring_dma);
-	unregister_netdev(net_dev);
+	pci_iounmap(pci_dev, sis_priv->ioaddr);
 	free_netdev(net_dev);
 	pci_release_regions(pci_dev);
 	pci_set_drvdata(pci_dev, NULL);
@@ -2411,7 +2437,8 @@
 static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
 {
 	struct net_device *net_dev = pci_get_drvdata(pci_dev);
-	long ioaddr = net_dev->base_addr;
+	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
 
 	if(!netif_running(net_dev))
 		return 0;
@@ -2420,7 +2447,7 @@
 	netif_device_detach(net_dev);
 
 	/* Stop the chip's Tx and Rx Status Machine */
-	outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+	sw32(cr, RxDIS | TxDIS | sr32(cr));
 
 	pci_set_power_state(pci_dev, PCI_D3hot);
 	pci_save_state(pci_dev);
@@ -2432,7 +2459,7 @@
 {
 	struct net_device *net_dev = pci_get_drvdata(pci_dev);
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 
 	if(!netif_running(net_dev))
 		return 0;
@@ -2453,9 +2480,9 @@
-	sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+	sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
 	/* Enable all known interrupts by setting the interrupt mask. */
-	outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
-	outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
-	outl(IE, ioaddr + ier);
+	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+	sw32(cr, RxENA | sr32(cr));
+	sw32(ier, IE);
 
 	sis900_check_mode(net_dev, sis_priv->mii);
 
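A minimal sketch of the sw32()/sr32()/sw8() accessors the sis900 hunks above rely on, assuming they mirror the ew32()/er32() macros the epic100 conversion below adds (their real definitions sit in an earlier part of this diff and expect a local 'void __iomem *ioaddr' in scope):

	/* assumed sis900 register accessors, analogous to the epic100 helpers */
	#define sw32(reg, val)	iowrite32(val, ioaddr + (reg))
	#define sw8(reg, val)	iowrite8(val, ioaddr + (reg))
	#define sr32(reg)	ioread32(ioaddr + (reg))
	#define sr16(reg)	ioread16(ioaddr + (reg))
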
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 2a662e6..d01e59c 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -146,6 +146,12 @@
 #define EPIC_TOTAL_SIZE 0x100
 #define USE_IO_OPS 1
 
+#ifdef USE_IO_OPS
+#define EPIC_BAR	0
+#else
+#define EPIC_BAR	1
+#endif
+
 typedef enum {
 	SMSC_83C170_0,
 	SMSC_83C170,
@@ -176,21 +182,11 @@
 };
 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
 
-
-#ifndef USE_IO_OPS
-#undef inb
-#undef inw
-#undef inl
-#undef outb
-#undef outw
-#undef outl
-#define inb readb
-#define inw readw
-#define inl readl
-#define outb writeb
-#define outw writew
-#define outl writel
-#endif
+#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
+#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define er8(reg)	ioread8(ioaddr + (reg))
+#define er16(reg)	ioread16(ioaddr + (reg))
+#define er32(reg)	ioread32(ioaddr + (reg))
 
 /* Offsets to registers, using the (ugh) SMC names. */
 enum epic_registers {
@@ -275,6 +271,7 @@
 	u32 irq_mask;
 	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 
+	void __iomem *ioaddr;
 	struct pci_dev *pci_dev;			/* PCI bus location. */
 	int chip_id, chip_flags;
 
@@ -290,7 +287,7 @@
 };
 
 static int epic_open(struct net_device *dev);
-static int read_eeprom(long ioaddr, int location);
+static int read_eeprom(struct epic_private *, int);
 static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
 static void epic_restart(struct net_device *dev);
@@ -321,11 +318,11 @@
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-static int __devinit epic_init_one (struct pci_dev *pdev,
-				    const struct pci_device_id *ent)
+static int __devinit epic_init_one(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
 {
 	static int card_idx = -1;
-	long ioaddr;
+	void __iomem *ioaddr;
 	int chip_idx = (int) ent->driver_data;
 	int irq;
 	struct net_device *dev;
@@ -368,19 +365,15 @@
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-#ifdef USE_IO_OPS
-	ioaddr = pci_resource_start (pdev, 0);
-#else
-	ioaddr = pci_resource_start (pdev, 1);
-	ioaddr = (long) pci_ioremap_bar(pdev, 1);
+	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
 	if (!ioaddr) {
 		dev_err(&pdev->dev, "ioremap failed\n");
 		goto err_out_free_netdev;
 	}
-#endif
 
 	pci_set_drvdata(pdev, dev);
 	ep = netdev_priv(dev);
+	ep->ioaddr = ioaddr;
 	ep->mii.dev = dev;
 	ep->mii.mdio_read = mdio_read;
 	ep->mii.mdio_write = mdio_write;
@@ -409,34 +402,31 @@
 			duplex = full_duplex[card_idx];
 	}
 
-	dev->base_addr = ioaddr;
-	dev->irq = irq;
-
 	spin_lock_init(&ep->lock);
 	spin_lock_init(&ep->napi_lock);
 	ep->reschedule_in_poll = 0;
 
 	/* Bring the chip out of low-power mode. */
-	outl(0x4200, ioaddr + GENCTL);
+	ew32(GENCTL, 0x4200);
 	/* Magic?!  If we don't set this bit the MII interface won't work. */
 	/* This magic is documented in SMSC app note 7.15 */
 	for (i = 16; i > 0; i--)
-		outl(0x0008, ioaddr + TEST1);
+		ew32(TEST1, 0x0008);
 
 	/* Turn on the MII transceiver. */
-	outl(0x12, ioaddr + MIICfg);
+	ew32(MIICfg, 0x12);
 	if (chip_idx == 1)
-		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
-	outl(0x0200, ioaddr + GENCTL);
+		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
+	ew32(GENCTL, 0x0200);
 
 	/* Note: the '175 does not have a serial EEPROM. */
 	for (i = 0; i < 3; i++)
-		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4));
+		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
 
 	if (debug > 2) {
 		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
 		for (i = 0; i < 64; i++)
-			printk(" %4.4x%s", read_eeprom(ioaddr, i),
+			printk(" %4.4x%s", read_eeprom(ep, i),
 				   i % 16 == 15 ? "\n" : "");
 	}
 
@@ -481,8 +471,8 @@
 
 	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
 	if (ep->chip_flags & MII_PWRDWN)
-		outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
-	outl(0x0008, ioaddr + GENCTL);
+		ew32(NVCTL, er32(NVCTL) & ~0x483c);
+	ew32(GENCTL, 0x0008);
 
 	/* The lower four bits are the media type. */
 	if (duplex) {
@@ -501,8 +491,9 @@
 	if (ret < 0)
 		goto err_out_unmap_rx;
 
-	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
-	       dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq,
+	printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
+	       dev->name, pci_id_tbl[chip_idx].name,
+	       (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
 	       dev->dev_addr);
 
 out:
@@ -513,10 +504,8 @@
 err_out_unmap_tx:
 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
 err_out_iounmap:
-#ifndef USE_IO_OPS
-	iounmap(ioaddr);
+	pci_iounmap(pdev, ioaddr);
 err_out_free_netdev:
-#endif
 	free_netdev(dev);
 err_out_free_res:
 	pci_release_regions(pdev);
@@ -540,7 +529,7 @@
    This serves to flush the operation to the PCI bus.
  */
 
-#define eeprom_delay()	inl(ee_addr)
+#define eeprom_delay()	er32(EECTL)
 
 /* The EEPROM commands include the alway-set leading bit. */
 #define EE_WRITE_CMD	(5 << 6)
@@ -550,67 +539,67 @@
 
 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
 {
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 
-	outl(0x00000000, ioaddr + INTMASK);
+	ew32(INTMASK, 0x00000000);
 }
 
-static inline void __epic_pci_commit(long ioaddr)
+static inline void __epic_pci_commit(void __iomem *ioaddr)
 {
 #ifndef USE_IO_OPS
-	inl(ioaddr + INTMASK);
+	er32(INTMASK);
 #endif
 }
 
 static inline void epic_napi_irq_off(struct net_device *dev,
 				     struct epic_private *ep)
 {
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 
-	outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
+	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
 	__epic_pci_commit(ioaddr);
 }
 
 static inline void epic_napi_irq_on(struct net_device *dev,
 				    struct epic_private *ep)
 {
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 
 	/* No need to commit possible posted write */
-	outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
+	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
 }
 
-static int __devinit read_eeprom(long ioaddr, int location)
+static int __devinit read_eeprom(struct epic_private *ep, int location)
 {
+	void __iomem *ioaddr = ep->ioaddr;
 	int i;
 	int retval = 0;
-	long ee_addr = ioaddr + EECTL;
 	int read_cmd = location |
-		(inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
+		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
 
-	outl(EE_ENB & ~EE_CS, ee_addr);
-	outl(EE_ENB, ee_addr);
+	ew32(EECTL, EE_ENB & ~EE_CS);
+	ew32(EECTL, EE_ENB);
 
 	/* Shift the read command bits out. */
 	for (i = 12; i >= 0; i--) {
 		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
-		outl(EE_ENB | dataval, ee_addr);
+		ew32(EECTL, EE_ENB | dataval);
 		eeprom_delay();
-		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
 		eeprom_delay();
 	}
-	outl(EE_ENB, ee_addr);
+	ew32(EECTL, EE_ENB);
 
 	for (i = 16; i > 0; i--) {
-		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
 		eeprom_delay();
-		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
-		outl(EE_ENB, ee_addr);
+		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
+		ew32(EECTL, EE_ENB);
 		eeprom_delay();
 	}
 
 	/* Terminate the EEPROM access. */
-	outl(EE_ENB & ~EE_CS, ee_addr);
+	ew32(EECTL, EE_ENB & ~EE_CS);
 	return retval;
 }
 
@@ -618,22 +607,23 @@
 #define MII_WRITEOP		2
 static int mdio_read(struct net_device *dev, int phy_id, int location)
 {
-	long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
 	int i;
 
-	outl(read_cmd, ioaddr + MIICtrl);
+	ew32(MIICtrl, read_cmd);
 	/* Typical operation takes 25 loops. */
 	for (i = 400; i > 0; i--) {
 		barrier();
-		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
+		if ((er32(MIICtrl) & MII_READOP) == 0) {
 			/* Work around read failure bug. */
 			if (phy_id == 1 && location < 6 &&
-			    inw(ioaddr + MIIData) == 0xffff) {
-				outl(read_cmd, ioaddr + MIICtrl);
+			    er16(MIIData) == 0xffff) {
+				ew32(MIICtrl, read_cmd);
 				continue;
 			}
-			return inw(ioaddr + MIIData);
+			return er16(MIIData);
 		}
 	}
 	return 0xffff;
@@ -641,14 +631,15 @@
 
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
 {
-	long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 	int i;
 
-	outw(value, ioaddr + MIIData);
-	outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
+	ew16(MIIData, value);
+	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
 	for (i = 10000; i > 0; i--) {
 		barrier();
-		if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
+		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
 			break;
 	}
 }
@@ -657,25 +648,26 @@
 static int epic_open(struct net_device *dev)
 {
 	struct epic_private *ep = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
-	int i;
-	int retval;
+	void __iomem *ioaddr = ep->ioaddr;
+	const int irq = ep->pci_dev->irq;
+	int rc, i;
 
 	/* Soft reset the chip. */
-	outl(0x4001, ioaddr + GENCTL);
+	ew32(GENCTL, 0x4001);
 
 	napi_enable(&ep->napi);
-	if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) {
+	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
+	if (rc) {
 		napi_disable(&ep->napi);
-		return retval;
+		return rc;
 	}
 
 	epic_init_ring(dev);
 
-	outl(0x4000, ioaddr + GENCTL);
+	ew32(GENCTL, 0x4000);
 	/* This magic is documented in SMSC app note 7.15 */
 	for (i = 16; i > 0; i--)
-		outl(0x0008, ioaddr + TEST1);
+		ew32(TEST1, 0x0008);
 
 	/* Pull the chip out of low-power mode, enable interrupts, and set for
 	   PCI read multiple.  The MIIcfg setting and strange write order are
@@ -683,29 +675,29 @@
 	   wiring on the Ositech CardBus card.
 	*/
 #if 0
-	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
+	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
 #endif
 	if (ep->chip_flags & MII_PWRDWN)
-		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 
 	/* Tell the chip to byteswap descriptors on big-endian hosts */
 #ifdef __BIG_ENDIAN
-	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
-	inl(ioaddr + GENCTL);
-	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
+	er32(GENCTL);
+	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
 #else
-	outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
-	inl(ioaddr + GENCTL);
-	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
+	er32(GENCTL);
+	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
 #endif
 
 	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
 
 	for (i = 0; i < 3; i++)
-		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
 
 	ep->tx_threshold = TX_FIFO_THRESH;
-	outl(ep->tx_threshold, ioaddr + TxThresh);
+	ew32(TxThresh, ep->tx_threshold);
 
 	if (media2miictl[dev->if_port & 15]) {
 		if (ep->mii_phy_cnt)
@@ -731,26 +723,27 @@
 		}
 	}
 
-	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
-	outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
-	outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
+	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
+	ew32(PRxCDAR, ep->rx_ring_dma);
+	ew32(PTxCDAR, ep->tx_ring_dma);
 
 	/* Start the chip's Rx process. */
 	set_rx_mode(dev);
-	outl(StartRx | RxQueued, ioaddr + COMMAND);
+	ew32(COMMAND, StartRx | RxQueued);
 
 	netif_start_queue(dev);
 
 	/* Enable interrupts by setting the interrupt mask. */
-	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-		 | CntFull | TxUnderrun
-		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
+	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
+	     TxUnderrun);
 
-	if (debug > 1)
-		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
-			   "%s-duplex.\n",
-			   dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
-			   ep->mii.full_duplex ? "full" : "half");
+	if (debug > 1) {
+		printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
+		       "status %4.4x %s-duplex.\n",
+		       dev->name, ioaddr, irq, er32(GENCTL),
+		       ep->mii.full_duplex ? "full" : "half");
+	}
 
 	/* Set the timer to switch to check for link beat and perhaps switch
 	   to an alternate media type. */
@@ -760,27 +753,29 @@
 	ep->timer.function = epic_timer;				/* timer handler */
 	add_timer(&ep->timer);
 
-	return 0;
+	return rc;
 }
 
 /* Reset the chip to recover from a PCI transaction error.
    This may occur at interrupt time. */
 static void epic_pause(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
+	struct net_device_stats *stats = &dev->stats;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 
 	netif_stop_queue (dev);
 
 	/* Disable interrupts by clearing the interrupt mask. */
-	outl(0x00000000, ioaddr + INTMASK);
+	ew32(INTMASK, 0x00000000);
 	/* Stop the chip's Tx and Rx DMA processes. */
-	outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
+	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);
 
 	/* Update the error counts. */
-	if (inw(ioaddr + COMMAND) != 0xffff) {
-		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+	if (er16(COMMAND) != 0xffff) {
+		stats->rx_missed_errors	+= er8(MPCNT);
+		stats->rx_frame_errors	+= er8(ALICNT);
+		stats->rx_crc_errors	+= er8(CRCCNT);
 	}
 
 	/* Remove the packets on the Rx queue. */
@@ -789,12 +784,12 @@
 
 static void epic_restart(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 	int i;
 
 	/* Soft reset the chip. */
-	outl(0x4001, ioaddr + GENCTL);
+	ew32(GENCTL, 0x4001);
 
 	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
 		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
@@ -802,47 +797,46 @@
 
 	/* This magic is documented in SMSC app note 7.15 */
 	for (i = 16; i > 0; i--)
-		outl(0x0008, ioaddr + TEST1);
+		ew32(TEST1, 0x0008);
 
 #ifdef __BIG_ENDIAN
-	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
 #else
-	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
 #endif
-	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
+	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
 	if (ep->chip_flags & MII_PWRDWN)
-		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 
 	for (i = 0; i < 3; i++)
-		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
 
 	ep->tx_threshold = TX_FIFO_THRESH;
-	outl(ep->tx_threshold, ioaddr + TxThresh);
-	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
-	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
-		sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
-	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
-		 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
+	ew32(TxThresh, ep->tx_threshold);
+	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
+	ew32(PRxCDAR, ep->rx_ring_dma +
+	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
+	ew32(PTxCDAR, ep->tx_ring_dma +
+	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));
 
 	/* Start the chip's Rx process. */
 	set_rx_mode(dev);
-	outl(StartRx | RxQueued, ioaddr + COMMAND);
+	ew32(COMMAND, StartRx | RxQueued);
 
 	/* Enable interrupts by setting the interrupt mask. */
-	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-		 | CntFull | TxUnderrun
-		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
+	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
+	     TxUnderrun);
 
 	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
 		   " interrupt %4.4x.\n",
-		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
-		   (int)inl(ioaddr + INTSTAT));
+		   dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
 }
 
 static void check_media(struct net_device *dev)
 {
 	struct epic_private *ep = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
 	int negotiated = mii_lpa & ep->mii.advertising;
 	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
@@ -856,7 +850,7 @@
 		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
 			   " partner capability of %4.4x.\n", dev->name,
 			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
-		outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
 	}
 }
 
@@ -864,16 +858,15 @@
 {
 	struct net_device *dev = (struct net_device *)data;
 	struct epic_private *ep = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 	int next_tick = 5*HZ;
 
 	if (debug > 3) {
 		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
-			   dev->name, (int)inl(ioaddr + TxSTAT));
+		       dev->name, er32(TxSTAT));
 		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
-			   "IntStatus %4.4x RxStatus %4.4x.\n",
-			   dev->name, (int)inl(ioaddr + INTMASK),
-			   (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
+		       "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
+		       er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
 	}
 
 	check_media(dev);
@@ -885,23 +878,22 @@
 static void epic_tx_timeout(struct net_device *dev)
 {
 	struct epic_private *ep = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 
 	if (debug > 0) {
 		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
-			   "Tx status %4.4x.\n",
-			   dev->name, (int)inw(ioaddr + TxSTAT));
+		       "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
 		if (debug > 1) {
 			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
 				   dev->name, ep->dirty_tx, ep->cur_tx);
 		}
 	}
-	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
+	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
 		dev->stats.tx_fifo_errors++;
-		outl(RestartTx, ioaddr + COMMAND);
+		ew32(COMMAND, RestartTx);
 	} else {
 		epic_restart(dev);
-		outl(TxQueued, dev->base_addr + COMMAND);
+		ew32(COMMAND, TxQueued);
 	}
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
@@ -959,6 +951,7 @@
 static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 	int entry, free_count;
 	u32 ctrl_word;
 	unsigned long flags;
@@ -999,13 +992,12 @@
 
 	spin_unlock_irqrestore(&ep->lock, flags);
 	/* Trigger an immediate transmit demand. */
-	outl(TxQueued, dev->base_addr + COMMAND);
+	ew32(COMMAND, TxQueued);
 
 	if (debug > 4)
 		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
-			   "flag %2.2x Tx status %8.8x.\n",
-			   dev->name, (int)skb->len, entry, ctrl_word,
-			   (int)inl(dev->base_addr + TxSTAT));
+		       "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
+		       entry, ctrl_word, er32(TxSTAT));
 
 	return NETDEV_TX_OK;
 }
@@ -1086,18 +1078,17 @@
 {
 	struct net_device *dev = dev_instance;
 	struct epic_private *ep = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 	unsigned int handled = 0;
 	int status;
 
-	status = inl(ioaddr + INTSTAT);
+	status = er32(INTSTAT);
 	/* Acknowledge all of the current interrupt sources ASAP. */
-	outl(status & EpicNormalEvent, ioaddr + INTSTAT);
+	ew32(INTSTAT, status & EpicNormalEvent);
 
 	if (debug > 4) {
 		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
-				   "intstat=%#8.8x.\n", dev->name, status,
-				   (int)inl(ioaddr + INTSTAT));
+		       "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
 	}
 
 	if ((status & IntrSummary) == 0)
@@ -1118,19 +1109,21 @@
 
 	/* Check uncommon events all at once. */
 	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
+		struct net_device_stats *stats = &dev->stats;
+
 		if (status == EpicRemoved)
 			goto out;
 
 		/* Always update the error counts to avoid overhead later. */
-		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+		stats->rx_missed_errors	+= er8(MPCNT);
+		stats->rx_frame_errors	+= er8(ALICNT);
+		stats->rx_crc_errors	+= er8(CRCCNT);
 
 		if (status & TxUnderrun) { /* Tx FIFO underflow. */
-			dev->stats.tx_fifo_errors++;
-			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
+			stats->tx_fifo_errors++;
+			ew32(TxThresh, ep->tx_threshold += 128);
 			/* Restart the transmit process. */
-			outl(RestartTx, ioaddr + COMMAND);
+			ew32(COMMAND, RestartTx);
 		}
 		if (status & PCIBusErr170) {
 			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
@@ -1139,7 +1132,7 @@
 			epic_restart(dev);
 		}
 		/* Clear all error sources. */
-		outl(status & 0x7f18, ioaddr + INTSTAT);
+		ew32(INTSTAT, status & 0x7f18);
 	}
 
 out:
@@ -1248,17 +1241,17 @@
 
 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
 {
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 	int status;
 
-	status = inl(ioaddr + INTSTAT);
+	status = er32(INTSTAT);
 
 	if (status == EpicRemoved)
 		return;
 	if (status & RxOverflow) 	/* Missed a Rx frame. */
 		dev->stats.rx_errors++;
 	if (status & (RxOverflow | RxFull))
-		outw(RxQueued, ioaddr + COMMAND);
+		ew16(COMMAND, RxQueued);
 }
 
 static int epic_poll(struct napi_struct *napi, int budget)
@@ -1266,7 +1259,7 @@
 	struct epic_private *ep = container_of(napi, struct epic_private, napi);
 	struct net_device *dev = ep->mii.dev;
 	int work_done = 0;
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 
 rx_action:
 
@@ -1287,7 +1280,7 @@
 		more = ep->reschedule_in_poll;
 		if (!more) {
 			__napi_complete(napi);
-			outl(EpicNapiEvent, ioaddr + INTSTAT);
+			ew32(INTSTAT, EpicNapiEvent);
 			epic_napi_irq_on(dev, ep);
 		} else
 			ep->reschedule_in_poll--;
@@ -1303,8 +1296,9 @@
 
 static int epic_close(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct epic_private *ep = netdev_priv(dev);
+	struct pci_dev *pdev = ep->pci_dev;
+	void __iomem *ioaddr = ep->ioaddr;
 	struct sk_buff *skb;
 	int i;
 
@@ -1313,13 +1307,13 @@
 
 	if (debug > 1)
 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
-			   dev->name, (int)inl(ioaddr + INTSTAT));
+		       dev->name, er32(INTSTAT));
 
 	del_timer_sync(&ep->timer);
 
 	epic_disable_int(dev, ep);
 
-	free_irq(dev->irq, dev);
+	free_irq(pdev->irq, dev);
 
 	epic_pause(dev);
 
@@ -1330,7 +1324,7 @@
 		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
 		ep->rx_ring[i].buflength = 0;
 		if (skb) {
-			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
+			pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
 				 	 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(skb);
 		}
@@ -1341,26 +1335,28 @@
 		ep->tx_skbuff[i] = NULL;
 		if (!skb)
 			continue;
-		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
-				 skb->len, PCI_DMA_TODEVICE);
+		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
+				 PCI_DMA_TODEVICE);
 		dev_kfree_skb(skb);
 	}
 
 	/* Green! Leave the chip in low-power mode. */
-	outl(0x0008, ioaddr + GENCTL);
+	ew32(GENCTL, 0x0008);
 
 	return 0;
 }
 
 static struct net_device_stats *epic_get_stats(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 
 	if (netif_running(dev)) {
-		/* Update the error counts. */
-		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+		struct net_device_stats *stats = &dev->stats;
+
+		stats->rx_missed_errors	+= er8(MPCNT);
+		stats->rx_frame_errors	+= er8(ALICNT);
+		stats->rx_crc_errors	+= er8(CRCCNT);
 	}
 
 	return &dev->stats;
@@ -1373,13 +1369,13 @@
 
 static void set_rx_mode(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 	unsigned char mc_filter[8];		 /* Multicast hash filter */
 	int i;
 
 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
-		outl(0x002C, ioaddr + RxCtrl);
+		ew32(RxCtrl, 0x002c);
 		/* Unconditionally log net taps. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
@@ -1387,9 +1383,9 @@
 		   is never enabled. */
 		/* Too many to filter perfectly -- accept all multicasts. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
-		outl(0x000C, ioaddr + RxCtrl);
+		ew32(RxCtrl, 0x000c);
 	} else if (netdev_mc_empty(dev)) {
-		outl(0x0004, ioaddr + RxCtrl);
+		ew32(RxCtrl, 0x0004);
 		return;
 	} else {					/* Never executed, for now. */
 		struct netdev_hw_addr *ha;
@@ -1404,7 +1400,7 @@
 	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
 	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
 		for (i = 0; i < 4; i++)
-			outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
+			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
 		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
 	}
 }
@@ -1466,22 +1462,26 @@
 
 static int ethtool_begin(struct net_device *dev)
 {
-	unsigned long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
+
 	/* power-up, if interface is down */
-	if (! netif_running(dev)) {
-		outl(0x0200, ioaddr + GENCTL);
-		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+	if (!netif_running(dev)) {
+		ew32(GENCTL, 0x0200);
+		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 	}
 	return 0;
 }
 
 static void ethtool_complete(struct net_device *dev)
 {
-	unsigned long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
+
 	/* power-down, if interface is down */
-	if (! netif_running(dev)) {
-		outl(0x0008, ioaddr + GENCTL);
-		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+	if (!netif_running(dev)) {
+		ew32(GENCTL, 0x0008);
+		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
 	}
 }
 
@@ -1500,14 +1500,14 @@
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct epic_private *np = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = np->ioaddr;
 	struct mii_ioctl_data *data = if_mii(rq);
 	int rc;
 
 	/* power-up, if interface is down */
 	if (! netif_running(dev)) {
-		outl(0x0200, ioaddr + GENCTL);
-		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+		ew32(GENCTL, 0x0200);
+		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 	}
 
 	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
@@ -1517,14 +1517,14 @@
 
 	/* power-down, if interface is down */
 	if (! netif_running(dev)) {
-		outl(0x0008, ioaddr + GENCTL);
-		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+		ew32(GENCTL, 0x0008);
+		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
 	}
 	return rc;
 }
 
 
-static void __devexit epic_remove_one (struct pci_dev *pdev)
+static void __devexit epic_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct epic_private *ep = netdev_priv(dev);
@@ -1532,9 +1532,7 @@
 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
 	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
 	unregister_netdev(dev);
-#ifndef USE_IO_OPS
-	iounmap((void*) dev->base_addr);
-#endif
+	pci_iounmap(pdev, ep->ioaddr);
 	pci_release_regions(pdev);
 	free_netdev(dev);
 	pci_disable_device(pdev);
@@ -1548,13 +1546,14 @@
 static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
-	long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 
 	if (!netif_running(dev))
 		return 0;
 	epic_pause(dev);
 	/* Put the chip into low-power mode. */
-	outl(0x0008, ioaddr + GENCTL);
+	ew32(GENCTL, 0x0008);
 	/* pci_power_off(pdev, -1); */
 	return 0;
 }
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4a69710..dab9c6f 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1166,10 +1166,8 @@
 
 /* Quickly dumps bad packets */
 static void
-smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
+smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
 {
-	unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
-
 	if (likely(pktwords >= 4)) {
 		unsigned int timeout = 500;
 		unsigned int val;
@@ -1233,7 +1231,7 @@
 			continue;
 		}
 
-		skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
+		skb = netdev_alloc_skb(dev, pktwords << 2);
 		if (unlikely(!skb)) {
 			SMSC_WARN(pdata, rx_err,
 				  "Unable to allocate skb for rx packet");
@@ -1243,14 +1241,12 @@
 			break;
 		}
 
-		skb->data = skb->head;
-		skb_reset_tail_pointer(skb);
+		pdata->ops->rx_readfifo(pdata,
+				 (unsigned int *)skb->data, pktwords);
 
 		/* Align IP on 16B boundary */
 		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, pktlength - 4);
-		pdata->ops->rx_readfifo(pdata,
-				 (unsigned int *)skb->head, pktwords);
 		skb->protocol = eth_type_trans(skb, dev);
 		skb_checksum_none_assert(skb);
 		netif_receive_skb(skb);
@@ -1565,7 +1561,7 @@
 	smsc911x_reg_write(pdata, FIFO_INT, temp);
 
 	/* set RX Data offset to 2 bytes for alignment */
-	smsc911x_reg_write(pdata, RX_CFG, (2 << 8));
+	smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
 
 	/* enable NAPI polling before enabling RX interrupts */
 	napi_enable(&pdata->napi);
@@ -2070,6 +2066,7 @@
 	.get_eeprom_len = smsc911x_ethtool_get_eeprom_len,
 	.get_eeprom = smsc911x_ethtool_get_eeprom,
 	.set_eeprom = smsc911x_ethtool_set_eeprom,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops smsc911x_netdev_ops = {
@@ -2382,7 +2379,6 @@
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	pdata = netdev_priv(dev);
-
 	dev->irq = irq_res->start;
 	irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
 	pdata->ioaddr = ioremap_nocache(res->start, res_size);
@@ -2446,7 +2442,7 @@
 	if (retval) {
 		SMSC_WARN(pdata, probe,
 			  "Unable to claim requested irq: %d", dev->irq);
-		goto out_free_irq;
+		goto out_disable_resources;
 	}
 
 	retval = register_netdev(dev);
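Since smsc911x_rx_fastforward() now takes a word count, the byte-to-word conversion it used to do internally has to happen at its callers. Assuming the rx poll loop (where dev, pdata, skb and pktlength are in scope) derives it exactly as the removed line did, the caller side would look like:

	/* assumed caller-side conversion, matching the formula removed above */
	unsigned int pktwords = (pktlength + NET_IP_ALIGN + 3) >> 2;

	skb = netdev_alloc_skb(dev, pktwords << 2);	/* room for whole FIFO words */
	pdata->ops->rx_readfifo(pdata, (unsigned int *)skb->data, pktwords);
	skb_reserve(skb, NET_IP_ALIGN);			/* align the IP header */
	skb_put(skb, pktlength - 4);			/* drop the trailing FCS */
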
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 3838647..fd33b21 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -54,7 +54,7 @@
 };
 
 struct smsc9420_pdata {
-	void __iomem *base_addr;
+	void __iomem *ioaddr;
 	struct pci_dev *pdev;
 	struct net_device *dev;
 
@@ -114,13 +114,13 @@
 
 static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
 {
-	return ioread32(pd->base_addr + offset);
+	return ioread32(pd->ioaddr + offset);
 }
 
 static inline void
 smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value)
 {
-	iowrite32(value, pd->base_addr + offset);
+	iowrite32(value, pd->ioaddr + offset);
 }
 
 static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
@@ -469,6 +469,7 @@
 	.set_eeprom = smsc9420_ethtool_set_eeprom,
 	.get_regs_len = smsc9420_ethtool_getregslen,
 	.get_regs = smsc9420_ethtool_getregs,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 /* Sets the device MAC address to dev_addr */
@@ -659,7 +660,7 @@
 	ulong flags;
 
 	BUG_ON(!pd);
-	BUG_ON(!pd->base_addr);
+	BUG_ON(!pd->ioaddr);
 
 	int_cfg = smsc9420_reg_read(pd, INT_CFG);
 
@@ -720,9 +721,12 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void smsc9420_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
+	struct smsc9420_pdata *pd = netdev_priv(dev);
+	const int irq = pd->pdev->irq;
+
+	disable_irq(irq);
 	smsc9420_isr(0, dev);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
@@ -759,7 +763,7 @@
 	smsc9420_stop_rx(pd);
 	smsc9420_free_rx_ring(pd);
 
-	free_irq(dev->irq, pd);
+	free_irq(pd->pdev->irq, pd);
 
 	smsc9420_dmac_soft_reset(pd);
 
@@ -1331,15 +1335,12 @@
 
 static int smsc9420_open(struct net_device *dev)
 {
-	struct smsc9420_pdata *pd;
+	struct smsc9420_pdata *pd = netdev_priv(dev);
 	u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl;
+	const int irq = pd->pdev->irq;
 	unsigned long flags;
 	int result = 0, timeout;
 
-	BUG_ON(!dev);
-	pd = netdev_priv(dev);
-	BUG_ON(!pd);
-
 	if (!is_valid_ether_addr(dev->dev_addr)) {
 		smsc_warn(IFUP, "dev_addr is not a valid MAC address");
 		result = -EADDRNOTAVAIL;
@@ -1358,9 +1359,10 @@
 	smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
 	smsc9420_pci_flush_write(pd);
 
-	if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
-			DRV_NAME, pd)) {
-		smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq);
+	result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
+			     DRV_NAME, pd);
+	if (result) {
+		smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
 		result = -ENODEV;
 		goto out_0;
 	}
@@ -1395,7 +1397,7 @@
 	smsc9420_pci_flush_write(pd);
 
 	/* test the IRQ connection to the ISR */
-	smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);
+	smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq);
 	pd->software_irq_signal = false;
 
 	spin_lock_irqsave(&pd->int_lock, flags);
@@ -1430,7 +1432,7 @@
 		goto out_free_irq_1;
 	}
 
-	smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq);
+	smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq);
 
 	result = smsc9420_alloc_tx_ring(pd);
 	if (result) {
@@ -1490,7 +1492,7 @@
 out_free_tx_ring_2:
 	smsc9420_free_tx_ring(pd);
 out_free_irq_1:
-	free_irq(dev->irq, pd);
+	free_irq(irq, pd);
 out_0:
 	return result;
 }
@@ -1519,7 +1521,7 @@
 		smsc9420_stop_rx(pd);
 		smsc9420_free_rx_ring(pd);
 
-		free_irq(dev->irq, pd);
+		free_irq(pd->pdev->irq, pd);
 
 		netif_device_detach(dev);
 	}
@@ -1552,6 +1554,7 @@
 		smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
 
 	if (netif_running(dev)) {
+		/* FIXME: gross. It looks like an ancient PM relic. */
 		err = smsc9420_open(dev);
 		netif_device_attach(dev);
 	}
@@ -1625,8 +1628,6 @@
 	/* registers are double mapped with 0 offset for LE and 0x200 for BE */
 	virt_addr += LAN9420_CPSR_ENDIAN_OFFSET;
 
-	dev->base_addr = (ulong)virt_addr;
-
 	pd = netdev_priv(dev);
 
 	/* pci descriptors are created in the PCI consistent area */
@@ -1646,7 +1647,7 @@
 
 	pd->pdev = pdev;
 	pd->dev = dev;
-	pd->base_addr = virt_addr;
+	pd->ioaddr = virt_addr;
 	pd->msg_enable = smsc_debug;
 	pd->rx_csum = true;
 
@@ -1669,7 +1670,6 @@
 
 	dev->netdev_ops = &smsc9420_netdev_ops;
 	dev->ethtool_ops = &smsc9420_ethtool_ops;
-	dev->irq = pdev->irq;
 
 	netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);
 
@@ -1727,7 +1727,7 @@
 	pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
 		(RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
 
-	iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET);
+	iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
 	pci_release_regions(pdev);
 	free_netdev(dev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 0319d64..f5dedcb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -97,6 +97,16 @@
 	unsigned long normal_irq_n;
 };
 
+/* CSR Frequency Access Defines */
+#define CSR_F_35M	35000000
+#define CSR_F_60M	60000000
+#define CSR_F_100M	100000000
+#define CSR_F_150M	150000000
+#define CSR_F_250M	250000000
+#define CSR_F_300M	300000000
+
+#define	MAC_CSR_H_FRQ_MASK	0x20
+
 #define HASH_TABLE_SIZE 64
 #define PAUSE_TIME 0x200
 
@@ -137,6 +147,7 @@
 #define DMA_HW_FEAT_FLEXIPPSEN	0x04000000 /* Flexible PPS Output */
 #define DMA_HW_FEAT_SAVLANINS	0x08000000 /* Source Addr or VLAN Insertion */
 #define DMA_HW_FEAT_ACTPHYIF	0x70000000 /* Active/selected PHY interface */
+#define DEFAULT_DMA_PBL		8
 
 enum rx_frame_status { /* IPC status */
 	good_frame = 0,
@@ -228,7 +239,7 @@
 	int (*get_rx_owner) (struct dma_desc *p);
 	void (*set_rx_owner) (struct dma_desc *p);
 	/* Get the receive frame size */
-	int (*get_rx_frame_len) (struct dma_desc *p);
+	int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type);
 	/* Return the reception status looking at the RDES1 */
 	int (*rx_status) (void *data, struct stmmac_extra_stats *x,
 			  struct dma_desc *p);
@@ -236,7 +247,8 @@
 
 struct stmmac_dma_ops {
 	/* DMA core initialization */
-	int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+	int (*init) (void __iomem *ioaddr, int pbl, int fb, int burst_len,
+			u32 dma_tx, u32 dma_rx);
 	/* Dump DMA registers */
 	void (*dump_regs) (void __iomem *ioaddr);
 	/* Set tx/rx threshold in the csr6 register
@@ -261,8 +273,8 @@
 struct stmmac_ops {
 	/* MAC core initialization */
 	void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
-	/* Support checksum offload engine */
-	int  (*rx_coe) (void __iomem *ioaddr);
+	/* Enable and verify that the IPC module is supported */
+	int (*rx_ipc) (void __iomem *ioaddr);
 	/* Dump MAC registers */
 	void (*dump_regs) (void __iomem *ioaddr);
 	/* Handle extra events on specific interrupts hw dependent */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index cfcef0e..54339a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -142,7 +142,7 @@
 #define DMA_BUS_MODE_RPBL_MASK	0x003e0000	/* Rx-Programmable Burst Len */
 #define DMA_BUS_MODE_RPBL_SHIFT	17
 #define DMA_BUS_MODE_USP	0x00800000
-#define DMA_BUS_MODE_4PBL	0x01000000
+#define DMA_BUS_MODE_PBL	0x01000000
 #define DMA_BUS_MODE_AAL	0x02000000
 
 /* DMA CRS Control and Status Register Mapping */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b1c48b9..e7cbcd9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -46,7 +46,7 @@
 #endif
 }
 
-static int dwmac1000_rx_coe_supported(void __iomem *ioaddr)
+static int dwmac1000_rx_ipc_enable(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + GMAC_CONTROL);
 
@@ -211,7 +211,7 @@
 
 static const struct stmmac_ops dwmac1000_ops = {
 	.core_init = dwmac1000_core_init,
-	.rx_coe = dwmac1000_rx_coe_supported,
+	.rx_ipc = dwmac1000_rx_ipc_enable,
 	.dump_regs = dwmac1000_dump_regs,
 	.host_irq_status = dwmac1000_irq_status,
 	.set_filter = dwmac1000_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 4d5402a..3675c57 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,8 +30,8 @@
 #include "dwmac1000.h"
 #include "dwmac_dma.h"
 
-static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
-			      u32 dma_rx)
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
+			      int burst_len, u32 dma_tx, u32 dma_rx)
 {
 	u32 value = readl(ioaddr + DMA_BUS_MODE);
 	int limit;
@@ -48,15 +48,47 @@
 	if (limit < 0)
 		return -EBUSY;
 
-	value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
-	    ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
-	     (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+	/*
+	 * Set the DMA PBL (Programmable Burst Length) mode.
+	 * Before stmmac core 3.50 this mode bit meant 4xPBL; from 3.50
+	 * onwards it acts as 8xPBL.
+	 * For core rev < 3.50, with 4xPBL mode set, the DMA transfers
+	 * data in 4, 8, 16, 32, 64 or 128 beats depending on the pbl
+	 * value.
+	 * For core rev >= 3.50, with 8xPBL mode set, the DMA transfers
+	 * data in 8, 16, 32, 64, 128 or 256 beats depending on the pbl
+	 * value.
+	 */
+	value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
+		(pbl << DMA_BUS_MODE_RPBL_SHIFT));
+
+	/* Set the Fixed burst mode */
+	if (fb)
+		value |= DMA_BUS_MODE_FB;
 
 #ifdef CONFIG_STMMAC_DA
 	value |= DMA_BUS_MODE_DA;	/* Rx has priority over tx */
 #endif
 	writel(value, ioaddr + DMA_BUS_MODE);
 
+	/* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
+	 * register with the supported bursts.
+	 *
+	 * Note: this is applicable only for revision GMACv3.61a. On
+	 * older versions this register is reserved and writing it has
+	 * no effect.
+	 *
+	 * Note:
+	 *  For Fixed Burst Mode: writing 0xFF to this register, using
+	 *  the configuration passed from platform code, ensures that all
+	 *  bursts supported by the core are enabled while the unsupported
+	 *  ones remain ineffective.
+	 *
+	 *  For Non Fixed Burst Mode: provide the maximum burst length.
+	 *  Any burst equal to or below the provided value is allowed
+	 *  to be performed. */
+	writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
+
 	/* Mask interrupts by writing to CSR7 */
 	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
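The comment above describes how dwmac1000_dma_init() now builds the bus-mode word from the platform-supplied pbl/fixed_burst parameters. Below is a minimal standalone sketch of that composition (not part of the patch): the 8xPBL mode bit (0x01000000) and the Rx PBL shift (17) come from the dwmac1000.h hunk above, while the Fixed Burst bit value and the Tx PBL shift used here are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>

#define DMA_BUS_MODE_PBL	0x01000000	/* 8xPBL mode bit (from dwmac1000.h) */
#define DMA_BUS_MODE_FB		0x00010000	/* Fixed Burst bit: assumed value */
#define DMA_BUS_MODE_PBL_SHIFT	8		/* Tx PBL field shift: assumed value */
#define DMA_BUS_MODE_RPBL_SHIFT	17		/* Rx PBL field shift (from dwmac1000.h) */

static uint32_t bus_mode_value(int pbl, int fixed_burst)
{
	/* Mirror the dwmac1000_dma_init() hunk: PBL mode bit plus the
	 * same pbl value programmed into both Tx and Rx fields. */
	uint32_t value = DMA_BUS_MODE_PBL |
			 (pbl << DMA_BUS_MODE_PBL_SHIFT) |
			 (pbl << DMA_BUS_MODE_RPBL_SHIFT);

	if (fixed_burst)
		value |= DMA_BUS_MODE_FB;
	return value;
}

int main(void)
{
	/* DEFAULT_DMA_PBL is 8 in the patch above. */
	printf("pbl=8 fb=0 -> 0x%08x\n", (unsigned)bus_mode_value(8, 0));
	printf("pbl=8 fb=1 -> 0x%08x\n", (unsigned)bus_mode_value(8, 1));
	return 0;
}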
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 138fb8dd..efde50f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -43,11 +43,6 @@
 #endif
 }
 
-static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
-{
-	return 0;
-}
-
 static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
 {
 	pr_info("\t----------------------------------------------\n"
@@ -72,6 +67,11 @@
 		readl(ioaddr + MAC_VLAN2));
 }
 
+static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
+{
+	return 0;
+}
+
 static void dwmac100_irq_status(void __iomem *ioaddr)
 {
 	return;
@@ -160,7 +160,7 @@
 
 static const struct stmmac_ops dwmac100_ops = {
 	.core_init = dwmac100_core_init,
-	.rx_coe = dwmac100_rx_coe_supported,
+	.rx_ipc = dwmac100_rx_ipc_enable,
 	.dump_regs = dwmac100_dump_mac_regs,
 	.host_irq_status = dwmac100_irq_status,
 	.set_filter = dwmac100_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index bc17fd0..92ed2e0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,8 +32,8 @@
 #include "dwmac100.h"
 #include "dwmac_dma.h"
 
-static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
-			     u32 dma_rx)
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
+			     int burst_len, u32 dma_tx, u32 dma_rx)
 {
 	u32 value = readl(ioaddr + DMA_BUS_MODE);
 	int limit;
@@ -52,7 +52,7 @@
 
 	/* Enable Application Access by writing to DMA CSR0 */
 	writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
-	       ioaddr + DMA_BUS_MODE);
+			ioaddr + DMA_BUS_MODE);
 
 	/* Mask interrupts by writing to CSR7 */
 	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 437edac..6e0360f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -32,6 +32,7 @@
 #define DMA_CONTROL		0x00001018	/* Ctrl (Operational Mode) */
 #define DMA_INTR_ENA		0x0000101c	/* Interrupt Enable */
 #define DMA_MISSED_FRAME_CTR	0x00001020	/* Missed Frame Counter */
+#define DMA_AXI_BUS_MODE       0x00001028      /* AXI Bus Mode */
 #define DMA_CUR_TX_BUF_ADDR	0x00001050	/* Current Host Tx Buffer */
 #define DMA_CUR_RX_BUF_ADDR	0x00001054	/* Current Host Rx Buffer */
 #define DMA_HW_FEATURE		0x00001058	/* HW Feature Register */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index ad1b627..2fc8ef9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -22,6 +22,7 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/stmmac.h>
 #include "common.h"
 #include "descs_com.h"
 
@@ -309,9 +310,17 @@
 	p->des01.etx.interrupt = 1;
 }
 
-static int enh_desc_get_rx_frame_len(struct dma_desc *p)
+static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
 {
-	return p->des01.erx.frame_length;
+	/* The type-1 checksum offload engines append the checksum at
+	 * the end of the frame and the two checksum bytes are counted
+	 * in the descriptor length.
+	 * Adjust the frame length accordingly for type-1 checksum
+	 * offload engines. */
+	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+		return p->des01.erx.frame_length - 2;
+	else
+		return p->des01.erx.frame_length;
 }
 
 const struct stmmac_desc_ops enh_desc_ops = {
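Both descriptor flavours (the enhanced descriptors above and the normal ones below) apply the same two-byte adjustment for type-1 engines. A standalone sketch of that adjustment follows, assuming STMMAC_RX_COE_TYPE1 evaluates to 1 (the enum itself is defined outside the hunks shown):

#include <stdio.h>

#define STMMAC_RX_COE_TYPE1	1	/* assumed enum value, not shown in the patch */

static int rx_frame_len(int desc_frame_length, int rx_coe_type)
{
	/* Type-1 engines append the 2-byte checksum and count it in
	 * the descriptor length, so strip it here. */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		return desc_frame_length - 2;
	return desc_frame_length;
}

int main(void)
{
	printf("type-1 engine: %d\n", rx_frame_len(1516, STMMAC_RX_COE_TYPE1));
	printf("type-2/none:   %d\n", rx_frame_len(1516, 0));
	return 0;
}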
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 25953bb..68962c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -22,6 +22,7 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/stmmac.h>
 #include "common.h"
 #include "descs_com.h"
 
@@ -201,9 +202,17 @@
 	p->des01.tx.interrupt = 1;
 }
 
-static int ndesc_get_rx_frame_len(struct dma_desc *p)
+static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
 {
-	return p->des01.rx.frame_length;
+	/* The type-1 checksum offload engines append the checksum at
+	 * the end of the frame and the two checksum bytes are counted
+	 * in the descriptor length.
+	 * Adjust the frame length accordingly for type-1 checksum
+	 * offload engines. */
+	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+		return p->des01.rx.frame_length - 2;
+	else
+		return p->des01.rx.frame_length;
 }
 
 const struct stmmac_desc_ops ndesc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b4b095f..db2de9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -21,7 +21,9 @@
 *******************************************************************************/
 
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
-#define DRV_MODULE_VERSION	"Feb_2012"
+#define DRV_MODULE_VERSION	"March_2012"
+
+#include <linux/clk.h>
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 #include "common.h"
@@ -56,8 +58,6 @@
 
 	struct stmmac_extra_stats xstats;
 	struct napi_struct napi;
-
-	int rx_coe;
 	int no_csum_insertion;
 
 	struct phy_device *phydev;
@@ -81,6 +81,10 @@
 	struct stmmac_counters mmc;
 	struct dma_features dma_cap;
 	int hw_cap_support;
+#ifdef CONFIG_HAVE_CLK
+	struct clk *stmmac_clk;
+#endif
+	int clk_csr;
 };
 
 extern int phyaddr;
@@ -99,3 +103,42 @@
 struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 				     struct plat_stmmacenet_data *plat_dat,
 				     void __iomem *addr);
+
+#ifdef CONFIG_HAVE_CLK
+static inline int stmmac_clk_enable(struct stmmac_priv *priv)
+{
+	if (!IS_ERR(priv->stmmac_clk))
+		return clk_enable(priv->stmmac_clk);
+
+	return 0;
+}
+
+static inline void stmmac_clk_disable(struct stmmac_priv *priv)
+{
+	if (IS_ERR(priv->stmmac_clk))
+		return;
+
+	clk_disable(priv->stmmac_clk);
+}
+static inline int stmmac_clk_get(struct stmmac_priv *priv)
+{
+	priv->stmmac_clk = clk_get(priv->device, NULL);
+
+	if (IS_ERR(priv->stmmac_clk))
+		return PTR_ERR(priv->stmmac_clk);
+
+	return 0;
+}
+#else
+static inline int stmmac_clk_enable(struct stmmac_priv *priv)
+{
+	return 0;
+}
+static inline void stmmac_clk_disable(struct stmmac_priv *priv)
+{
+}
+static inline int stmmac_clk_get(struct stmmac_priv *priv)
+{
+	return 0;
+}
+#endif /* CONFIG_HAVE_CLK */
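The CONFIG_HAVE_CLK stubs above let the rest of the driver call the clock helpers unconditionally. A tiny standalone sketch of the same pattern, with HAVE_CLK standing in for the kernel config symbol:

#include <stdio.h>

#ifdef HAVE_CLK
static int example_clk_enable(void)
{
	puts("clk enabled");
	return 0;
}
static void example_clk_disable(void)
{
	puts("clk disabled");
}
#else
/* Without clock support the helpers compile to no-ops. */
static int example_clk_enable(void)
{
	return 0;
}
static void example_clk_disable(void)
{
}
#endif

int main(void)
{
	/* Call sites stay free of #ifdefs, as in stmmac_open/release. */
	example_clk_enable();
	example_clk_disable();
	puts("done");
	return 0;
}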
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index f98e151..ce43184 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -481,6 +481,7 @@
 	.get_wol = stmmac_get_wol,
 	.set_wol = stmmac_set_wol,
 	.get_sset_count	= stmmac_get_sset_count,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 48d56da..1a4cf81 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -163,6 +163,38 @@
 		pause = PAUSE_TIME;
 }
 
+static void stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+#ifdef CONFIG_HAVE_CLK
+	u32 clk_rate;
+
+	if (IS_ERR(priv->stmmac_clk))
+		return;
+
+	clk_rate = clk_get_rate(priv->stmmac_clk);
+
+	/* The platform-provided default clk_csr is assumed valid for
+	 * all cases except the ones mentioned below. */
+	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
+		if (clk_rate < CSR_F_35M)
+			priv->clk_csr = STMMAC_CSR_20_35M;
+		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
+			priv->clk_csr = STMMAC_CSR_35_60M;
+		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
+			priv->clk_csr = STMMAC_CSR_60_100M;
+		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
+			priv->clk_csr = STMMAC_CSR_100_150M;
+		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
+			priv->clk_csr = STMMAC_CSR_150_250M;
+		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+			priv->clk_csr = STMMAC_CSR_250_300M;
+	} /* For values higher than the IEEE 802.3 specified frequency
+	   * we cannot estimate the proper divider because the clk_csr_i
+	   * frequency is not known, so the default divider is left
+	   * unchanged. */
+#endif
+}
+
 #if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
 static void print_pkt(unsigned char *buf, int len)
 {
@@ -307,7 +339,13 @@
 	priv->speed = 0;
 	priv->oldduplex = -1;
 
-	snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id);
+	if (priv->plat->phy_bus_name)
+		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+				priv->plat->phy_bus_name, priv->plat->bus_id);
+	else
+		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+				priv->plat->bus_id);
+
 	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 		 priv->plat->phy_addr);
 	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id);
@@ -884,6 +922,24 @@
 						   priv->dev->dev_addr);
 }
 
+static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+{
+	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
+
+	/* Some DMA parameters can be passed from the platform;
+	 * if they are not passed we keep defaults (good for all
+	 * the chips) and init the DMA. */
+	if (priv->plat->dma_cfg) {
+		pbl = priv->plat->dma_cfg->pbl;
+		fixed_burst = priv->plat->dma_cfg->fixed_burst;
+		burst_len = priv->plat->dma_cfg->burst_len;
+	}
+
+	return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst,
+				   burst_len, priv->dma_tx_phy,
+				   priv->dma_rx_phy);
+}
+
 /**
  *  stmmac_open - open entry point of the driver
  *  @dev : pointer to the device structure.
@@ -898,16 +954,6 @@
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;
 
-	stmmac_check_ether_addr(priv);
-
-	/* MDIO bus Registration */
-	ret = stmmac_mdio_register(dev);
-	if (ret < 0) {
-		pr_debug("%s: MDIO bus (id: %d) registration failed",
-			 __func__, priv->plat->bus_id);
-		return ret;
-	}
-
 #ifdef CONFIG_STMMAC_TIMER
 	priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
 	if (unlikely(priv->tm == NULL))
@@ -925,6 +971,10 @@
 	} else
 		priv->tm->enable = 1;
 #endif
+	stmmac_clk_enable(priv);
+
+	stmmac_check_ether_addr(priv);
+
 	ret = stmmac_init_phy(dev);
 	if (unlikely(ret)) {
 		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
@@ -938,8 +988,7 @@
 	init_dma_desc_rings(dev);
 
 	/* DMA initialization and SW reset */
-	ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
-				  priv->dma_tx_phy, priv->dma_rx_phy);
+	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
 		pr_err("%s: DMA initialization failed\n", __func__);
 		goto open_error;
@@ -1026,6 +1075,8 @@
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
 
+	stmmac_clk_disable(priv);
+
 	return ret;
 }
 
@@ -1077,7 +1128,7 @@
 #ifdef CONFIG_STMMAC_DEBUG_FS
 	stmmac_exit_fs();
 #endif
-	stmmac_mdio_unregister(dev);
+	stmmac_clk_disable(priv);
 
 	return 0;
 }
@@ -1276,7 +1327,8 @@
 			struct sk_buff *skb;
 			int frame_len;
 
-			frame_len = priv->hw->desc->get_rx_frame_len(p);
+			frame_len = priv->hw->desc->get_rx_frame_len(p,
+					priv->plat->rx_coe);
 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
 			 * Type frames (LLC/LLC-SNAP) */
 			if (unlikely(status != llc_snap))
@@ -1312,7 +1364,7 @@
 #endif
 			skb->protocol = eth_type_trans(skb, priv->dev);
 
-			if (unlikely(!priv->rx_coe)) {
+			if (unlikely(!priv->plat->rx_coe)) {
 				/* No RX COE for old mac10/100 devices */
 				skb_checksum_none_assert(skb);
 				netif_receive_skb(skb);
@@ -1459,8 +1511,10 @@
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	if (!priv->rx_coe)
+	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
 		features &= ~NETIF_F_RXCSUM;
+	else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
+		features &= ~NETIF_F_IPV6_CSUM;
 	if (!priv->plat->tx_coe)
 		features &= ~NETIF_F_ALL_CSUM;
 
@@ -1765,17 +1819,32 @@
 		 * register (if supported).
 		 */
 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
-		priv->plat->tx_coe = priv->dma_cap.tx_coe;
 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+
+		priv->plat->tx_coe = priv->dma_cap.tx_coe;
+
+		if (priv->dma_cap.rx_coe_type2)
+			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
+		else if (priv->dma_cap.rx_coe_type1)
+			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+
 	} else
 		pr_info(" No HW DMA feature register supported");
 
 	/* Select the enhanced/normal descriptor structures */
 	stmmac_selec_desc_mode(priv);
 
-	priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
-	if (priv->rx_coe)
-		pr_info(" RX Checksum Offload Engine supported\n");
+	/* Enable the IPC (Checksum Offload) and check if the feature has been
+	 * enabled during the core configuration. */
+	ret = priv->hw->mac->rx_ipc(priv->ioaddr);
+	if (!ret) {
+		pr_warning(" RX IPC Checksum Offload not configured.\n");
+		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+	}
+
+	if (priv->plat->rx_coe)
+		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
+			priv->plat->rx_coe);
 	if (priv->plat->tx_coe)
 		pr_info(" TX Checksum insertion supported\n");
 
@@ -1856,6 +1925,28 @@
 		goto error;
 	}
 
+	if (stmmac_clk_get(priv))
+		pr_warning("%s: warning: cannot get CSR clock\n", __func__);
+
+	/* If a specific clk_csr value is passed from the platform
+	 * this means that the CSR Clock Range selection cannot be
+	 * changed at run-time and is fixed. Otherwise the driver
+	 * tries to set the MDC clock dynamically according to the
+	 * actual csr clock input.
+	 */
+	if (!priv->plat->clk_csr)
+		stmmac_clk_csr_set(priv);
+	else
+		priv->clk_csr = priv->plat->clk_csr;
+
+	/* MDIO bus Registration */
+	ret = stmmac_mdio_register(ndev);
+	if (ret < 0) {
+		pr_debug("%s: MDIO bus (id: %d) registration failed",
+			 __func__, priv->plat->bus_id);
+		goto error;
+	}
+
 	return priv;
 
 error:
@@ -1883,6 +1974,7 @@
 	priv->hw->dma->stop_tx(priv->ioaddr);
 
 	stmmac_set_mac(priv->ioaddr, false);
+	stmmac_mdio_unregister(ndev);
 	netif_carrier_off(ndev);
 	unregister_netdev(ndev);
 	free_netdev(ndev);
@@ -1925,9 +2017,11 @@
 	/* Enable Power down mode by programming the PMT regs */
 	if (device_may_wakeup(priv->device))
 		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
-	else
+	else {
 		stmmac_set_mac(priv->ioaddr, false);
-
+		/* Disable the clock when PMT (wake-up) is not used */
+		stmmac_clk_disable(priv);
+	}
 	spin_unlock(&priv->lock);
 	return 0;
 }
@@ -1948,6 +2042,9 @@
 	 * from another devices (e.g. serial console). */
 	if (device_may_wakeup(priv->device))
 		priv->hw->mac->pmt(priv->ioaddr, 0);
+	else
+		/* enable the clk previously disabled */
+		stmmac_clk_enable(priv);
 
 	netif_device_attach(ndev);
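stmmac_clk_csr_set() maps the measured CSR clock rate onto one of the fixed ranges when the platform leaves clk_csr unset. A standalone sketch of that mapping follows; the 35 MHz threshold and the numeric STMMAC_CSR_* codes are assumptions here, while the 60/100/150/250/300 MHz thresholds match the CSR_F_* macros added to common.h.

#include <stdio.h>

/* Assumed symbolic range codes, lowest frequency range first. */
enum csr_range {
	CSR_20_35M, CSR_35_60M, CSR_60_100M,
	CSR_100_150M, CSR_150_250M, CSR_250_300M,
};

static int clk_to_csr(unsigned long rate)
{
	if (rate < 35000000)		/* 35 MHz threshold assumed */
		return CSR_20_35M;
	else if (rate < 60000000)	/* CSR_F_60M */
		return CSR_35_60M;
	else if (rate < 100000000)	/* CSR_F_100M */
		return CSR_60_100M;
	else if (rate < 150000000)	/* CSR_F_150M */
		return CSR_100_150M;
	else if (rate < 250000000)	/* CSR_F_250M */
		return CSR_150_250M;
	else if (rate < 300000000)	/* CSR_F_300M */
		return CSR_250_300M;
	/* Above 300 MHz the driver keeps the platform default. */
	return -1;
}

int main(void)
{
	printf("75 MHz  -> range %d\n", clk_to_csr(75000000));
	printf("200 MHz -> range %d\n", clk_to_csr(200000000));
	return 0;
}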
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 7319532..ade1082 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -34,6 +34,22 @@
 #define MII_BUSY 0x00000001
 #define MII_WRITE 0x00000002
 
+static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
+{
+	unsigned long curr;
+	unsigned long finish = jiffies + 3 * HZ;
+
+	do {
+		curr = jiffies;
+		if (readl(ioaddr + mii_addr) & MII_BUSY)
+			cpu_relax();
+		else
+			return 0;
+	} while (!time_after_eq(curr, finish));
+
+	return -EBUSY;
+}
+
 /**
  * stmmac_mdio_read
  * @bus: points to the mii_bus structure
@@ -54,11 +70,15 @@
 	int data;
 	u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
 			((phyreg << 6) & (0x000007C0)));
-	regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
+	regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
 
-	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+	if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+		return -EBUSY;
+
 	writel(regValue, priv->ioaddr + mii_address);
-	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+
+	if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+		return -EBUSY;
 
 	/* Read the data from the MII data register */
 	data = (int)readl(priv->ioaddr + mii_data);
@@ -86,20 +106,18 @@
 	    (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
 	    | MII_WRITE;
 
-	value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
-
+	value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
 
 	/* Wait until any existing MII operation is complete */
-	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+	if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+		return -EBUSY;
 
 	/* Set the MII address register to write */
 	writel(phydata, priv->ioaddr + mii_data);
 	writel(value, priv->ioaddr + mii_address);
 
 	/* Wait until any existing MII operation is complete */
-	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
-
-	return 0;
+	return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
 }
 
 /**
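The new stmmac_mdio_busy_wait() replaces the unbounded do {} while (MII_BUSY) spins with a poll that gives up after a deadline. A standalone sketch of the same pattern, using time() and a fake busy flag in place of jiffies and readl():

#include <stdio.h>
#include <time.h>

/* Pretend register: reports busy for the first three polls. */
static int fake_mii_busy_reads = 3;

static int mii_is_busy(void)
{
	return fake_mii_busy_reads-- > 0;
}

/* Poll until the busy flag clears or the deadline expires (3 s here,
 * matching the driver); returns 0 on success, -1 on timeout. */
static int mdio_busy_wait(int timeout_sec)
{
	time_t finish = time(NULL) + timeout_sec;

	while (mii_is_busy()) {
		if (time(NULL) >= finish)
			return -1;
	}
	return 0;
}

int main(void)
{
	printf("busy wait: %s\n", mdio_busy_wait(3) ? "timed out" : "ok");
	return 0;
}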
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index da66ed7..58fab53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -28,6 +28,7 @@
 
 struct plat_stmmacenet_data plat_dat;
 struct stmmac_mdio_bus_data mdio_data;
+struct stmmac_dma_cfg dma_cfg;
 
 static void stmmac_default_data(void)
 {
@@ -35,7 +36,6 @@
 	plat_dat.bus_id = 1;
 	plat_dat.phy_addr = 0;
 	plat_dat.interface = PHY_INTERFACE_MODE_GMII;
-	plat_dat.pbl = 32;
 	plat_dat.clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
 	plat_dat.has_gmac = 1;
 	plat_dat.force_sf_dma_mode = 1;
@@ -44,6 +44,10 @@
 	mdio_data.phy_reset = NULL;
 	mdio_data.phy_mask = 0;
 	plat_dat.mdio_bus_data = &mdio_data;
+
+	dma_cfg.pbl = 32;
+	dma_cfg.burst_len = DMA_AXI_BLEN_256;
+	plat_dat.dma_cfg = &dma_cfg;
 }
 
 /**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 116529a..3dd8f08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -50,7 +50,6 @@
 	 * once needed on other platforms.
 	 */
 	if (of_device_is_compatible(np, "st,spear600-gmac")) {
-		plat->pbl = 8;
 		plat->has_gmac = 1;
 		plat->pmt = 1;
 	}
@@ -189,9 +188,6 @@
 	if (priv->plat->exit)
 		priv->plat->exit(pdev);
 
-	if (priv->plat->exit)
-		priv->plat->exit(pdev);
-
 	platform_set_drvdata(pdev, NULL);
 
 	iounmap((void *)priv->ioaddr);
@@ -218,14 +214,26 @@
 
 int stmmac_pltfr_freeze(struct device *dev)
 {
+	int ret;
+	struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
 	struct net_device *ndev = dev_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
 
-	return stmmac_freeze(ndev);
+	ret = stmmac_freeze(ndev);
+	if (plat_dat->exit)
+		plat_dat->exit(pdev);
+
+	return ret;
 }
 
 int stmmac_pltfr_restore(struct device *dev)
 {
+	struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
 	struct net_device *ndev = dev_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (plat_dat->init)
+		plat_dat->init(pdev);
 
 	return stmmac_restore(ndev);
 }
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 558409f..dc065fa 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -401,7 +401,7 @@
 		return 1;
 	}
 
-	udelay(5000);
+	mdelay(5);
 
 	/* Execute RX reset command. */
 	writel(gp->swrst_base | GREG_SWRST_RXRST,
@@ -2898,7 +2898,6 @@
 	}
 
 	gp->pdev = pdev;
-	dev->base_addr = (long) pdev;
 	gp->dev = dev;
 
 	gp->msg_enable = DEFAULT_MSG;
@@ -2972,7 +2971,6 @@
 	netif_napi_add(dev, &gp->napi, gem_poll, 64);
 	dev->ethtool_ops = &gem_ethtool_ops;
 	dev->watchdog_timeo = 5 * HZ;
-	dev->irq = pdev->irq;
 	dev->dma = 0;
 
 	/* Set that now, in case PM kicks in now */
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index b95e7e6..dfc00c4 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2182,11 +2182,12 @@
 	 * into a single source which we register handling at probe time.
 	 */
 	if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
-		if (request_irq(dev->irq, happy_meal_interrupt,
-				IRQF_SHARED, dev->name, (void *)dev)) {
+		res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
+				  dev->name, dev);
+		if (res) {
 			HMD(("EAGAIN\n"));
 			printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
-			       dev->irq);
+			       hp->irq);
 
 			return -EAGAIN;
 		}
@@ -2199,7 +2200,7 @@
 	spin_unlock_irq(&hp->happy_lock);
 
 	if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
-		free_irq(dev->irq, dev);
+		free_irq(hp->irq, dev);
 	return res;
 }
 
@@ -2221,7 +2222,7 @@
 	 * time and never unregister.
 	 */
 	if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
-		free_irq(dev->irq, dev);
+		free_irq(hp->irq, dev);
 
 	return 0;
 }
@@ -2777,7 +2778,7 @@
 	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
 	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
 
-	dev->irq = op->archdata.irqs[0];
+	hp->irq = op->archdata.irqs[0];
 
 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
 	/* Hook up SBUS register/descriptor accessors. */
@@ -2981,8 +2982,6 @@
 	if (hme_version_printed++ == 0)
 		printk(KERN_INFO "%s", version);
 
-	dev->base_addr = (long) pdev;
-
 	hp = netdev_priv(dev);
 
 	hp->happy_dev = pdev;
@@ -3087,12 +3086,11 @@
 
 	init_timer(&hp->happy_timer);
 
+	hp->irq = pdev->irq;
 	hp->dev = dev;
 	dev->netdev_ops = &hme_netdev_ops;
 	dev->watchdog_timeo = 5*HZ;
 	dev->ethtool_ops = &hme_ethtool_ops;
-	dev->irq = pdev->irq;
-	dev->dma = 0;
 
 	/* Happy Meal can do it all... */
 	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 64f2783..f430765 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -432,6 +432,7 @@
 
 	dma_addr_t                hblock_dvma;    /* DVMA visible address happy block  */
 	unsigned int              happy_flags;    /* Driver state flags                */
+	int                       irq;
 	enum happy_transceiver    tcvr_type;      /* Kind of transceiver in use        */
 	unsigned int              happy_bursts;   /* Get your mind out of the gutter   */
 	unsigned int              paddr;          /* PHY address for transceiver       */
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ad973ff..8846516 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1317,7 +1317,7 @@
 
 static void print_rxfd(struct rxf_desc *rxfd)
 {
-	DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n"
+	DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
 	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
 	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
 }
@@ -1988,10 +1988,6 @@
 		/* these fields are used for info purposes only
 		 * so we can have them same for all ports of the board */
 		ndev->if_port = port;
-		ndev->base_addr = pciaddr;
-		ndev->mem_start = pciaddr;
-		ndev->mem_end = pciaddr + regionSize;
-		ndev->irq = pdev->irq;
 		ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
 		    | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
 		    NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 3455876..d614c37 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -92,7 +92,7 @@
 	CPDMA_STATE_TEARDOWN,
 };
 
-const char *cpdma_state_str[] = { "idle", "active", "teardown" };
+static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
 
 struct cpdma_ctlr {
 	enum cpdma_state	state;
@@ -276,6 +276,7 @@
 		ctlr->num_chan = CPDMA_MAX_CHANNELS;
 	return ctlr;
 }
+EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
 
 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
 {
@@ -321,6 +322,7 @@
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
 
 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 {
@@ -351,6 +353,7 @@
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
 
 int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
 {
@@ -421,6 +424,7 @@
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);
 
 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 {
@@ -444,6 +448,7 @@
 	kfree(ctlr);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
 
 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
 {
@@ -528,6 +533,7 @@
 err_chan_alloc:
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_create);
 
 int cpdma_chan_destroy(struct cpdma_chan *chan)
 {
@@ -545,6 +551,7 @@
 	kfree(chan);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
 
 int cpdma_chan_get_stats(struct cpdma_chan *chan,
 			 struct cpdma_chan_stats *stats)
@@ -693,6 +700,7 @@
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_submit);
 
 static void __cpdma_chan_free(struct cpdma_chan *chan,
 			      struct cpdma_desc __iomem *desc,
@@ -776,6 +784,7 @@
 	}
 	return used;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_process);
 
 int cpdma_chan_start(struct cpdma_chan *chan)
 {
@@ -803,6 +812,7 @@
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_start);
 
 int cpdma_chan_stop(struct cpdma_chan *chan)
 {
@@ -863,6 +873,7 @@
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_stop);
 
 int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
 {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 174a334..8aa3332 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -627,6 +627,7 @@
 	.get_link = ethtool_op_get_link,
 	.get_coalesce = emac_get_coalesce,
 	.set_coalesce =  emac_set_coalesce,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 /**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 2757c7d..e4e4708 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -181,6 +181,11 @@
 		__davinci_mdio_reset(data);
 		return -EAGAIN;
 	}
+
+	reg = __raw_readl(&regs->user[0].access);
+	if ((reg & USERACCESS_GO) == 0)
+		return 0;
+
 	dev_err(data->dev, "timed out waiting for user access\n");
 	return -ETIMEDOUT;
 }
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 817ad3b..bb8b802 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -2545,7 +2545,7 @@
 
 	phy = priv->phy[priv->phy_num];
 
-	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name);
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
 	tlan_mii_sync(dev->base_addr);
 	value = MII_GC_LOOPBK | MII_GC_RESET;
 	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 261356c..3d501ec 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -342,6 +342,21 @@
 }
 
 
+static void tile_net_return_credit(struct tile_net_cpu *info)
+{
+	struct tile_netio_queue *queue = &info->queue;
+	netio_queue_user_impl_t *qup = &queue->__user_part;
+
+	/* Return four credits after every fourth packet. */
+	if (--qup->__receive_credit_remaining == 0) {
+		u32 interval = qup->__receive_credit_interval;
+		qup->__receive_credit_remaining = interval;
+		__netio_fastio_return_credits(qup->__fastio_index, interval);
+	}
+}
+
+
+
 /*
  * Provide a linux buffer to LIPP.
  */
@@ -433,7 +448,7 @@
 	struct sk_buff **skb_ptr;
 
 	/* Request 96 extra bytes for alignment purposes. */
-	skb = netdev_alloc_skb(info->napi->dev, len + padding);
+	skb = netdev_alloc_skb(info->napi.dev, len + padding);
 	if (skb == NULL)
 		return false;
 
@@ -864,19 +879,11 @@
 
 		stats->rx_packets++;
 		stats->rx_bytes += len;
-
-		if (small)
-			info->num_needed_small_buffers++;
-		else
-			info->num_needed_large_buffers++;
 	}
 
-	/* Return four credits after every fourth packet. */
-	if (--qup->__receive_credit_remaining == 0) {
-		u32 interval = qup->__receive_credit_interval;
-		qup->__receive_credit_remaining = interval;
-		__netio_fastio_return_credits(qup->__fastio_index, interval);
-	}
+	/* ISSUE: It would be nice to defer this until the packet has */
+	/* actually been processed. */
+	tile_net_return_credit(info);
 
 	/* Consume this packet. */
 	qup->__packet_receive_read = index2;
@@ -1543,7 +1550,7 @@
 
 	/* Drain all the LIPP buffers. */
 	while (true) {
-		int buffer;
+		unsigned int buffer;
 
 		/* NOTE: This should never fail. */
 		if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
@@ -1707,7 +1714,7 @@
 		if (!hash_default) {
 			void *va = pfn_to_kaddr(pfn) + f->page_offset;
 			BUG_ON(PageHighMem(skb_frag_page(f)));
-			finv_buffer_remote(va, f->size, 0);
+			finv_buffer_remote(va, skb_frag_size(f), 0);
 		}
 
 		cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
@@ -1735,8 +1742,8 @@
  * Sometimes, if "sendfile()" requires copying, we will be called with
  * "data" containing the header and payload, with "frags" being empty.
  *
- * In theory, "sh->nr_frags" could be 3, but in practice, it seems
- * that this will never actually happen.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments, which must be handled carefully in LEPP.
  *
  * See "emulate_large_send_offload()" for some reference code, which
  * does not handle checksumming.
@@ -1844,10 +1851,8 @@
 
 	spin_lock_irqsave(&priv->eq_lock, irqflags);
 
-	/*
-	 * Handle completions if needed to make room.
-	 * HACK: Spin until there is sufficient room.
-	 */
+	/* Handle completions if needed to make room. */
+	/* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
 	if (lepp_num_free_comp_slots(eq) == 0) {
 		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
 		if (nolds == 0) {
@@ -1861,6 +1866,7 @@
 	cmd_tail = eq->cmd_tail;
 
 	/* Prepare to advance, detecting full queue. */
+	/* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
 	cmd_next = cmd_tail + cmd_size;
 	if (cmd_tail < cmd_head && cmd_next >= cmd_head)
 		goto busy;
@@ -2023,10 +2029,8 @@
 
 	spin_lock_irqsave(&priv->eq_lock, irqflags);
 
-	/*
-	 * Handle completions if needed to make room.
-	 * HACK: Spin until there is sufficient room.
-	 */
+	/* Handle completions if needed to make room. */
+	/* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
 	if (lepp_num_free_comp_slots(eq) == 0) {
 		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
 		if (nolds == 0) {
@@ -2040,6 +2044,7 @@
 	cmd_tail = eq->cmd_tail;
 
 	/* Copy the commands, or fail. */
+	/* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
 	for (i = 0; i < num_frags; i++) {
 
 		/* Prepare to advance, detecting full queue. */
@@ -2261,6 +2266,23 @@
 	return 0;
 }
 
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+	disable_percpu_irq(priv->intr_id);
+	tile_net_handle_ingress_interrupt(priv->intr_id, dev);
+	enable_percpu_irq(priv->intr_id, 0);
+}
+#endif
+
+
 static const struct net_device_ops tile_net_ops = {
 	.ndo_open = tile_net_open,
 	.ndo_stop = tile_net_stop,
@@ -2269,7 +2291,10 @@
 	.ndo_get_stats = tile_net_get_stats,
 	.ndo_change_mtu = tile_net_change_mtu,
 	.ndo_tx_timeout = tile_net_tx_timeout,
-	.ndo_set_mac_address = tile_net_set_mac_address
+	.ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = tile_net_netpoll,
+#endif
 };
 
 
@@ -2409,7 +2434,7 @@
  */
 static int tile_net_init_module(void)
 {
-	pr_info("Tilera IPP Net Driver\n");
+	pr_info("Tilera Network Driver\n");
 
 	tile_net_devs[0] = tile_net_dev_init("xgbe0");
 	tile_net_devs[1] = tile_net_dev_init("xgbe1");
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index fcfa01f..0459c09 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -689,9 +689,12 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void rhine_poll(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	rhine_interrupt(dev->irq, (void *)dev);
-	enable_irq(dev->irq);
+	struct rhine_private *rp = netdev_priv(dev);
+	const int irq = rp->pdev->irq;
+
+	disable_irq(irq);
+	rhine_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -972,7 +975,6 @@
 	}
 #endif /* USE_MMIO */
 
-	dev->base_addr = (unsigned long)ioaddr;
 	rp->base = ioaddr;
 
 	/* Get chip registers into a sane state */
@@ -995,8 +997,6 @@
 	if (!phy_id)
 		phy_id = ioread8(ioaddr + 0x6C);
 
-	dev->irq = pdev->irq;
-
 	spin_lock_init(&rp->lock);
 	mutex_init(&rp->task_lock);
 	INIT_WORK(&rp->reset_task, rhine_reset_task);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 8a5d7c1..ea3e0a2 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2488,8 +2488,8 @@
 
 	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
 		velocity_get_ip(vptr);
-	if (dev->irq != 0)
-		free_irq(dev->irq, dev);
+
+	free_irq(vptr->pdev->irq, dev);
 
 	velocity_free_rings(vptr);
 
@@ -2755,8 +2755,6 @@
 	if (ret < 0)
 		goto err_free_dev;
 
-	dev->irq = pdev->irq;
-
 	ret = velocity_get_pci_info(vptr, pdev);
 	if (ret < 0) {
 		/* error message already printed */
@@ -2779,8 +2777,6 @@
 
 	mac_wol_reset(regs);
 
-	dev->base_addr = vptr->ioaddr;
-
 	for (i = 0; i < 6; i++)
 		dev->dev_addr[i] = readb(&regs->PAR[i]);
 
@@ -2806,7 +2802,6 @@
 
 	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
 
-	dev->irq = pdev->irq;
 	dev->netdev_ops = &velocity_netdev_ops;
 	dev->ethtool_ops = &velocity_ethtool_ops;
 	netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
new file mode 100644
index 0000000..cb18043
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -0,0 +1,73 @@
+#
+# WIZnet devices configuration
+#
+
+config NET_VENDOR_WIZNET
+	bool "WIZnet devices"
+	default y
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about WIZnet devices. If you say Y, you will be asked
+	  for your specific card in the following questions.
+
+if NET_VENDOR_WIZNET
+
+config WIZNET_W5100
+	tristate "WIZnet W5100 Ethernet support"
+	depends on HAS_IOMEM
+	---help---
+	  Support for WIZnet W5100 chips.
+
+	  W5100 is a single chip with an integrated 10/100 Ethernet MAC,
+	  PHY and hardware TCP/IP stack, but this driver uses only the
+	  MAC and PHY functions; the on-chip TCP/IP stack is unused.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called w5100.
+
+config WIZNET_W5300
+	tristate "WIZnet W5300 Ethernet support"
+	depends on HAS_IOMEM
+	---help---
+	  Support for WIZnet W5300 chips.
+
+	  W5300 is a single chip with an integrated 10/100 Ethernet MAC,
+	  PHY and hardware TCP/IP stack, but this driver uses only the
+	  MAC and PHY functions; the on-chip TCP/IP stack is unused.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called w5300.
+
+choice
+	prompt "WIZnet interface mode"
+	depends on WIZNET_W5100 || WIZNET_W5300
+	default WIZNET_BUS_ANY
+
+config WIZNET_BUS_DIRECT
+	bool "Direct address bus mode"
+	---help---
+	  In direct address mode the host system can directly access all
+	  registers after they are mapped into Memory-Mapped I/O space.
+
+config WIZNET_BUS_INDIRECT
+	bool "Indirect address bus mode"
+	---help---
+	  In indirect address mode the host system accesses registers
+	  indirectly, using the Indirect Mode Address Register and the
+	  Indirect Mode Data Register, which are directly mapped into
+	  Memory-Mapped I/O space.
+
+config WIZNET_BUS_ANY
+	bool "Select interface mode at runtime"
+	---help---
+	  If the interface mode is not known at compile time, it can be
+	  selected at runtime from the board/platform resources
+	  configuration.
+
+	  Performance may decrease compared to an explicitly selected
+	  bus mode.
+endchoice
+
+endif # NET_VENDOR_WIZNET
diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile
new file mode 100644
index 0000000..c614535
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_WIZNET_W5100) += w5100.o
+obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
new file mode 100644
index 0000000..a75e9ef
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -0,0 +1,808 @@
+/*
+ * Ethernet driver for the WIZnet W5100 chip.
+ *
+ * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kconfig.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/wiznet.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+
+#define DRV_NAME	"w5100"
+#define DRV_VERSION	"2012-04-04"
+
+MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
+MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
+MODULE_ALIAS("platform:"DRV_NAME);
+MODULE_LICENSE("GPL");
+
+/*
+ * Registers
+ */
+#define W5100_COMMON_REGS	0x0000
+#define W5100_MR		0x0000 /* Mode Register */
+#define   MR_RST		  0x80 /* S/W reset */
+#define   MR_PB			  0x10 /* Ping block */
+#define   MR_AI			  0x02 /* Address Auto-Increment */
+#define   MR_IND		  0x01 /* Indirect mode */
+#define W5100_SHAR		0x0009 /* Source MAC address */
+#define W5100_IR		0x0015 /* Interrupt Register */
+#define W5100_IMR		0x0016 /* Interrupt Mask Register */
+#define   IR_S0			  0x01 /* S0 interrupt */
+#define W5100_RTR		0x0017 /* Retry Time-value Register */
+#define   RTR_DEFAULT		  2000 /* =0x07d0 (2000) */
+#define W5100_RMSR		0x001a /* Receive Memory Size */
+#define W5100_TMSR		0x001b /* Transmit Memory Size */
+#define W5100_COMMON_REGS_LEN	0x0040
+
+#define W5100_S0_REGS		0x0400
+#define W5100_S0_MR		0x0400 /* S0 Mode Register */
+#define   S0_MR_MACRAW		  0x04 /* MAC RAW mode (promiscuous) */
+#define   S0_MR_MACRAW_MF	  0x44 /* MAC RAW mode (filtered) */
+#define W5100_S0_CR		0x0401 /* S0 Command Register */
+#define   S0_CR_OPEN		  0x01 /* OPEN command */
+#define   S0_CR_CLOSE		  0x10 /* CLOSE command */
+#define   S0_CR_SEND		  0x20 /* SEND command */
+#define   S0_CR_RECV		  0x40 /* RECV command */
+#define W5100_S0_IR		0x0402 /* S0 Interrupt Register */
+#define   S0_IR_SENDOK		  0x10 /* complete sending */
+#define   S0_IR_RECV		  0x04 /* receiving data */
+#define W5100_S0_SR		0x0403 /* S0 Status Register */
+#define   S0_SR_MACRAW		  0x42 /* mac raw mode */
+#define W5100_S0_TX_FSR		0x0420 /* S0 Transmit free memory size */
+#define W5100_S0_TX_RD		0x0422 /* S0 Transmit memory read pointer */
+#define W5100_S0_TX_WR		0x0424 /* S0 Transmit memory write pointer */
+#define W5100_S0_RX_RSR		0x0426 /* S0 Receive free memory size */
+#define W5100_S0_RX_RD		0x0428 /* S0 Receive memory read pointer */
+#define W5100_S0_REGS_LEN	0x0040
+
+#define W5100_TX_MEM_START	0x4000
+#define W5100_TX_MEM_END	0x5fff
+#define W5100_TX_MEM_MASK	0x1fff
+#define W5100_RX_MEM_START	0x6000
+#define W5100_RX_MEM_END	0x7fff
+#define W5100_RX_MEM_MASK	0x1fff
+
+/*
+ * Device driver private data structure
+ */
+struct w5100_priv {
+	void __iomem *base;
+	spinlock_t reg_lock;
+	bool indirect;
+	u8   (*read)(struct w5100_priv *priv, u16 addr);
+	void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
+	u16  (*read16)(struct w5100_priv *priv, u16 addr);
+	void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
+	void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+	void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+	int irq;
+	int link_irq;
+	int link_gpio;
+
+	struct napi_struct napi;
+	struct net_device *ndev;
+	bool promisc;
+	u32 msg_enable;
+};
+
+/************************************************************************
+ *
+ *  Lowlevel I/O functions
+ *
+ ***********************************************************************/
+
+/*
+ * In direct address mode the host system can directly access W5100
+ * registers after they are mapped into Memory-Mapped I/O space.
+ *
+ * 0x8000 bytes of memory space are required.
+ */
+static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
+{
+	return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+static inline void w5100_write_direct(struct w5100_priv *priv,
+				      u16 addr, u8 data)
+{
+	iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
+{
+	u16 data;
+	data  = w5100_read_direct(priv, addr) << 8;
+	data |= w5100_read_direct(priv, addr + 1);
+	return data;
+}
+
+static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
+{
+	w5100_write_direct(priv, addr, data >> 8);
+	w5100_write_direct(priv, addr + 1, data);
+}
+
+static void w5100_readbuf_direct(struct w5100_priv *priv,
+				 u16 offset, u8 *buf, int len)
+{
+	u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+	int i;
+
+	for (i = 0; i < len; i++, addr++) {
+		if (unlikely(addr > W5100_RX_MEM_END))
+			addr = W5100_RX_MEM_START;
+		*buf++ = w5100_read_direct(priv, addr);
+	}
+}
+
+static void w5100_writebuf_direct(struct w5100_priv *priv,
+				  u16 offset, u8 *buf, int len)
+{
+	u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+	int i;
+
+	for (i = 0; i < len; i++, addr++) {
+		if (unlikely(addr > W5100_TX_MEM_END))
+			addr = W5100_TX_MEM_START;
+		w5100_write_direct(priv, addr, *buf++);
+	}
+}
+
+/*
+ * In indirect address mode the host system accesses registers indirectly,
+ * via the Indirect Mode Address Register (IDM_AR) and the Indirect Mode
+ * Data Register (IDM_DR), which are directly mapped into Memory-Mapped
+ * I/O space. The Mode Register (MR) is directly accessible.
+ *
+ * Only 0x04 bytes of memory space are required.
+ */
+#define W5100_IDM_AR		0x01   /* Indirect Mode Address Register */
+#define W5100_IDM_DR		0x03   /* Indirect Mode Data Register */
+
+static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
+{
+	unsigned long flags;
+	u8 data;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+	data = w5100_read_direct(priv, W5100_IDM_DR);
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+
+	return data;
+}
+
+static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+	w5100_write_direct(priv, W5100_IDM_DR, data);
+	mmiowb();
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
+{
+	unsigned long flags;
+	u16 data;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+	data  = w5100_read_direct(priv, W5100_IDM_DR) << 8;
+	data |= w5100_read_direct(priv, W5100_IDM_DR);
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+
+	return data;
+}
+
+static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+	w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
+	w5100_write_direct(priv, W5100_IDM_DR, data);
+	mmiowb();
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+static void w5100_readbuf_indirect(struct w5100_priv *priv,
+				   u16 offset, u8 *buf, int len)
+{
+	u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+
+	for (i = 0; i < len; i++, addr++) {
+		if (unlikely(addr > W5100_RX_MEM_END)) {
+			addr = W5100_RX_MEM_START;
+			w5100_write16_direct(priv, W5100_IDM_AR, addr);
+			mmiowb();
+		}
+		*buf++ = w5100_read_direct(priv, W5100_IDM_DR);
+	}
+	mmiowb();
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+static void w5100_writebuf_indirect(struct w5100_priv *priv,
+				    u16 offset, u8 *buf, int len)
+{
+	u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+
+	for (i = 0; i < len; i++, addr++) {
+		if (unlikely(addr > W5100_TX_MEM_END)) {
+			addr = W5100_TX_MEM_START;
+			w5100_write16_direct(priv, W5100_IDM_AR, addr);
+			mmiowb();
+		}
+		w5100_write_direct(priv, W5100_IDM_DR, *buf++);
+	}
+	mmiowb();
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+#if defined(CONFIG_WIZNET_BUS_DIRECT)
+#define w5100_read	w5100_read_direct
+#define w5100_write	w5100_write_direct
+#define w5100_read16	w5100_read16_direct
+#define w5100_write16	w5100_write16_direct
+#define w5100_readbuf	w5100_readbuf_direct
+#define w5100_writebuf	w5100_writebuf_direct
+
+#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
+#define w5100_read	w5100_read_indirect
+#define w5100_write	w5100_write_indirect
+#define w5100_read16	w5100_read16_indirect
+#define w5100_write16	w5100_write16_indirect
+#define w5100_readbuf	w5100_readbuf_indirect
+#define w5100_writebuf	w5100_writebuf_indirect
+
+#else /* CONFIG_WIZNET_BUS_ANY */
+#define w5100_read	priv->read
+#define w5100_write	priv->write
+#define w5100_read16	priv->read16
+#define w5100_write16	priv->write16
+#define w5100_readbuf	priv->readbuf
+#define w5100_writebuf	priv->writebuf
+#endif
+
+static int w5100_command(struct w5100_priv *priv, u16 cmd)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+	w5100_write(priv, W5100_S0_CR, cmd);
+	mmiowb();
+
+	while (w5100_read(priv, W5100_S0_CR) != 0) {
+		if (time_after(jiffies, timeout))
+			return -EIO;
+		cpu_relax();
+	}
+
+	return 0;
+}
+
+static void w5100_write_macaddr(struct w5100_priv *priv)
+{
+	struct net_device *ndev = priv->ndev;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
+	mmiowb();
+}
+
+static void w5100_hw_reset(struct w5100_priv *priv)
+{
+	w5100_write_direct(priv, W5100_MR, MR_RST);
+	mmiowb();
+	mdelay(5);
+	w5100_write_direct(priv, W5100_MR, priv->indirect ?
+				  MR_PB | MR_AI | MR_IND :
+				  MR_PB);
+	mmiowb();
+	w5100_write(priv, W5100_IMR, 0);
+	w5100_write_macaddr(priv);
+
+	/* Configure 16K of internal memory
+	 * as 8K RX buffer and 8K TX buffer
+	 */
+	w5100_write(priv, W5100_RMSR, 0x03);
+	w5100_write(priv, W5100_TMSR, 0x03);
+	mmiowb();
+}
+
+static void w5100_hw_start(struct w5100_priv *priv)
+{
+	w5100_write(priv, W5100_S0_MR, priv->promisc ?
+			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
+	mmiowb();
+	w5100_command(priv, S0_CR_OPEN);
+	w5100_write(priv, W5100_IMR, IR_S0);
+	mmiowb();
+}
+
+static void w5100_hw_close(struct w5100_priv *priv)
+{
+	w5100_write(priv, W5100_IMR, 0);
+	mmiowb();
+	w5100_command(priv, S0_CR_CLOSE);
+}
+
+/***********************************************************************
+ *
+ *   Device driver functions / callbacks
+ *
+ ***********************************************************************/
+
+static void w5100_get_drvinfo(struct net_device *ndev,
+			      struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+		sizeof(info->bus_info));
+}
+
+static u32 w5100_get_link(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	if (gpio_is_valid(priv->link_gpio))
+		return !!gpio_get_value(priv->link_gpio);
+
+	return 1;
+}
+
+static u32 w5100_get_msglevel(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	return priv->msg_enable;
+}
+
+static void w5100_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	priv->msg_enable = value;
+}
+
+static int w5100_get_regs_len(struct net_device *ndev)
+{
+	return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
+}
+
+static void w5100_get_regs(struct net_device *ndev,
+			   struct ethtool_regs *regs, void *_buf)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+	u8 *buf = _buf;
+	u16 i;
+
+	regs->version = 1;
+	for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
+		*buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
+	for (i = 0; i < W5100_S0_REGS_LEN; i++)
+		*buf++ = w5100_read(priv, W5100_S0_REGS + i);
+}
+
+static void w5100_tx_timeout(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+	w5100_hw_reset(priv);
+	w5100_hw_start(priv);
+	ndev->stats.tx_errors++;
+	ndev->trans_start = jiffies;
+	netif_wake_queue(ndev);
+}
+
+static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+	u16 offset;
+
+	netif_stop_queue(ndev);
+
+	offset = w5100_read16(priv, W5100_S0_TX_WR);
+	w5100_writebuf(priv, offset, skb->data, skb->len);
+	w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
+	mmiowb();
+	ndev->stats.tx_bytes += skb->len;
+	ndev->stats.tx_packets++;
+	dev_kfree_skb(skb);
+
+	w5100_command(priv, S0_CR_SEND);
+
+	return NETDEV_TX_OK;
+}
+
+static int w5100_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
+	struct net_device *ndev = priv->ndev;
+	struct sk_buff *skb;
+	int rx_count;
+	u16 rx_len;
+	u16 offset;
+	u8 header[2];
+
+	for (rx_count = 0; rx_count < budget; rx_count++) {
+		u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
+		if (rx_buf_len == 0)
+			break;
+
+		offset = w5100_read16(priv, W5100_S0_RX_RD);
+		w5100_readbuf(priv, offset, header, 2);
+		rx_len = get_unaligned_be16(header) - 2;
+
+		skb = netdev_alloc_skb_ip_align(ndev, rx_len);
+		if (unlikely(!skb)) {
+			w5100_write16(priv, W5100_S0_RX_RD,
+					    offset + rx_buf_len);
+			w5100_command(priv, S0_CR_RECV);
+			ndev->stats.rx_dropped++;
+			return -ENOMEM;
+		}
+
+		skb_put(skb, rx_len);
+		w5100_readbuf(priv, offset + 2, skb->data, rx_len);
+		w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
+		mmiowb();
+		w5100_command(priv, S0_CR_RECV);
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		netif_receive_skb(skb);
+		ndev->stats.rx_packets++;
+		ndev->stats.rx_bytes += rx_len;
+	}
+
+	if (rx_count < budget) {
+		w5100_write(priv, W5100_IMR, IR_S0);
+		mmiowb();
+		napi_complete(napi);
+	}
+
+	return rx_count;
+}
+
+static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
+{
+	struct net_device *ndev = ndev_instance;
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	int ir = w5100_read(priv, W5100_S0_IR);
+	if (!ir)
+		return IRQ_NONE;
+	w5100_write(priv, W5100_S0_IR, ir);
+	mmiowb();
+
+	if (ir & S0_IR_SENDOK) {
+		netif_dbg(priv, tx_done, ndev, "tx done\n");
+		netif_wake_queue(ndev);
+	}
+
+	if (ir & S0_IR_RECV) {
+		if (napi_schedule_prep(&priv->napi)) {
+			w5100_write(priv, W5100_IMR, 0);
+			mmiowb();
+			__napi_schedule(&priv->napi);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
+{
+	struct net_device *ndev = ndev_instance;
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		if (gpio_get_value(priv->link_gpio) != 0) {
+			netif_info(priv, link, ndev, "link is up\n");
+			netif_carrier_on(ndev);
+		} else {
+			netif_info(priv, link, ndev, "link is down\n");
+			netif_carrier_off(ndev);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void w5100_set_rx_mode(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+	bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
+
+	if (priv->promisc != set_promisc) {
+		priv->promisc = set_promisc;
+		w5100_hw_start(priv);
+	}
+}
+
+static int w5100_set_macaddr(struct net_device *ndev, void *addr)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+	struct sockaddr *sock_addr = addr;
+
+	if (!is_valid_ether_addr(sock_addr->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+	ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
+	w5100_write_macaddr(priv);
+	return 0;
+}
+
+static int w5100_open(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	netif_info(priv, ifup, ndev, "enabling\n");
+	if (!is_valid_ether_addr(ndev->dev_addr))
+		return -EINVAL;
+	w5100_hw_start(priv);
+	napi_enable(&priv->napi);
+	netif_start_queue(ndev);
+	if (!gpio_is_valid(priv->link_gpio) ||
+	    gpio_get_value(priv->link_gpio) != 0)
+		netif_carrier_on(ndev);
+	return 0;
+}
+
+static int w5100_stop(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	netif_info(priv, ifdown, ndev, "shutting down\n");
+	w5100_hw_close(priv);
+	netif_carrier_off(ndev);
+	netif_stop_queue(ndev);
+	napi_disable(&priv->napi);
+	return 0;
+}
+
+static const struct ethtool_ops w5100_ethtool_ops = {
+	.get_drvinfo		= w5100_get_drvinfo,
+	.get_msglevel		= w5100_get_msglevel,
+	.set_msglevel		= w5100_set_msglevel,
+	.get_link		= w5100_get_link,
+	.get_regs_len		= w5100_get_regs_len,
+	.get_regs		= w5100_get_regs,
+};
+
+static const struct net_device_ops w5100_netdev_ops = {
+	.ndo_open		= w5100_open,
+	.ndo_stop		= w5100_stop,
+	.ndo_start_xmit		= w5100_start_tx,
+	.ndo_tx_timeout		= w5100_tx_timeout,
+	.ndo_set_rx_mode	= w5100_set_rx_mode,
+	.ndo_set_mac_address	= w5100_set_macaddr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+};
+
+static int __devinit w5100_hw_probe(struct platform_device *pdev)
+{
+	struct wiznet_platform_data *data = pdev->dev.platform_data;
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5100_priv *priv = netdev_priv(ndev);
+	const char *name = netdev_name(ndev);
+	struct resource *mem;
+	int mem_size;
+	int irq;
+	int ret;
+
+	if (data && is_valid_ether_addr(data->mac_addr)) {
+		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
+	} else {
+		random_ether_addr(ndev->dev_addr);
+		ndev->addr_assign_type |= NET_ADDR_RANDOM;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem)
+		return -ENXIO;
+	mem_size = resource_size(mem);
+	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
+		return -EBUSY;
+	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
+	if (!priv->base)
+		return -EBUSY;
+
+	spin_lock_init(&priv->reg_lock);
+	priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
+	if (priv->indirect) {
+		priv->read     = w5100_read_indirect;
+		priv->write    = w5100_write_indirect;
+		priv->read16   = w5100_read16_indirect;
+		priv->write16  = w5100_write16_indirect;
+		priv->readbuf  = w5100_readbuf_indirect;
+		priv->writebuf = w5100_writebuf_indirect;
+	} else {
+		priv->read     = w5100_read_direct;
+		priv->write    = w5100_write_direct;
+		priv->read16   = w5100_read16_direct;
+		priv->write16  = w5100_write16_direct;
+		priv->readbuf  = w5100_readbuf_direct;
+		priv->writebuf = w5100_writebuf_direct;
+	}
+
+	w5100_hw_reset(priv);
+	if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+	ret = request_irq(irq, w5100_interrupt,
+			  IRQ_TYPE_LEVEL_LOW, name, ndev);
+	if (ret < 0)
+		return ret;
+	priv->irq = irq;
+
+	priv->link_gpio = data ? data->link_gpio : -EINVAL;
+	if (gpio_is_valid(priv->link_gpio)) {
+		char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
+		if (!link_name)
+			return -ENOMEM;
+		snprintf(link_name, 16, "%s-link", name);
+		priv->link_irq = gpio_to_irq(priv->link_gpio);
+		if (request_any_context_irq(priv->link_irq, w5100_detect_link,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				link_name, priv->ndev) < 0)
+			priv->link_gpio = -EINVAL;
+	}
+
+	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
+	return 0;
+}
+
+static int __devinit w5100_probe(struct platform_device *pdev)
+{
+	struct w5100_priv *priv;
+	struct net_device *ndev;
+	int err;
+
+	ndev = alloc_etherdev(sizeof(*priv));
+	if (!ndev)
+		return -ENOMEM;
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	platform_set_drvdata(pdev, ndev);
+	priv = netdev_priv(ndev);
+	priv->ndev = ndev;
+
+	ether_setup(ndev);
+	ndev->netdev_ops = &w5100_netdev_ops;
+	ndev->ethtool_ops = &w5100_ethtool_ops;
+	ndev->watchdog_timeo = HZ;
+	netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
+
+	/* This chip doesn't support VLAN packets with normal MTU,
+	 * so disable VLAN for this device.
+	 */
+	ndev->features |= NETIF_F_VLAN_CHALLENGED;
+
+	err = register_netdev(ndev);
+	if (err < 0)
+		goto err_register;
+
+	err = w5100_hw_probe(pdev);
+	if (err < 0)
+		goto err_hw_probe;
+
+	return 0;
+
+err_hw_probe:
+	unregister_netdev(ndev);
+err_register:
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static int __devexit w5100_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	w5100_hw_reset(priv);
+	free_irq(priv->irq, ndev);
+	if (gpio_is_valid(priv->link_gpio))
+		free_irq(priv->link_irq, ndev);
+
+	unregister_netdev(ndev);
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int w5100_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		netif_carrier_off(ndev);
+		netif_device_detach(ndev);
+
+		w5100_hw_close(priv);
+	}
+	return 0;
+}
+
+static int w5100_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		w5100_hw_reset(priv);
+		w5100_hw_start(priv);
+
+		netif_device_attach(ndev);
+		if (!gpio_is_valid(priv->link_gpio) ||
+		    gpio_get_value(priv->link_gpio) != 0)
+			netif_carrier_on(ndev);
+	}
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+
+static struct platform_driver w5100_driver = {
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= &w5100_pm_ops,
+	},
+	.probe		= w5100_probe,
+	.remove		= __devexit_p(w5100_remove),
+};
+
+module_platform_driver(w5100_driver);
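[Editorial note, not part of the patch] The w5100 driver above is probed purely from platform resources: one IORESOURCE_MEM window (whose size selects direct vs. indirect bus access via W5100_BUS_DIRECT_SIZE in <linux/platform_data/wiznet.h>), one interrupt, and an optional struct wiznet_platform_data carrying the MAC address and a link-detect GPIO. Below is a minimal sketch of how a board file might register such a device; the base address, IRQ and GPIO numbers are placeholders, not taken from any real board, and the same pattern applies to the w5300 driver that follows.

/* Hypothetical board-support sketch; addresses, IRQ and GPIO numbers
 * are illustrative placeholders only.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/platform_data/wiznet.h>

static struct resource board_w5100_resources[] = {
	{
		/* Chip-select window: mapping the chip's full direct-mode
		 * range (W5100_BUS_DIRECT_SIZE) lets w5100_hw_probe() pick
		 * direct access; a smaller window forces indirect mode.
		 */
		.start	= 0x30000000,		/* example bus address */
		.end	= 0x30000000 + W5100_BUS_DIRECT_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 42,			/* example interrupt line */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct wiznet_platform_data board_w5100_data = {
	.link_gpio	= 15,			/* example GPIO, optional */
	/* .mac_addr left zeroed: the driver falls back to a random MAC */
};

static struct platform_device board_w5100_device = {
	.name		= "w5100",		/* must match DRV_NAME in w5100.c */
	.id		= -1,
	.dev		= {
		.platform_data	= &board_w5100_data,
	},
	.num_resources	= ARRAY_SIZE(board_w5100_resources),
	.resource	= board_w5100_resources,
};

A board init function would then call platform_device_register(&board_w5100_device) so that the MODULE_ALIAS("platform:w5100") binding above matches it.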
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
new file mode 100644
index 0000000..3306a20
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -0,0 +1,720 @@
+/*
+ * Ethernet driver for the WIZnet W5300 chip.
+ *
+ * Copyright (C) 2008-2009 WIZnet Co.,Ltd.
+ * Copyright (C) 2011 Taehun Kim <kth3321 <at> gmail.com>
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kconfig.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/wiznet.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+
+#define DRV_NAME	"w5300"
+#define DRV_VERSION	"2012-04-04"
+
+MODULE_DESCRIPTION("WIZnet W5300 Ethernet driver v"DRV_VERSION);
+MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
+MODULE_ALIAS("platform:"DRV_NAME);
+MODULE_LICENSE("GPL");
+
+/*
+ * Registers
+ */
+#define W5300_MR		0x0000	/* Mode Register */
+#define   MR_DBW		  (1 << 15) /* Data bus width */
+#define   MR_MPF		  (1 << 14) /* Mac layer pause frame */
+#define   MR_WDF(n)		  (((n)&7)<<11) /* Write data fetch time */
+#define   MR_RDH		  (1 << 10) /* Read data hold time */
+#define   MR_FS			  (1 << 8)  /* FIFO swap */
+#define   MR_RST		  (1 << 7)  /* S/W reset */
+#define   MR_PB			  (1 << 4)  /* Ping block */
+#define   MR_DBS		  (1 << 2)  /* Data bus swap */
+#define   MR_IND		  (1 << 0)  /* Indirect mode */
+#define W5300_IR		0x0002	/* Interrupt Register */
+#define W5300_IMR		0x0004	/* Interrupt Mask Register */
+#define   IR_S0			  0x0001  /* S0 interrupt */
+#define W5300_SHARL		0x0008	/* Source MAC address (0123) */
+#define W5300_SHARH		0x000c	/* Source MAC address (45) */
+#define W5300_TMSRL		0x0020	/* Transmit Memory Size (0123) */
+#define W5300_TMSRH		0x0024	/* Transmit Memory Size (4567) */
+#define W5300_RMSRL		0x0028	/* Receive Memory Size (0123) */
+#define W5300_RMSRH		0x002c	/* Receive Memory Size (4567) */
+#define W5300_MTYPE		0x0030	/* Memory Type */
+#define W5300_IDR		0x00fe	/* Chip ID register */
+#define   IDR_W5300		  0x5300  /* =0x5300 for WIZnet W5300 */
+#define W5300_S0_MR		0x0200	/* S0 Mode Register */
+#define   S0_MR_CLOSED		  0x0000  /* Close mode */
+#define   S0_MR_MACRAW		  0x0004  /* MAC RAW mode (promiscuous) */
+#define   S0_MR_MACRAW_MF	  0x0044  /* MAC RAW mode (filtered) */
+#define W5300_S0_CR		0x0202	/* S0 Command Register */
+#define   S0_CR_OPEN		  0x0001  /* OPEN command */
+#define   S0_CR_CLOSE		  0x0010  /* CLOSE command */
+#define   S0_CR_SEND		  0x0020  /* SEND command */
+#define   S0_CR_RECV		  0x0040  /* RECV command */
+#define W5300_S0_IMR		0x0204	/* S0 Interrupt Mask Register */
+#define W5300_S0_IR		0x0206	/* S0 Interrupt Register */
+#define   S0_IR_RECV		  0x0004  /* Receive interrupt */
+#define   S0_IR_SENDOK		  0x0010  /* Send OK interrupt */
+#define W5300_S0_SSR		0x0208	/* S0 Socket Status Register */
+#define W5300_S0_TX_WRSR	0x0220	/* S0 TX Write Size Register */
+#define W5300_S0_TX_FSR		0x0224	/* S0 TX Free Size Register */
+#define W5300_S0_RX_RSR		0x0228	/* S0 Received data Size */
+#define W5300_S0_TX_FIFO	0x022e	/* S0 Transmit FIFO */
+#define W5300_S0_RX_FIFO	0x0230	/* S0 Receive FIFO */
+#define W5300_REGS_LEN		0x0400
+
+/*
+ * Device driver private data structure
+ */
+struct w5300_priv {
+	void __iomem *base;
+	spinlock_t reg_lock;
+	bool indirect;
+	u16  (*read) (struct w5300_priv *priv, u16 addr);
+	void (*write)(struct w5300_priv *priv, u16 addr, u16 data);
+	int irq;
+	int link_irq;
+	int link_gpio;
+
+	struct napi_struct napi;
+	struct net_device *ndev;
+	bool promisc;
+	u32 msg_enable;
+};
+
+/************************************************************************
+ *
+ *  Low-level I/O functions
+ *
+ ***********************************************************************/
+
+/*
+ * In direct address mode the host system can access W5300 registers
+ * directly, once they have been mapped into memory-mapped I/O space.
+ *
+ * 0x400 bytes of address space are required.
+ */
+static inline u16 w5300_read_direct(struct w5300_priv *priv, u16 addr)
+{
+	return ioread16(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+static inline void w5300_write_direct(struct w5300_priv *priv,
+				      u16 addr, u16 data)
+{
+	iowrite16(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+/*
+ * In indirect address mode the host system accesses registers indirectly,
+ * through the Indirect Mode Address Register (IDM_AR) and the Indirect Mode
+ * Data Register (IDM_DR), which are mapped into memory-mapped I/O space.
+ * The Mode Register (MR) remains directly accessible.
+ *
+ * Only 0x06 bytes of address space are required.
+ */
+#define W5300_IDM_AR		0x0002	 /* Indirect Mode Address */
+#define W5300_IDM_DR		0x0004	 /* Indirect Mode Data */
+
+static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr)
+{
+	unsigned long flags;
+	u16 data;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5300_write_direct(priv, W5300_IDM_AR, addr);
+	mmiowb();
+	data = w5300_read_direct(priv, W5300_IDM_DR);
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+
+	return data;
+}
+
+static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5300_write_direct(priv, W5300_IDM_AR, addr);
+	mmiowb();
+	w5300_write_direct(priv, W5300_IDM_DR, data);
+	mmiowb();
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+#if defined(CONFIG_WIZNET_BUS_DIRECT)
+#define w5300_read	w5300_read_direct
+#define w5300_write	w5300_write_direct
+
+#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
+#define w5300_read	w5300_read_indirect
+#define w5300_write	w5300_write_indirect
+
+#else /* CONFIG_WIZNET_BUS_ANY */
+#define w5300_read	priv->read
+#define w5300_write	priv->write
+#endif
+
+static u32 w5300_read32(struct w5300_priv *priv, u16 addr)
+{
+	u32 data;
+	data  = w5300_read(priv, addr) << 16;
+	data |= w5300_read(priv, addr + 2);
+	return data;
+}
+
+static void w5300_write32(struct w5300_priv *priv, u16 addr, u32 data)
+{
+	w5300_write(priv, addr, data >> 16);
+	w5300_write(priv, addr + 2, data);
+}
+
+static int w5300_command(struct w5300_priv *priv, u16 cmd)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+	w5300_write(priv, W5300_S0_CR, cmd);
+	mmiowb();
+
+	while (w5300_read(priv, W5300_S0_CR) != 0) {
+		if (time_after(jiffies, timeout))
+			return -EIO;
+		cpu_relax();
+	}
+
+	return 0;
+}
+
+static void w5300_read_frame(struct w5300_priv *priv, u8 *buf, int len)
+{
+	u16 fifo;
+	int i;
+
+	for (i = 0; i < len; i += 2) {
+		fifo = w5300_read(priv, W5300_S0_RX_FIFO);
+		*buf++ = fifo >> 8;
+		*buf++ = fifo;
+	}
+	fifo = w5300_read(priv, W5300_S0_RX_FIFO);
+	fifo = w5300_read(priv, W5300_S0_RX_FIFO);
+}
+
+static void w5300_write_frame(struct w5300_priv *priv, u8 *buf, int len)
+{
+	u16 fifo;
+	int i;
+
+	for (i = 0; i < len; i += 2) {
+		fifo  = *buf++ << 8;
+		fifo |= *buf++;
+		w5300_write(priv, W5300_S0_TX_FIFO, fifo);
+	}
+	w5300_write32(priv, W5300_S0_TX_WRSR, len);
+}
+
+static void w5300_write_macaddr(struct w5300_priv *priv)
+{
+	struct net_device *ndev = priv->ndev;
+	w5300_write32(priv, W5300_SHARL,
+		      ndev->dev_addr[0] << 24 |
+		      ndev->dev_addr[1] << 16 |
+		      ndev->dev_addr[2] << 8 |
+		      ndev->dev_addr[3]);
+	w5300_write(priv, W5300_SHARH,
+		      ndev->dev_addr[4] << 8 |
+		      ndev->dev_addr[5]);
+	mmiowb();
+}
+
+static void w5300_hw_reset(struct w5300_priv *priv)
+{
+	w5300_write_direct(priv, W5300_MR, MR_RST);
+	mmiowb();
+	mdelay(5);
+	w5300_write_direct(priv, W5300_MR, priv->indirect ?
+				 MR_WDF(7) | MR_PB | MR_IND :
+				 MR_WDF(7) | MR_PB);
+	mmiowb();
+	w5300_write(priv, W5300_IMR, 0);
+	w5300_write_macaddr(priv);
+
+	/* Configure 128K of internal memory
+	 * as 64K RX fifo and 64K TX fifo
+	 */
+	w5300_write32(priv, W5300_RMSRL, 64 << 24);
+	w5300_write32(priv, W5300_RMSRH, 0);
+	w5300_write32(priv, W5300_TMSRL, 64 << 24);
+	w5300_write32(priv, W5300_TMSRH, 0);
+	w5300_write(priv, W5300_MTYPE, 0x00ff);
+	mmiowb();
+}
+
+static void w5300_hw_start(struct w5300_priv *priv)
+{
+	w5300_write(priv, W5300_S0_MR, priv->promisc ?
+			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
+	mmiowb();
+	w5300_command(priv, S0_CR_OPEN);
+	w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
+	w5300_write(priv, W5300_IMR, IR_S0);
+	mmiowb();
+}
+
+static void w5300_hw_close(struct w5300_priv *priv)
+{
+	w5300_write(priv, W5300_IMR, 0);
+	mmiowb();
+	w5300_command(priv, S0_CR_CLOSE);
+}
+
+/***********************************************************************
+ *
+ *   Device driver functions / callbacks
+ *
+ ***********************************************************************/
+
+static void w5300_get_drvinfo(struct net_device *ndev,
+			      struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+		sizeof(info->bus_info));
+}
+
+static u32 w5300_get_link(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	if (gpio_is_valid(priv->link_gpio))
+		return !!gpio_get_value(priv->link_gpio);
+
+	return 1;
+}
+
+static u32 w5300_get_msglevel(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	return priv->msg_enable;
+}
+
+static void w5300_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	priv->msg_enable = value;
+}
+
+static int w5300_get_regs_len(struct net_device *ndev)
+{
+	return W5300_REGS_LEN;
+}
+
+static void w5300_get_regs(struct net_device *ndev,
+			   struct ethtool_regs *regs, void *_buf)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+	u8 *buf = _buf;
+	u16 addr;
+	u16 data;
+
+	regs->version = 1;
+	for (addr = 0; addr < W5300_REGS_LEN; addr += 2) {
+		switch (addr & 0x23f) {
+		case W5300_S0_TX_FIFO: /* cannot read TX_FIFO */
+		case W5300_S0_RX_FIFO: /* cannot read RX_FIFO */
+			data = 0xffff;
+			break;
+		default:
+			data = w5300_read(priv, addr);
+			break;
+		}
+		*buf++ = data >> 8;
+		*buf++ = data;
+	}
+}
+
+static void w5300_tx_timeout(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+	w5300_hw_reset(priv);
+	w5300_hw_start(priv);
+	ndev->stats.tx_errors++;
+	ndev->trans_start = jiffies;
+	netif_wake_queue(ndev);
+}
+
+static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+
+	w5300_write_frame(priv, skb->data, skb->len);
+	mmiowb();
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += skb->len;
+	dev_kfree_skb(skb);
+	netif_dbg(priv, tx_queued, ndev, "tx queued\n");
+
+	w5300_command(priv, S0_CR_SEND);
+
+	return NETDEV_TX_OK;
+}
+
+static int w5300_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct w5300_priv *priv = container_of(napi, struct w5300_priv, napi);
+	struct net_device *ndev = priv->ndev;
+	struct sk_buff *skb;
+	int rx_count;
+	u16 rx_len;
+
+	for (rx_count = 0; rx_count < budget; rx_count++) {
+		u32 rx_fifo_len = w5300_read32(priv, W5300_S0_RX_RSR);
+		if (rx_fifo_len == 0)
+			break;
+
+		rx_len = w5300_read(priv, W5300_S0_RX_FIFO);
+
+		skb = netdev_alloc_skb_ip_align(ndev, roundup(rx_len, 2));
+		if (unlikely(!skb)) {
+			u32 i;
+			for (i = 0; i < rx_fifo_len; i += 2)
+				w5300_read(priv, W5300_S0_RX_FIFO);
+			ndev->stats.rx_dropped++;
+			return -ENOMEM;
+		}
+
+		skb_put(skb, rx_len);
+		w5300_read_frame(priv, skb->data, rx_len);
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		netif_receive_skb(skb);
+		ndev->stats.rx_packets++;
+		ndev->stats.rx_bytes += rx_len;
+	}
+
+	if (rx_count < budget) {
+		w5300_write(priv, W5300_IMR, IR_S0);
+		mmiowb();
+		napi_complete(napi);
+	}
+
+	return rx_count;
+}
+
+static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
+{
+	struct net_device *ndev = ndev_instance;
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	int ir = w5300_read(priv, W5300_S0_IR);
+	if (!ir)
+		return IRQ_NONE;
+	w5300_write(priv, W5300_S0_IR, ir);
+	mmiowb();
+
+	if (ir & S0_IR_SENDOK) {
+		netif_dbg(priv, tx_done, ndev, "tx done\n");
+		netif_wake_queue(ndev);
+	}
+
+	if (ir & S0_IR_RECV) {
+		if (napi_schedule_prep(&priv->napi)) {
+			w5300_write(priv, W5300_IMR, 0);
+			mmiowb();
+			__napi_schedule(&priv->napi);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t w5300_detect_link(int irq, void *ndev_instance)
+{
+	struct net_device *ndev = ndev_instance;
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		if (gpio_get_value(priv->link_gpio) != 0) {
+			netif_info(priv, link, ndev, "link is up\n");
+			netif_carrier_on(ndev);
+		} else {
+			netif_info(priv, link, ndev, "link is down\n");
+			netif_carrier_off(ndev);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void w5300_set_rx_mode(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+	bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
+
+	if (priv->promisc != set_promisc) {
+		priv->promisc = set_promisc;
+		w5300_hw_start(priv);
+	}
+}
+
+static int w5300_set_macaddr(struct net_device *ndev, void *addr)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+	struct sockaddr *sock_addr = addr;
+
+	if (!is_valid_ether_addr(sock_addr->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+	ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
+	w5300_write_macaddr(priv);
+	return 0;
+}
+
+static int w5300_open(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	netif_info(priv, ifup, ndev, "enabling\n");
+	if (!is_valid_ether_addr(ndev->dev_addr))
+		return -EINVAL;
+	w5300_hw_start(priv);
+	napi_enable(&priv->napi);
+	netif_start_queue(ndev);
+	if (!gpio_is_valid(priv->link_gpio) ||
+	    gpio_get_value(priv->link_gpio) != 0)
+		netif_carrier_on(ndev);
+	return 0;
+}
+
+static int w5300_stop(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	netif_info(priv, ifdown, ndev, "shutting down\n");
+	w5300_hw_close(priv);
+	netif_carrier_off(ndev);
+	netif_stop_queue(ndev);
+	napi_disable(&priv->napi);
+	return 0;
+}
+
+static const struct ethtool_ops w5300_ethtool_ops = {
+	.get_drvinfo		= w5300_get_drvinfo,
+	.get_msglevel		= w5300_get_msglevel,
+	.set_msglevel		= w5300_set_msglevel,
+	.get_link		= w5300_get_link,
+	.get_regs_len		= w5300_get_regs_len,
+	.get_regs		= w5300_get_regs,
+};
+
+static const struct net_device_ops w5300_netdev_ops = {
+	.ndo_open		= w5300_open,
+	.ndo_stop		= w5300_stop,
+	.ndo_start_xmit		= w5300_start_tx,
+	.ndo_tx_timeout		= w5300_tx_timeout,
+	.ndo_set_rx_mode	= w5300_set_rx_mode,
+	.ndo_set_mac_address	= w5300_set_macaddr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+};
+
+static int __devinit w5300_hw_probe(struct platform_device *pdev)
+{
+	struct wiznet_platform_data *data = pdev->dev.platform_data;
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5300_priv *priv = netdev_priv(ndev);
+	const char *name = netdev_name(ndev);
+	struct resource *mem;
+	int mem_size;
+	int irq;
+	int ret;
+
+	if (data && is_valid_ether_addr(data->mac_addr)) {
+		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
+	} else {
+		random_ether_addr(ndev->dev_addr);
+		ndev->addr_assign_type |= NET_ADDR_RANDOM;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem)
+		return -ENXIO;
+	mem_size = resource_size(mem);
+	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
+		return -EBUSY;
+	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
+	if (!priv->base)
+		return -EBUSY;
+
+	spin_lock_init(&priv->reg_lock);
+	priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
+	if (priv->indirect) {
+		priv->read  = w5300_read_indirect;
+		priv->write = w5300_write_indirect;
+	} else {
+		priv->read  = w5300_read_direct;
+		priv->write = w5300_write_direct;
+	}
+
+	w5300_hw_reset(priv);
+	if (w5300_read(priv, W5300_IDR) != IDR_W5300)
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+	ret = request_irq(irq, w5300_interrupt,
+			  IRQ_TYPE_LEVEL_LOW, name, ndev);
+	if (ret < 0)
+		return ret;
+	priv->irq = irq;
+
+	priv->link_gpio = data ? data->link_gpio : -EINVAL;
+	if (gpio_is_valid(priv->link_gpio)) {
+		char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
+		if (!link_name)
+			return -ENOMEM;
+		snprintf(link_name, 16, "%s-link", name);
+		priv->link_irq = gpio_to_irq(priv->link_gpio);
+		if (request_any_context_irq(priv->link_irq, w5300_detect_link,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				link_name, priv->ndev) < 0)
+			priv->link_gpio = -EINVAL;
+	}
+
+	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
+	return 0;
+}
+
+static int __devinit w5300_probe(struct platform_device *pdev)
+{
+	struct w5300_priv *priv;
+	struct net_device *ndev;
+	int err;
+
+	ndev = alloc_etherdev(sizeof(*priv));
+	if (!ndev)
+		return -ENOMEM;
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	platform_set_drvdata(pdev, ndev);
+	priv = netdev_priv(ndev);
+	priv->ndev = ndev;
+
+	ether_setup(ndev);
+	ndev->netdev_ops = &w5300_netdev_ops;
+	ndev->ethtool_ops = &w5300_ethtool_ops;
+	ndev->watchdog_timeo = HZ;
+	netif_napi_add(ndev, &priv->napi, w5300_napi_poll, 16);
+
+	/* This chip doesn't support VLAN packets with normal MTU,
+	 * so disable VLAN for this device.
+	 */
+	ndev->features |= NETIF_F_VLAN_CHALLENGED;
+
+	err = register_netdev(ndev);
+	if (err < 0)
+		goto err_register;
+
+	err = w5300_hw_probe(pdev);
+	if (err < 0)
+		goto err_hw_probe;
+
+	return 0;
+
+err_hw_probe:
+	unregister_netdev(ndev);
+err_register:
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static int __devexit w5300_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	w5300_hw_reset(priv);
+	free_irq(priv->irq, ndev);
+	if (gpio_is_valid(priv->link_gpio))
+		free_irq(priv->link_irq, ndev);
+
+	unregister_netdev(ndev);
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int w5300_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		netif_carrier_off(ndev);
+		netif_device_detach(ndev);
+
+		w5300_hw_close(priv);
+	}
+	return 0;
+}
+
+static int w5300_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		w5300_hw_reset(priv);
+		w5300_hw_start(priv);
+
+		netif_device_attach(ndev);
+		if (!gpio_is_valid(priv->link_gpio) ||
+		    gpio_get_value(priv->link_gpio) != 0)
+			netif_carrier_on(ndev);
+	}
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);
+
+static struct platform_driver w5300_driver = {
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= &w5300_pm_ops,
+	},
+	.probe		= w5300_probe,
+	.remove		= __devexit_p(w5300_remove),
+};
+
+module_platform_driver(w5300_driver);
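[Editorial note, not part of the patch] Both drivers wire up .get_regs_len/.get_regs, so the raw register window can be inspected from userspace over the standard ethtool ioctl. The following illustrative userspace sketch assumes the device came up as eth0 (an assumption, any netdev name works) and uses only the generic SIOCETHTOOL interface: ETHTOOL_GDRVINFO to learn regdump_len, then ETHTOOL_GREGS to fetch the dump that w5300_get_regs() fills in (the TX/RX FIFO slots deliberately read back as 0xffff).

/* Illustrative register-dump sketch via the ethtool ioctl. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";	/* assumed interface name */
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_regs *regs;
	struct ifreq ifr;
	unsigned int i;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* Ask the driver how large its dump is (w5300_get_regs_len) */
	ifr.ifr_data = (char *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	regs = calloc(1, sizeof(*regs) + drvinfo.regdump_len);
	if (!regs)
		return 1;
	regs->cmd = ETHTOOL_GREGS;
	regs->len = drvinfo.regdump_len;

	/* Fetch the dump itself (w5300_get_regs) */
	ifr.ifr_data = (char *)regs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	for (i = 0; i < regs->len; i++)
		printf("%02x%c", regs->data[i], (i % 16 == 15) ? '\n' : ' ');

	free(regs);
	close(fd);
	return 0;
}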
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index d21591a..1eaf712 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1000,6 +1000,7 @@
 	.set_settings = temac_set_settings,
 	.nway_reset = temac_nway_reset,
 	.get_link = ethtool_op_get_link,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 static int __devinit temac_of_probe(struct platform_device *op)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index cc83af0..44b8d2b 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -2,9 +2,7 @@
  * Definitions for Xilinx Axi Ethernet device driver.
  *
  * Copyright (c) 2009 Secret Lab Technologies, Ltd.
- * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
- * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
- * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
+ * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  */
 
 #ifndef XILINX_AXIENET_H
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 2fcbeba..9c365e1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -4,9 +4,9 @@
  * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
  * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
  * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
- * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
- * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
- * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
+ * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  *
  * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
  * and Spartan6.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index d70b6e7..e90e1f4 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -2,9 +2,9 @@
  * MDIO bus driver for the Xilinx Axi Ethernet device
  *
  * Copyright (c) 2009 Secret Lab Technologies, Ltd.
- * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
- * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
- * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
+ * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  */
 
 #include <linux/of_address.h>
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index cf67352..3f43101 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -5,8 +5,8 @@
 config NET_VENDOR_XSCALE
 	bool "Intel XScale IXP devices"
 	default y
-	depends on NET_VENDOR_INTEL && ((ARM && ARCH_IXP4XX && \
-		   IXP4XX_NPE && IXP4XX_QMGR) || ARCH_ENP2611)
+	depends on NET_VENDOR_INTEL && (ARM && ARCH_IXP4XX && \
+		   IXP4XX_NPE && IXP4XX_QMGR)
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
@@ -27,6 +27,4 @@
 	  Say Y here if you want to use built-in Ethernet ports
 	  on IXP4xx processor.
 
-source "drivers/net/ethernet/xscale/ixp2000/Kconfig"
-
 endif # NET_VENDOR_XSCALE
diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile
index b195b9d..abc3b03 100644
--- a/drivers/net/ethernet/xscale/Makefile
+++ b/drivers/net/ethernet/xscale/Makefile
@@ -2,5 +2,4 @@
 # Makefile for the Intel XScale IXP device drivers.
 #
 
-obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
 obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/Kconfig b/drivers/net/ethernet/xscale/ixp2000/Kconfig
deleted file mode 100644
index 58dbc5b..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config ENP2611_MSF_NET
-	tristate "Radisys ENP2611 MSF network interface support"
-	depends on ARCH_ENP2611
-	---help---
-	  This is a driver for the MSF network interface unit in
-	  the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/ethernet/xscale/ixp2000/Makefile b/drivers/net/ethernet/xscale/ixp2000/Makefile
deleted file mode 100644
index fd38351..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_ENP2611_MSF_NET) += enp2611_mod.o
-
-enp2611_mod-objs := caleb.o enp2611.o ixp2400-msf.o ixpdev.o pm3386.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.c b/drivers/net/ethernet/xscale/ixp2000/caleb.c
deleted file mode 100644
index 7dea5b9..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/caleb.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include "caleb.h"
-
-#define CALEB_IDLO		0x00
-#define CALEB_IDHI		0x01
-#define CALEB_RID		0x02
-#define CALEB_RESET		0x03
-#define CALEB_INTREN0		0x04
-#define CALEB_INTREN1		0x05
-#define CALEB_INTRSTAT0		0x06
-#define CALEB_INTRSTAT1		0x07
-#define CALEB_PORTEN		0x08
-#define CALEB_BURST		0x09
-#define CALEB_PORTPAUS		0x0A
-#define CALEB_PORTPAUSD		0x0B
-#define CALEB_PHY0RX		0x10
-#define CALEB_PHY1RX		0x11
-#define CALEB_PHY0TX		0x12
-#define CALEB_PHY1TX		0x13
-#define CALEB_IXPRX_HI_CNTR	0x15
-#define CALEB_PHY0RX_HI_CNTR	0x16
-#define CALEB_PHY1RX_HI_CNTR	0x17
-#define CALEB_IXPRX_CNTR	0x18
-#define CALEB_PHY0RX_CNTR	0x19
-#define CALEB_PHY1RX_CNTR	0x1A
-#define CALEB_IXPTX_CNTR	0x1B
-#define CALEB_PHY0TX_CNTR	0x1C
-#define CALEB_PHY1TX_CNTR	0x1D
-#define CALEB_DEBUG0		0x1E
-#define CALEB_DEBUG1		0x1F
-
-
-static u8 caleb_reg_read(int reg)
-{
-	u8 value;
-
-	value = *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg));
-
-//	printk(KERN_INFO "caleb_reg_read(%d) = %.2x\n", reg, value);
-
-	return value;
-}
-
-static void caleb_reg_write(int reg, u8 value)
-{
-	u8 dummy;
-
-//	printk(KERN_INFO "caleb_reg_write(%d, %.2x)\n", reg, value);
-
-	*((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)) = value;
-
-	dummy = *((volatile u8 *)ENP2611_CALEB_VIRT_BASE);
-	__asm__ __volatile__("mov %0, %0" : "+r" (dummy));
-}
-
-
-void caleb_reset(void)
-{
-	/*
-	 * Perform a chip reset.
-	 */
-	caleb_reg_write(CALEB_RESET, 0x02);
-	udelay(1);
-
-	/*
-	 * Enable all interrupt sources.  This is needed to get
-	 * meaningful results out of the status bits (register 6
-	 * and 7.)
-	 */
-	caleb_reg_write(CALEB_INTREN0, 0xff);
-	caleb_reg_write(CALEB_INTREN1, 0x07);
-
-	/*
-	 * Set RX and TX FIFO thresholds to 1.5kb.
-	 */
-	caleb_reg_write(CALEB_PHY0RX, 0x11);
-	caleb_reg_write(CALEB_PHY1RX, 0x11);
-	caleb_reg_write(CALEB_PHY0TX, 0x11);
-	caleb_reg_write(CALEB_PHY1TX, 0x11);
-
-	/*
-	 * Program SPI-3 burst size.
-	 */
-	caleb_reg_write(CALEB_BURST, 0);	// 64-byte RBUF mpackets
-//	caleb_reg_write(CALEB_BURST, 1);	// 128-byte RBUF mpackets
-//	caleb_reg_write(CALEB_BURST, 2);	// 256-byte RBUF mpackets
-}
-
-void caleb_enable_rx(int port)
-{
-	u8 temp;
-
-	temp = caleb_reg_read(CALEB_PORTEN);
-	temp |= 1 << port;
-	caleb_reg_write(CALEB_PORTEN, temp);
-}
-
-void caleb_disable_rx(int port)
-{
-	u8 temp;
-
-	temp = caleb_reg_read(CALEB_PORTEN);
-	temp &= ~(1 << port);
-	caleb_reg_write(CALEB_PORTEN, temp);
-}
-
-void caleb_enable_tx(int port)
-{
-	u8 temp;
-
-	temp = caleb_reg_read(CALEB_PORTEN);
-	temp |= 1 << (port + 4);
-	caleb_reg_write(CALEB_PORTEN, temp);
-}
-
-void caleb_disable_tx(int port)
-{
-	u8 temp;
-
-	temp = caleb_reg_read(CALEB_PORTEN);
-	temp &= ~(1 << (port + 4));
-	caleb_reg_write(CALEB_PORTEN, temp);
-}
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.h b/drivers/net/ethernet/xscale/ixp2000/caleb.h
deleted file mode 100644
index e93a1ef..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/caleb.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __CALEB_H
-#define __CALEB_H
-
-void caleb_reset(void);
-void caleb_enable_rx(int port);
-void caleb_disable_rx(int port);
-void caleb_enable_tx(int port);
-void caleb_disable_tx(int port);
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/enp2611.c b/drivers/net/ethernet/xscale/ixp2000/enp2611.c
deleted file mode 100644
index 34a6cfd..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/enp2611.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * IXP2400 MSF network device driver for the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include <asm/hardware/uengine.h>
-#include <asm/mach-types.h>
-#include <asm/io.h>
-#include "ixpdev.h"
-#include "caleb.h"
-#include "ixp2400-msf.h"
-#include "pm3386.h"
-
-/***********************************************************************
- * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
- * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
- * to the IXP2400.
- *
- *                +-------------+
- * SFP GBIC #0 ---+             |       +---------+
- *                |  PM3386 #0  +-------+         |
- * SFP GBIC #1 ---+             |       | "Caleb" |         +---------+
- *                +-------------+       |         |         |         |
- *                                      | SPI-3   +---------+ IXP2400 |
- *                +-------------+       | bridge  |         |         |
- * SFP GBIC #2 ---+             |       | FPGA    |         +---------+
- *                |  PM3386 #1  +-------+         |
- *                |             |       +---------+
- *                +-------------+
- *              ^                   ^                  ^
- *              | 1.25Gbaud         | 104MHz           | 104MHz
- *              | SERDES ea.        | SPI-3 ea.        | SPI-3
- *
- ***********************************************************************/
-static struct ixp2400_msf_parameters enp2611_msf_parameters =
-{
-	.rx_mode =		IXP2400_RX_MODE_UTOPIA_POS |
-				IXP2400_RX_MODE_1x32 |
-				IXP2400_RX_MODE_MPHY |
-				IXP2400_RX_MODE_MPHY_32 |
-				IXP2400_RX_MODE_MPHY_POLLED_STATUS |
-				IXP2400_RX_MODE_MPHY_LEVEL3 |
-				IXP2400_RX_MODE_RBUF_SIZE_64,
-
-	.rxclk01_multiplier =	IXP2400_PLL_MULTIPLIER_16,
-
-	.rx_poll_ports =	3,
-
-	.rx_channel_mode = {
-		IXP2400_PORT_RX_MODE_MASTER |
-		IXP2400_PORT_RX_MODE_POS_PHY |
-		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-		IXP2400_PORT_RX_MODE_ODD_PARITY |
-		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_RX_MODE_MASTER |
-		IXP2400_PORT_RX_MODE_POS_PHY |
-		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-		IXP2400_PORT_RX_MODE_ODD_PARITY |
-		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_RX_MODE_MASTER |
-		IXP2400_PORT_RX_MODE_POS_PHY |
-		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-		IXP2400_PORT_RX_MODE_ODD_PARITY |
-		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_RX_MODE_MASTER |
-		IXP2400_PORT_RX_MODE_POS_PHY |
-		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-		IXP2400_PORT_RX_MODE_ODD_PARITY |
-		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
-	},
-
-	.tx_mode =		IXP2400_TX_MODE_UTOPIA_POS |
-				IXP2400_TX_MODE_1x32 |
-				IXP2400_TX_MODE_MPHY |
-				IXP2400_TX_MODE_MPHY_32 |
-				IXP2400_TX_MODE_MPHY_POLLED_STATUS |
-				IXP2400_TX_MODE_MPHY_LEVEL3 |
-				IXP2400_TX_MODE_TBUF_SIZE_64,
-
-	.txclk01_multiplier =	IXP2400_PLL_MULTIPLIER_16,
-
-	.tx_poll_ports =	3,
-
-	.tx_channel_mode = {
-		IXP2400_PORT_TX_MODE_MASTER |
-		IXP2400_PORT_TX_MODE_POS_PHY |
-		IXP2400_PORT_TX_MODE_ODD_PARITY |
-		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_TX_MODE_MASTER |
-		IXP2400_PORT_TX_MODE_POS_PHY |
-		IXP2400_PORT_TX_MODE_ODD_PARITY |
-		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_TX_MODE_MASTER |
-		IXP2400_PORT_TX_MODE_POS_PHY |
-		IXP2400_PORT_TX_MODE_ODD_PARITY |
-		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_TX_MODE_MASTER |
-		IXP2400_PORT_TX_MODE_POS_PHY |
-		IXP2400_PORT_TX_MODE_ODD_PARITY |
-		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
-	}
-};
-
-static struct net_device *nds[3];
-static struct timer_list link_check_timer;
-
-/* @@@ Poll the SFP moddef0 line too.  */
-/* @@@ Try to use the pm3386 DOOL interrupt as well.  */
-static void enp2611_check_link_status(unsigned long __dummy)
-{
-	int i;
-
-	for (i = 0; i < 3; i++) {
-		struct net_device *dev;
-		int status;
-
-		dev = nds[i];
-		if (dev == NULL)
-			continue;
-
-		status = pm3386_is_link_up(i);
-		if (status && !netif_carrier_ok(dev)) {
-			/* @@@ Should report autonegotiation status.  */
-			printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);
-
-			pm3386_enable_tx(i);
-			caleb_enable_tx(i);
-			netif_carrier_on(dev);
-		} else if (!status && netif_carrier_ok(dev)) {
-			printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);
-
-			netif_carrier_off(dev);
-			caleb_disable_tx(i);
-			pm3386_disable_tx(i);
-		}
-	}
-
-	link_check_timer.expires = jiffies + HZ / 10;
-	add_timer(&link_check_timer);
-}
-
-static void enp2611_set_port_admin_status(int port, int up)
-{
-	if (up) {
-		caleb_enable_rx(port);
-
-		pm3386_set_carrier(port, 1);
-		pm3386_enable_rx(port);
-	} else {
-		caleb_disable_tx(port);
-		pm3386_disable_tx(port);
-		/* @@@ Flush out pending packets.  */
-		pm3386_set_carrier(port, 0);
-
-		pm3386_disable_rx(port);
-		caleb_disable_rx(port);
-	}
-}
-
-static int __init enp2611_init_module(void)
-{ 
-	int ports;
-	int i;
-
-	if (!machine_is_enp2611())
-		return -ENODEV;
-
-	caleb_reset();
-	pm3386_reset();
-
-	ports = pm3386_port_count();
-	for (i = 0; i < ports; i++) {
-		nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
-		if (nds[i] == NULL) {
-			while (--i >= 0)
-				free_netdev(nds[i]);
-			return -ENOMEM;
-		}
-
-		pm3386_init_port(i);
-		pm3386_get_mac(i, nds[i]->dev_addr);
-	}
-
-	ixp2400_msf_init(&enp2611_msf_parameters);
-
-	if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
-		for (i = 0; i < ports; i++)
-			if (nds[i])
-				free_netdev(nds[i]);
-		return -EINVAL;
-	}
-
-	init_timer(&link_check_timer);
-	link_check_timer.function = enp2611_check_link_status;
-	link_check_timer.expires = jiffies;
-	add_timer(&link_check_timer);
-
-	return 0;
-}
-
-static void __exit enp2611_cleanup_module(void)
-{
-	int i;
-
-	del_timer_sync(&link_check_timer);
-
-	ixpdev_deinit();
-	for (i = 0; i < 3; i++)
-		free_netdev(nds[i]);
-}
-
-module_init(enp2611_init_module);
-module_exit(enp2611_cleanup_module);
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
deleted file mode 100644
index f5ffd7e..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Generic library functions for the MSF (Media and Switch Fabric) unit
- * found on the Intel IXP2400 network processor.
- *
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of the
- * License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <mach/hardware.h>
-#include <mach/ixp2000-regs.h>
-#include <asm/delay.h>
-#include <asm/io.h>
-#include "ixp2400-msf.h"
-
-/*
- * This is the Intel recommended PLL init procedure as described on
- * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual.
- */
-static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp)
-{
-	int rx_dual_clock;
-	int tx_dual_clock;
-	u32 value;
-
-	/*
-	 * If the RX mode is not 1x32, we have to enable both RX PLLs
-	 * (#0 and #1.)  The same thing for the TX direction.
-	 */
-	rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK);
-	tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK);
-
-	/*
-	 * Read initial value.
-	 */
-	value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL);
-
-	/*
-	 * Put PLLs in powerdown and bypass mode.
-	 */
-	value |= 0x0000f0f0;
-	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-	/*
-	 * Set single or dual clock mode bits.
-	 */
-	value &= ~0x03000000;
-	value |= (rx_dual_clock << 24) | (tx_dual_clock << 25);
-
-	/*
-	 * Set multipliers.
-	 */
-	value &= ~0x00ff0000;
-	value |= mp->rxclk01_multiplier << 16;
-	value |= mp->rxclk23_multiplier << 18;
-	value |= mp->txclk01_multiplier << 20;
-	value |= mp->txclk23_multiplier << 22;
-
-	/*
-	 * And write value.
-	 */
-	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-	/*
-	 * Disable PLL bypass mode.
-	 */
-	value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15);
-	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-	/*
-	 * Turn on PLLs.
-	 */
-	value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7);
-	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-	/*
-	 * Wait for PLLs to lock.  There are lock status bits, but IXP2400
-	 * erratum #65 says that these lock bits should not be relied upon
-	 * as they might not accurately reflect the true state of the PLLs.
-	 */
-	udelay(100);
-}
-
-/*
- * Needed according to p480 of Programmer's Reference Manual.
- */
-static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp)
-{
-	int size_bits;
-	int i;
-
-	/*
-	 * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer
-	 * corruption) in the Intel-recommended way: do not add the RBUF
-	 * elements susceptible to corruption to the freelist.
-	 */
-	size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK;
-	if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) {
-		for (i = 1; i < 128; i++) {
-			if (i == 9 || i == 18 || i == 27)
-				continue;
-			ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
-		}
-	} else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) {
-		for (i = 1; i < 64; i++) {
-			if (i == 4 || i == 9 || i == 13)
-				continue;
-			ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
-		}
-	} else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) {
-		for (i = 1; i < 32; i++) {
-			if (i == 2 || i == 4 || i == 6)
-				continue;
-			ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
-		}
-	}
-}
-
-static u32 ixp2400_msf_valid_channels(u32 reg)
-{
-	u32 channels;
-
-	channels = 0;
-	switch (reg & IXP2400_RX_MODE_WIDTH_MASK) {
-	case IXP2400_RX_MODE_1x32:
-		channels = 0x1;
-		if (reg & IXP2400_RX_MODE_MPHY &&
-		    !(reg & IXP2400_RX_MODE_MPHY_32))
-			channels = 0xf;
-		break;
-
-	case IXP2400_RX_MODE_2x16:
-		channels = 0x5;
-		break;
-
-	case IXP2400_RX_MODE_4x8:
-		channels = 0xf;
-		break;
-
-	case IXP2400_RX_MODE_1x16_2x8:
-		channels = 0xd;
-		break;
-	}
-
-	return channels;
-}
-
-static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp)
-{
-	u32 value;
-
-	value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff;
-	value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28;
-	ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value);
-}
-
-static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp)
-{
-	u32 value;
-
-	value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff;
-	value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28;
-	ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value);
-}
-
-
-void ixp2400_msf_init(struct ixp2400_msf_parameters *mp)
-{
-	u32 value;
-	int i;
-
-	/*
-	 * Init the RX/TX PLLs based on the passed parameter block.
-	 */
-	ixp2400_pll_init(mp);
-
-	/*
-	 * Reset MSF.  Bit 7 in IXP_RESET_0 resets the MSF.
-	 */
-	value = ixp2000_reg_read(IXP2000_RESET0);
-	ixp2000_reg_write(IXP2000_RESET0, value | 0x80);
-	ixp2000_reg_write(IXP2000_RESET0, value & ~0x80);
-
-	/*
-	 * Initialise the RX section.
-	 */
-	ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1);
-	ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode);
-	for (i = 0; i < 4; i++) {
-		ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i,
-						mp->rx_channel_mode[i]);
-	}
-	ixp2400_msf_free_rbuf_entries(mp);
-	ixp2400_msf_enable_rx(mp);
-
-	/*
-	 * Initialise the TX section.
-	 */
-	ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1);
-	ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode);
-	for (i = 0; i < 4; i++) {
-		ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i,
-						mp->tx_channel_mode[i]);
-	}
-	ixp2400_msf_enable_tx(mp);
-}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
deleted file mode 100644
index 3ac1af2..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Generic library functions for the MSF (Media and Switch Fabric) unit
- * found on the Intel IXP2400 network processor.
- *
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of the
- * License, or (at your option) any later version.
- */
-
-#ifndef __IXP2400_MSF_H
-#define __IXP2400_MSF_H
-
-struct ixp2400_msf_parameters
-{
-	u32				rx_mode;
-	unsigned			rxclk01_multiplier:2;
-	unsigned			rxclk23_multiplier:2;
-	unsigned			rx_poll_ports:6;
-	u32				rx_channel_mode[4];
-
-	u32				tx_mode;
-	unsigned			txclk01_multiplier:2;
-	unsigned			txclk23_multiplier:2;
-	unsigned			tx_poll_ports:6;
-	u32				tx_channel_mode[4];
-};
-
-void ixp2400_msf_init(struct ixp2400_msf_parameters *mp);
-
-#define IXP2400_PLL_MULTIPLIER_48		0x00
-#define IXP2400_PLL_MULTIPLIER_24		0x01
-#define IXP2400_PLL_MULTIPLIER_16		0x02
-#define IXP2400_PLL_MULTIPLIER_12		0x03
-
-#define IXP2400_RX_MODE_CSIX			0x00400000
-#define IXP2400_RX_MODE_UTOPIA_POS		0x00000000
-#define IXP2400_RX_MODE_WIDTH_MASK		0x00300000
-#define IXP2400_RX_MODE_1x16_2x8		0x00300000
-#define IXP2400_RX_MODE_4x8			0x00200000
-#define IXP2400_RX_MODE_2x16			0x00100000
-#define IXP2400_RX_MODE_1x32			0x00000000
-#define IXP2400_RX_MODE_MPHY			0x00080000
-#define IXP2400_RX_MODE_SPHY			0x00000000
-#define IXP2400_RX_MODE_MPHY_32			0x00040000
-#define IXP2400_RX_MODE_MPHY_4			0x00000000
-#define IXP2400_RX_MODE_MPHY_POLLED_STATUS	0x00020000
-#define IXP2400_RX_MODE_MPHY_DIRECT_STATUS	0x00000000
-#define IXP2400_RX_MODE_CBUS_FULL_DUPLEX	0x00010000
-#define IXP2400_RX_MODE_CBUS_SIMPLEX		0x00000000
-#define IXP2400_RX_MODE_MPHY_LEVEL2		0x00004000
-#define IXP2400_RX_MODE_MPHY_LEVEL3		0x00000000
-#define IXP2400_RX_MODE_CBUS_8BIT		0x00002000
-#define IXP2400_RX_MODE_CBUS_4BIT		0x00000000
-#define IXP2400_RX_MODE_CSIX_SINGLE_FREELIST	0x00000200
-#define IXP2400_RX_MODE_CSIX_SPLIT_FREELISTS	0x00000000
-#define IXP2400_RX_MODE_RBUF_SIZE_MASK		0x0000000c
-#define IXP2400_RX_MODE_RBUF_SIZE_256		0x00000008
-#define IXP2400_RX_MODE_RBUF_SIZE_128		0x00000004
-#define IXP2400_RX_MODE_RBUF_SIZE_64		0x00000000
-
-#define IXP2400_PORT_RX_MODE_SLAVE		0x00000040
-#define IXP2400_PORT_RX_MODE_MASTER		0x00000000
-#define IXP2400_PORT_RX_MODE_POS_PHY_L3		0x00000020
-#define IXP2400_PORT_RX_MODE_POS_PHY_L2		0x00000000
-#define IXP2400_PORT_RX_MODE_POS_PHY		0x00000010
-#define IXP2400_PORT_RX_MODE_UTOPIA		0x00000000
-#define IXP2400_PORT_RX_MODE_EVEN_PARITY	0x0000000c
-#define IXP2400_PORT_RX_MODE_ODD_PARITY		0x00000008
-#define IXP2400_PORT_RX_MODE_NO_PARITY		0x00000000
-#define IXP2400_PORT_RX_MODE_UTOPIA_BIG_CELLS	0x00000002
-#define IXP2400_PORT_RX_MODE_UTOPIA_NORMAL_CELLS	0x00000000
-#define IXP2400_PORT_RX_MODE_2_CYCLE_DECODE	0x00000001
-#define IXP2400_PORT_RX_MODE_1_CYCLE_DECODE	0x00000000
-
-#define IXP2400_TX_MODE_CSIX			0x00400000
-#define IXP2400_TX_MODE_UTOPIA_POS		0x00000000
-#define IXP2400_TX_MODE_WIDTH_MASK		0x00300000
-#define IXP2400_TX_MODE_1x16_2x8		0x00300000
-#define IXP2400_TX_MODE_4x8			0x00200000
-#define IXP2400_TX_MODE_2x16			0x00100000
-#define IXP2400_TX_MODE_1x32			0x00000000
-#define IXP2400_TX_MODE_MPHY			0x00080000
-#define IXP2400_TX_MODE_SPHY			0x00000000
-#define IXP2400_TX_MODE_MPHY_32			0x00040000
-#define IXP2400_TX_MODE_MPHY_4			0x00000000
-#define IXP2400_TX_MODE_MPHY_POLLED_STATUS	0x00020000
-#define IXP2400_TX_MODE_MPHY_DIRECT_STATUS	0x00000000
-#define IXP2400_TX_MODE_CBUS_FULL_DUPLEX	0x00010000
-#define IXP2400_TX_MODE_CBUS_SIMPLEX		0x00000000
-#define IXP2400_TX_MODE_MPHY_LEVEL2		0x00004000
-#define IXP2400_TX_MODE_MPHY_LEVEL3		0x00000000
-#define IXP2400_TX_MODE_CBUS_8BIT		0x00002000
-#define IXP2400_TX_MODE_CBUS_4BIT		0x00000000
-#define IXP2400_TX_MODE_TBUF_SIZE_MASK		0x0000000c
-#define IXP2400_TX_MODE_TBUF_SIZE_256		0x00000008
-#define IXP2400_TX_MODE_TBUF_SIZE_128		0x00000004
-#define IXP2400_TX_MODE_TBUF_SIZE_64		0x00000000
-
-#define IXP2400_PORT_TX_MODE_SLAVE		0x00000040
-#define IXP2400_PORT_TX_MODE_MASTER		0x00000000
-#define IXP2400_PORT_TX_MODE_POS_PHY		0x00000010
-#define IXP2400_PORT_TX_MODE_UTOPIA		0x00000000
-#define IXP2400_PORT_TX_MODE_EVEN_PARITY	0x0000000c
-#define IXP2400_PORT_TX_MODE_ODD_PARITY		0x00000008
-#define IXP2400_PORT_TX_MODE_NO_PARITY		0x00000000
-#define IXP2400_PORT_TX_MODE_UTOPIA_BIG_CELLS	0x00000002
-#define IXP2400_PORT_TX_MODE_2_CYCLE_DECODE	0x00000001
-#define IXP2400_PORT_TX_MODE_1_CYCLE_DECODE	0x00000000
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
deleted file mode 100644
index 42a73e35..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * RX ucode for the Intel IXP2400 in POS-PHY mode.
- * Copyright (C) 2004, 2005 Lennert Buytenhek
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Assumptions made in this code:
- * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
- *   only one full element list is used.  This includes, for example,
- *   1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4.  (This
- *   is not an exhaustive list.)
- * - The RBUF uses 64-byte mpackets.
- * - RX descriptors reside in SRAM, and have the following format:
- *	struct rx_desc
- *	{
- *	// to uengine
- *		u32	buf_phys_addr;
- *		u32	buf_length;
- *
- *	// from uengine
- *		u32	channel;
- *		u32	pkt_length;
- *	};
- * - Packet data resides in DRAM.
- * - Packet buffer addresses are 8-byte aligned.
- * - Scratch ring 0 is rx_pending.
- * - Scratch ring 1 is rx_done, and has status condition 'full'.
- * - The host triggers rx_done flush and rx_pending refill on seeing INTA.
- * - This code is run on all eight threads of the microengine it runs on.
- *
- * Local memory is used for per-channel RX state.
- */
-
-#define RX_THREAD_FREELIST_0		0x0030
-#define RBUF_ELEMENT_DONE		0x0044
-
-#define CHANNEL_FLAGS			*l$index0[0]
-#define CHANNEL_FLAG_RECEIVING		1
-#define PACKET_LENGTH			*l$index0[1]
-#define PACKET_CHECKSUM			*l$index0[2]
-#define BUFFER_HANDLE			*l$index0[3]
-#define BUFFER_START			*l$index0[4]
-#define BUFFER_LENGTH			*l$index0[5]
-
-#define CHANNEL_STATE_SIZE		24	// in bytes
-#define CHANNEL_STATE_SHIFT		5	// ceil(log2(state size))
-
-
-	.sig volatile sig1
-	.sig volatile sig2
-	.sig volatile sig3
-
-	.sig mpacket_arrived
-	.reg add_to_rx_freelist
-	.reg read $rsw0, $rsw1
-	.xfer_order $rsw0 $rsw1
-
-	.reg zero
-
-	/*
-	 * Initialise add_to_rx_freelist.
-	 */
-	.begin
-		.reg temp
-		.reg temp2
-
-		immed[add_to_rx_freelist, RX_THREAD_FREELIST_0]
-		immed_w1[add_to_rx_freelist, (&$rsw0 | (&mpacket_arrived << 12))]
-
-		local_csr_rd[ACTIVE_CTX_STS]
-		immed[temp, 0]
-		alu[temp2, temp, and, 0x1f]
-		alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<20]
-		alu[temp2, temp, and, 0x80]
-		alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<18]
-	.end
-
-	immed[zero, 0]
-
-	/*
-	 * Skip context 0 initialisation?
-	 */
-	.begin
-		br!=ctx[0, mpacket_receive_loop#]
-	.end
-
-	/*
-	 * Initialise local memory.
-	 */
-	.begin
-		.reg addr
-		.reg temp
-
-		immed[temp, 0]
-	init_local_mem_loop#:
-		alu_shf[addr, --, b, temp, <<CHANNEL_STATE_SHIFT]
-		local_csr_wr[ACTIVE_LM_ADDR_0, addr]
-		nop
-		nop
-		nop
-
-		immed[CHANNEL_FLAGS, 0]
-
-		alu[temp, temp, +, 1]
-		alu[--, temp, and, 0x20]
-		beq[init_local_mem_loop#]
-	.end
-
-	/*
-	 * Initialise signal pipeline.
-	 */
-	.begin
-		local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
-		.set_sig sig1
-
-		local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
-		.set_sig sig2
-
-		local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
-		.set_sig sig3
-	.end
-
-mpacket_receive_loop#:
-	/*
-	 * Synchronise and wait for mpacket.
-	 */
-	.begin
-		ctx_arb[sig1]
-		local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
-
-		msf[fast_wr, --, add_to_rx_freelist, 0]
-		.set_sig mpacket_arrived
-		ctx_arb[mpacket_arrived]
-		.set $rsw0 $rsw1
-	.end
-
-	/*
-	 * We halt if we see {inbparerr,parerr,null,soperror}.
-	 */
-	.begin
-		alu_shf[--, 0x1b, and, $rsw0, >>8]
-		bne[abort_rswerr#]
-	.end
-
-	/*
-	 * Point local memory pointer to this channel's state area.
-	 */
-	.begin
-		.reg chanaddr
-
-		alu[chanaddr, $rsw0, and, 0x1f]
-		alu_shf[chanaddr, --, b, chanaddr, <<CHANNEL_STATE_SHIFT]
-		local_csr_wr[ACTIVE_LM_ADDR_0, chanaddr]
-		nop
-		nop
-		nop
-	.end
-
-	/*
-	 * Check whether we received a SOP mpacket while we were already
-	 * working on a packet, or a non-SOP mpacket while there was no
-	 * packet pending.  (SOP == RECEIVING -> abort)  If everything's
-	 * okay, update the RECEIVING flag to reflect our new state.
-	 */
-	.begin
-		.reg temp
-		.reg eop
-
-		#if CHANNEL_FLAG_RECEIVING != 1
-		#error CHANNEL_FLAG_RECEIVING is not 1
-		#endif
-
-		alu_shf[temp, 1, and, $rsw0, >>15]
-		alu[temp, temp, xor, CHANNEL_FLAGS]
-		alu[--, temp, and, CHANNEL_FLAG_RECEIVING]
-		beq[abort_proterr#]
-
-		alu_shf[eop, 1, and, $rsw0, >>14]
-		alu[CHANNEL_FLAGS, temp, xor, eop]
-	.end
-
-	/*
-	 * Copy the mpacket into the right spot, and in case of EOP,
-	 * write back the descriptor and pass the packet on.
-	 */
-	.begin
-		.reg buffer_offset
-		.reg _packet_length
-		.reg _packet_checksum
-		.reg _buffer_handle
-		.reg _buffer_start
-		.reg _buffer_length
-
-		/*
-		 * Determine buffer_offset, _packet_length and
-		 * _packet_checksum.
-		 */
-		.begin
-			.reg temp
-
-			alu[--, 1, and, $rsw0, >>15]
-			beq[not_sop#]
-
-			immed[PACKET_LENGTH, 0]
-			immed[PACKET_CHECKSUM, 0]
-
-		not_sop#:
-			alu[buffer_offset, --, b, PACKET_LENGTH]
-			alu_shf[temp, 0xff, and, $rsw0, >>16]
-			alu[_packet_length, buffer_offset, +, temp]
-			alu[PACKET_LENGTH, --, b, _packet_length]
-
-			immed[temp, 0xffff]
-			alu[temp, $rsw1, and, temp]
-			alu[_packet_checksum, PACKET_CHECKSUM, +, temp]
-			alu[PACKET_CHECKSUM, --, b, _packet_checksum]
-		.end
-
-		/*
-		 * Allocate buffer in case of SOP.
-		 */
-		.begin
-			.reg temp
-
-			alu[temp, 1, and, $rsw0, >>15]
-			beq[skip_buffer_alloc#]
-
-			.begin
-				.sig zzz
-				.reg read $stemp $stemp2
-				.xfer_order $stemp $stemp2
-
-			rx_nobufs#:
-				scratch[get, $stemp, zero, 0, 1], ctx_swap[zzz]
-				alu[_buffer_handle, --, b, $stemp]
-				beq[rx_nobufs#]
-
-				sram[read, $stemp, _buffer_handle, 0, 2],
-								ctx_swap[zzz]
-				alu[_buffer_start, --, b, $stemp]
-				alu[_buffer_length, --, b, $stemp2]
-			.end
-
-		skip_buffer_alloc#:
-		.end
-
-		/*
-		 * Resynchronise.
-		 */
-		.begin
-			ctx_arb[sig2]
-			local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
-		.end
-
-		/*
-		 * Synchronise buffer state.
-		 */
-		.begin
-			.reg temp
-
-			alu[temp, 1, and, $rsw0, >>15]
-			beq[copy_from_local_mem#]
-
-			alu[BUFFER_HANDLE, --, b, _buffer_handle]
-			alu[BUFFER_START, --, b, _buffer_start]
-			alu[BUFFER_LENGTH, --, b, _buffer_length]
-			br[sync_state_done#]
-
-		copy_from_local_mem#:
-			alu[_buffer_handle, --, b, BUFFER_HANDLE]
-			alu[_buffer_start, --, b, BUFFER_START]
-			alu[_buffer_length, --, b, BUFFER_LENGTH]
-
-		sync_state_done#:
-		.end
-
-#if 0
-		/*
-		 * Debug buffer state management.
-		 */
-		.begin
-			.reg temp
-
-			alu[temp, 1, and, $rsw0, >>14]
-			beq[no_poison#]
-			immed[BUFFER_HANDLE, 0xdead]
-			immed[BUFFER_START, 0xdead]
-			immed[BUFFER_LENGTH, 0xdead]
-		no_poison#:
-
-			immed[temp, 0xdead]
-			alu[--, _buffer_handle, -, temp]
-			beq[state_corrupted#]
-			alu[--, _buffer_start, -, temp]
-			beq[state_corrupted#]
-			alu[--, _buffer_length, -, temp]
-			beq[state_corrupted#]
-		.end
-#endif
-
-		/*
-		 * Check buffer length.
-		 */
-		.begin
-			alu[--, _buffer_length, -, _packet_length]
-			blo[buffer_overflow#]
-		.end
-
-		/*
-		 * Copy the mpacket and give back the RBUF element.
-		 */
-		.begin
-			.reg element
-			.reg xfer_size
-			.reg temp
-			.sig copy_sig
-
-			alu_shf[element, 0x7f, and, $rsw0, >>24]
-			alu_shf[xfer_size, 0xff, and, $rsw0, >>16]
-
-			alu[xfer_size, xfer_size, -, 1]
-			alu_shf[xfer_size, 0x10, or, xfer_size, >>3]
-			alu_shf[temp, 0x10, or, xfer_size, <<21]
-			alu_shf[temp, temp, or, element, <<11]
-			alu_shf[--, temp, or, 1, <<18]
-
-			dram[rbuf_rd, --, _buffer_start, buffer_offset, max_8],
-						indirect_ref, sig_done[copy_sig]
-			ctx_arb[copy_sig]
-
-			alu[temp, RBUF_ELEMENT_DONE, or, element, <<16]
-			msf[fast_wr, --, temp, 0]
-		.end
-
-		/*
-		 * If EOP, write back the packet descriptor.
-		 */
-		.begin
-			.reg write $stemp $stemp2
-			.xfer_order $stemp $stemp2
-			.sig zzz
-
-			alu_shf[--, 1, and, $rsw0, >>14]
-			beq[no_writeback#]
-
-			alu[$stemp, $rsw0, and, 0x1f]
-			alu[$stemp2, --, b, _packet_length]
-			sram[write, $stemp, _buffer_handle, 8, 2], ctx_swap[zzz]
-
-		no_writeback#:
-		.end
-
-		/*
-		 * Resynchronise.
-		 */
-		.begin
-			ctx_arb[sig3]
-			local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
-		.end
-
-		/*
-		 * If EOP, put the buffer back onto the scratch ring.
-		 */
-		.begin
-			.reg write $stemp
-			.sig zzz
-
-			br_inp_state[SCR_Ring1_Status, rx_done_ring_overflow#]
-
-			alu_shf[--, 1, and, $rsw0, >>14]
-			beq[mpacket_receive_loop#]
-
-			alu[--, 1, and, $rsw0, >>10]
-			bne[rxerr#]
-
-			alu[$stemp, --, b, _buffer_handle]
-			scratch[put, $stemp, zero, 4, 1], ctx_swap[zzz]
-			cap[fast_wr, 0, XSCALE_INT_A]
-			br[mpacket_receive_loop#]
-
-		rxerr#:
-			alu[$stemp, --, b, _buffer_handle]
-			scratch[put, $stemp, zero, 0, 1], ctx_swap[zzz]
-			br[mpacket_receive_loop#]
-		.end
-	.end
-
-
-abort_rswerr#:
-	halt
-
-abort_proterr#:
-	halt
-
-state_corrupted#:
-	halt
-
-buffer_overflow#:
-	halt
-
-rx_done_ring_overflow#:
-	halt
-
-
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
deleted file mode 100644
index e8aee2f..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
+++ /dev/null
@@ -1,130 +0,0 @@
-static struct ixp2000_uengine_code ixp2400_rx =
-{
-	.cpu_model_bitmask	= 0x000003fe,
-	.cpu_min_revision	= 0,
-	.cpu_max_revision	= 255,
-
-	.uengine_parameters	= IXP2000_UENGINE_8_CONTEXTS |
-				  IXP2000_UENGINE_PRN_UPDATE_EVERY |
-				  IXP2000_UENGINE_NN_FROM_PREVIOUS |
-				  IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
-				  IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
-				  IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
-
-	.initial_reg_values	= (struct ixp2000_reg_value []) {
-		{ -1, -1 }
-	},
-
-	.num_insns		= 109,
-	.insns			= (u8 []) {
-		0xf0, 0x00, 0x0c, 0xc0, 0x05,
-		0xf4, 0x44, 0x0c, 0x00, 0x05,
-		0xfc, 0x04, 0x4c, 0x00, 0x00,
-		0xf0, 0x00, 0x00, 0x3b, 0x00,
-		0xb4, 0x40, 0xf0, 0x3b, 0x1f,
-		0x8a, 0xc0, 0x50, 0x3e, 0x05,
-		0xb4, 0x40, 0xf0, 0x3b, 0x80,
-		0x9a, 0xe0, 0x00, 0x3e, 0x05,
-		0xf0, 0x00, 0x00, 0x07, 0x00,
-		0xd8, 0x05, 0xc0, 0x00, 0x11,
-		0xf0, 0x00, 0x00, 0x0f, 0x00,
-		0x91, 0xb0, 0x20, 0x0e, 0x00,
-		0xfc, 0x06, 0x60, 0x0b, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0xf0, 0x00, 0x0c, 0x02, 0x00,
-		0xb0, 0xc0, 0x30, 0x0f, 0x01,
-		0xa4, 0x70, 0x00, 0x0f, 0x20,
-		0xd8, 0x02, 0xc0, 0x01, 0x00,
-		0xfc, 0x10, 0xac, 0x23, 0x08,
-		0xfc, 0x10, 0xac, 0x43, 0x10,
-		0xfc, 0x10, 0xac, 0x63, 0x18,
-		0xe0, 0x00, 0x00, 0x00, 0x02,
-		0xfc, 0x10, 0xae, 0x23, 0x88,
-		0x3d, 0x00, 0x04, 0x03, 0x20,
-		0xe0, 0x00, 0x00, 0x00, 0x10,
-		0x84, 0x82, 0x02, 0x01, 0x3b,
-		0xd8, 0x1a, 0x00, 0x01, 0x01,
-		0xb4, 0x00, 0x8c, 0x7d, 0x80,
-		0x91, 0xb0, 0x80, 0x22, 0x00,
-		0xfc, 0x06, 0x60, 0x23, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0x94, 0xf0, 0x92, 0x01, 0x21,
-		0xac, 0x40, 0x60, 0x26, 0x00,
-		0xa4, 0x30, 0x0c, 0x04, 0x06,
-		0xd8, 0x1a, 0x40, 0x01, 0x00,
-		0x94, 0xe0, 0xa2, 0x01, 0x21,
-		0xac, 0x20, 0x00, 0x28, 0x06,
-		0x84, 0xf2, 0x02, 0x01, 0x21,
-		0xd8, 0x0b, 0x40, 0x01, 0x00,
-		0xf0, 0x00, 0x0c, 0x02, 0x01,
-		0xf0, 0x00, 0x0c, 0x02, 0x02,
-		0xa0, 0x00, 0x08, 0x04, 0x00,
-		0x95, 0x00, 0xc6, 0x01, 0xff,
-		0xa0, 0x80, 0x10, 0x30, 0x00,
-		0xa0, 0x60, 0x1c, 0x00, 0x01,
-		0xf0, 0x0f, 0xf0, 0x33, 0xff,
-		0xb4, 0x00, 0xc0, 0x31, 0x81,
-		0xb0, 0x80, 0xb0, 0x32, 0x02,
-		0xa0, 0x20, 0x20, 0x2c, 0x00,
-		0x94, 0xf0, 0xd2, 0x01, 0x21,
-		0xd8, 0x0f, 0x40, 0x01, 0x00,
-		0x19, 0x40, 0x10, 0x04, 0x20,
-		0xa0, 0x00, 0x26, 0x04, 0x00,
-		0xd8, 0x0d, 0xc0, 0x01, 0x00,
-		0x00, 0x42, 0x10, 0x80, 0x02,
-		0xb0, 0x00, 0x46, 0x04, 0x00,
-		0xb0, 0x00, 0x56, 0x08, 0x00,
-		0xe0, 0x00, 0x00, 0x00, 0x04,
-		0xfc, 0x10, 0xae, 0x43, 0x90,
-		0x84, 0xf0, 0x32, 0x01, 0x21,
-		0xd8, 0x11, 0x40, 0x01, 0x00,
-		0xa0, 0x60, 0x3c, 0x00, 0x02,
-		0xa0, 0x20, 0x40, 0x10, 0x00,
-		0xa0, 0x20, 0x50, 0x14, 0x00,
-		0xd8, 0x12, 0x00, 0x00, 0x18,
-		0xa0, 0x00, 0x28, 0x0c, 0x00,
-		0xb0, 0x00, 0x48, 0x10, 0x00,
-		0xb0, 0x00, 0x58, 0x14, 0x00,
-		0xaa, 0xf0, 0x00, 0x14, 0x01,
-		0xd8, 0x1a, 0xc0, 0x01, 0x05,
-		0x85, 0x80, 0x42, 0x01, 0xff,
-		0x95, 0x00, 0x66, 0x01, 0xff,
-		0xba, 0xc0, 0x60, 0x1b, 0x01,
-		0x9a, 0x30, 0x60, 0x19, 0x30,
-		0x9a, 0xb0, 0x70, 0x1a, 0x30,
-		0x9b, 0x50, 0x78, 0x1e, 0x04,
-		0x8a, 0xe2, 0x08, 0x1e, 0x21,
-		0x6a, 0x4e, 0x00, 0x13, 0x00,
-		0xe0, 0x00, 0x00, 0x00, 0x30,
-		0x9b, 0x00, 0x7a, 0x92, 0x04,
-		0x3d, 0x00, 0x04, 0x1f, 0x20,
-		0x84, 0xe2, 0x02, 0x01, 0x21,
-		0xd8, 0x16, 0x80, 0x01, 0x00,
-		0xa4, 0x18, 0x0c, 0x7d, 0x80,
-		0xa0, 0x58, 0x1c, 0x00, 0x01,
-		0x01, 0x42, 0x00, 0xa0, 0x02,
-		0xe0, 0x00, 0x00, 0x00, 0x08,
-		0xfc, 0x10, 0xae, 0x63, 0x98,
-		0xd8, 0x1b, 0x00, 0xc2, 0x14,
-		0x84, 0xe2, 0x02, 0x01, 0x21,
-		0xd8, 0x05, 0xc0, 0x01, 0x00,
-		0x84, 0xa2, 0x02, 0x01, 0x21,
-		0xd8, 0x19, 0x40, 0x01, 0x01,
-		0xa0, 0x58, 0x0c, 0x00, 0x02,
-		0x1a, 0x40, 0x00, 0x04, 0x24,
-		0x33, 0x00, 0x01, 0x2f, 0x20,
-		0xd8, 0x05, 0xc0, 0x00, 0x18,
-		0xa0, 0x58, 0x0c, 0x00, 0x02,
-		0x1a, 0x40, 0x00, 0x04, 0x20,
-		0xd8, 0x05, 0xc0, 0x00, 0x18,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-	}
-};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
deleted file mode 100644
index d090d18..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * TX ucode for the Intel IXP2400 in POS-PHY mode.
- * Copyright (C) 2004, 2005 Lennert Buytenhek
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Assumptions made in this code:
- * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
- *   only one TBUF partition is used.  This includes, for example,
- *   1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
- *   is not an exhaustive list.)
- * - The TBUF uses 64-byte mpackets.
- * - TX descriptors reside in SRAM, and have the following format:
- *	struct tx_desc
- *	{
- *	// to uengine
- *		u32	buf_phys_addr;
- *		u32	pkt_length;
- *		u32	channel;
- *	};
- * - Packet data resides in DRAM.
- * - Packet buffer addresses are 8-byte aligned.
- * - Scratch ring 2 is tx_pending.
- * - Scratch ring 3 is tx_done, and has status condition 'full'.
- * - This code is run on all eight threads of the microengine it runs on.
- */
-
-#define TX_SEQUENCE_0		0x0060
-#define TBUF_CTRL		0x1800
-
-#define PARTITION_SIZE		128
-#define PARTITION_THRESH	96
-
-
-	.sig volatile sig1
-	.sig volatile sig2
-	.sig volatile sig3
-
-	.reg @old_tx_seq_0
-	.reg @mpkts_in_flight
-	.reg @next_tbuf_mpacket
-
-	.reg @buffer_handle
-	.reg @buffer_start
-	.reg @packet_length
-	.reg @channel
-	.reg @packet_offset
-
-	.reg zero
-
-	immed[zero, 0]
-
-	/*
-	 * Skip context 0 initialisation?
-	 */
-	.begin
-		br!=ctx[0, mpacket_tx_loop#]
-	.end
-
-	/*
-	 * Wait until all pending TBUF elements have been transmitted.
-	 */
-	.begin
-		.reg read $tx
-		.sig zzz
-
-	loop_empty#:
-		msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
-		alu_shf[--, --, b, $tx, >>31]
-		beq[loop_empty#]
-
-		alu[@old_tx_seq_0, --, b, $tx]
-	.end
-
-	immed[@mpkts_in_flight, 0]
-	alu[@next_tbuf_mpacket, @old_tx_seq_0, and, (PARTITION_SIZE - 1)]
-
-	immed[@buffer_handle, 0]
-
-	/*
-	 * Initialise signal pipeline.
-	 */
-	.begin
-		local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
-		.set_sig sig1
-
-		local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
-		.set_sig sig2
-
-		local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
-		.set_sig sig3
-	.end
-
-mpacket_tx_loop#:
-	.begin
-		.reg tbuf_element_index
-		.reg buffer_handle
-		.reg sop_eop
-		.reg packet_data
-		.reg channel
-		.reg mpacket_size
-
-		/*
-		 * If there is no packet currently being transmitted,
-		 * dequeue the next TX descriptor, and fetch the buffer
-		 * address, packet length and destination channel number.
-		 */
-		.begin
-			.reg read $stemp $stemp2 $stemp3
-			.xfer_order $stemp $stemp2 $stemp3
-			.sig zzz
-
-			ctx_arb[sig1]
-
-			alu[--, --, b, @buffer_handle]
-			bne[already_got_packet#]
-
-		tx_nobufs#:
-			scratch[get, $stemp, zero, 8, 1], ctx_swap[zzz]
-			alu[@buffer_handle, --, b, $stemp]
-			beq[tx_nobufs#]
-
-			sram[read, $stemp, $stemp, 0, 3], ctx_swap[zzz]
-			alu[@buffer_start, --, b, $stemp]
-			alu[@packet_length, --, b, $stemp2]
-			beq[zero_byte_packet#]
-			alu[@channel, --, b, $stemp3]
-			immed[@packet_offset, 0]
-
-		already_got_packet#:
-			local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
-		.end
-
-		/*
-		 * Determine tbuf element index, SOP/EOP flags, mpacket
-		 * offset and mpacket size and cache buffer_handle and
-		 * channel number.
-		 */
-		.begin
-			alu[tbuf_element_index, --, b, @next_tbuf_mpacket]
-			alu[@next_tbuf_mpacket, @next_tbuf_mpacket, +, 1]
-			alu[@next_tbuf_mpacket, @next_tbuf_mpacket, and,
-							(PARTITION_SIZE - 1)]
-
-			alu[buffer_handle, --, b, @buffer_handle]
-			immed[@buffer_handle, 0]
-
-			immed[sop_eop, 1]
-
-			alu[packet_data, --, b, @packet_offset]
-			bne[no_sop#]
-			alu[sop_eop, sop_eop, or, 2]
-		no_sop#:
-			alu[packet_data, packet_data, +, @buffer_start]
-
-			alu[channel, --, b, @channel]
-
-			alu[mpacket_size, @packet_length, -, @packet_offset]
-			alu[--, 64, -, mpacket_size]
-			bhs[eop#]
-			alu[@buffer_handle, --, b, buffer_handle]
-			immed[mpacket_size, 64]
-			alu[sop_eop, sop_eop, and, 2]
-		eop#:
-
-			alu[@packet_offset, @packet_offset, +, mpacket_size]
-		.end
-
-		/*
-		 * Wait until there's enough space in the TBUF.
-		 */
-		.begin
-			.reg read $tx
-			.reg temp
-			.sig zzz
-
-			ctx_arb[sig2]
-
-			br[test_space#]
-
-		loop_space#:
-			msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
-
-			alu[temp, $tx, -, @old_tx_seq_0]
-			alu[temp, temp, and, 0xff]
-			alu[@mpkts_in_flight, @mpkts_in_flight, -, temp]
-
-			alu[@old_tx_seq_0, --, b, $tx]
-
-		test_space#:
-			alu[--, PARTITION_THRESH, -, @mpkts_in_flight]
-			blo[loop_space#]
-
-			alu[@mpkts_in_flight, @mpkts_in_flight, +, 1]
-
-			local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
-		.end
-
-		/*
-		 * Copy the packet data to the TBUF.
-		 */
-		.begin
-			.reg temp
-			.sig copy_sig
-
-			alu[temp, mpacket_size, -, 1]
-			alu_shf[temp, 0x10, or, temp, >>3]
-			alu_shf[temp, 0x10, or, temp, <<21]
-			alu_shf[temp, temp, or, tbuf_element_index, <<11]
-			alu_shf[--, temp, or, 1, <<18]
-
-			dram[tbuf_wr, --, packet_data, 0, max_8],
-					indirect_ref, sig_done[copy_sig]
-			ctx_arb[copy_sig]
-		.end
-
-		/*
-		 * Mark TBUF element as ready-to-be-transmitted.
-		 */
-		.begin
-			.reg write $tsw $tsw2
-			.xfer_order $tsw $tsw2
-			.reg temp
-			.sig zzz
-
-			alu_shf[temp, channel, or, mpacket_size, <<24]
-			alu_shf[$tsw, temp, or, sop_eop, <<8]
-			immed[$tsw2, 0]
-
-			immed[temp, TBUF_CTRL]
-			alu_shf[temp, temp, or, tbuf_element_index, <<3]
-			msf[write, $tsw, temp, 0, 2], ctx_swap[zzz]
-		.end
-
-		/*
-		 * Resynchronise.
-		 */
-		.begin
-			ctx_arb[sig3]
-			local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
-		.end
-
-		/*
-		 * If this was an EOP mpacket, recycle the TX buffer
-	 	 * and signal the host.
-		 */
-		.begin
-			.reg write $stemp
-			.sig zzz
-
-			alu[--, sop_eop, and, 1]
-			beq[mpacket_tx_loop#]
-
-		tx_done_ring_full#:
-			br_inp_state[SCR_Ring3_Status, tx_done_ring_full#]
-
-			alu[$stemp, --, b, buffer_handle]
-			scratch[put, $stemp, zero, 12, 1], ctx_swap[zzz]
-			cap[fast_wr, 0, XSCALE_INT_A]
-			br[mpacket_tx_loop#]
-		.end
-	.end
-
-
-zero_byte_packet#:
-	halt
-
-
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
deleted file mode 100644
index a433e24..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
+++ /dev/null
@@ -1,98 +0,0 @@
-static struct ixp2000_uengine_code ixp2400_tx =
-{
-	.cpu_model_bitmask	= 0x000003fe,
-	.cpu_min_revision	= 0,
-	.cpu_max_revision	= 255,
-
-	.uengine_parameters	= IXP2000_UENGINE_8_CONTEXTS |
-				  IXP2000_UENGINE_PRN_UPDATE_EVERY |
-				  IXP2000_UENGINE_NN_FROM_PREVIOUS |
-				  IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
-				  IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
-				  IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
-
-	.initial_reg_values	= (struct ixp2000_reg_value []) {
-		{ -1, -1 }
-	},
-
-	.num_insns		= 77,
-	.insns			= (u8 []) {
-		0xf0, 0x00, 0x00, 0x07, 0x00,
-		0xd8, 0x03, 0x00, 0x00, 0x11,
-		0x3c, 0x40, 0x00, 0x04, 0xe0,
-		0x81, 0xf2, 0x02, 0x01, 0x00,
-		0xd8, 0x00, 0x80, 0x01, 0x00,
-		0xb0, 0x08, 0x06, 0x00, 0x00,
-		0xf0, 0x00, 0x0c, 0x00, 0x80,
-		0xb4, 0x49, 0x02, 0x03, 0x7f,
-		0xf0, 0x00, 0x02, 0x83, 0x00,
-		0xfc, 0x10, 0xac, 0x23, 0x08,
-		0xfc, 0x10, 0xac, 0x43, 0x10,
-		0xfc, 0x10, 0xac, 0x63, 0x18,
-		0xe0, 0x00, 0x00, 0x00, 0x02,
-		0xa0, 0x30, 0x02, 0x80, 0x00,
-		0xd8, 0x06, 0x00, 0x01, 0x01,
-		0x19, 0x40, 0x00, 0x04, 0x28,
-		0xb0, 0x0a, 0x06, 0x00, 0x00,
-		0xd8, 0x03, 0xc0, 0x01, 0x00,
-		0x00, 0x44, 0x00, 0x80, 0x80,
-		0xa0, 0x09, 0x06, 0x00, 0x00,
-		0xb0, 0x0b, 0x06, 0x04, 0x00,
-		0xd8, 0x13, 0x00, 0x01, 0x00,
-		0xb0, 0x0c, 0x06, 0x08, 0x00,
-		0xf0, 0x00, 0x0c, 0x00, 0xa0,
-		0xfc, 0x10, 0xae, 0x23, 0x88,
-		0xa0, 0x00, 0x12, 0x40, 0x00,
-		0xb0, 0xc9, 0x02, 0x43, 0x01,
-		0xb4, 0x49, 0x02, 0x43, 0x7f,
-		0xb0, 0x00, 0x22, 0x80, 0x00,
-		0xf0, 0x00, 0x02, 0x83, 0x00,
-		0xf0, 0x00, 0x0c, 0x04, 0x02,
-		0xb0, 0x40, 0x6c, 0x00, 0xa0,
-		0xd8, 0x08, 0x80, 0x01, 0x01,
-		0xaa, 0x00, 0x2c, 0x08, 0x02,
-		0xa0, 0xc0, 0x30, 0x18, 0x90,
-		0xa0, 0x00, 0x43, 0x00, 0x00,
-		0xba, 0xc0, 0x32, 0xc0, 0xa0,
-		0xaa, 0xb0, 0x00, 0x0f, 0x40,
-		0xd8, 0x0a, 0x80, 0x01, 0x04,
-		0xb0, 0x0a, 0x00, 0x08, 0x00,
-		0xf0, 0x00, 0x00, 0x0f, 0x40,
-		0xa4, 0x00, 0x2c, 0x08, 0x02,
-		0xa0, 0x8a, 0x00, 0x0c, 0xa0,
-		0xe0, 0x00, 0x00, 0x00, 0x04,
-		0xd8, 0x0c, 0x80, 0x00, 0x18,
-		0x3c, 0x40, 0x00, 0x04, 0xe0,
-		0xba, 0x80, 0x42, 0x01, 0x80,
-		0xb4, 0x40, 0x40, 0x13, 0xff,
-		0xaa, 0x88, 0x00, 0x10, 0x80,
-		0xb0, 0x08, 0x06, 0x00, 0x00,
-		0xaa, 0xf0, 0x0d, 0x80, 0x80,
-		0xd8, 0x0b, 0x40, 0x01, 0x05,
-		0xa0, 0x88, 0x0c, 0x04, 0x80,
-		0xfc, 0x10, 0xae, 0x43, 0x90,
-		0xba, 0xc0, 0x50, 0x0f, 0x01,
-		0x9a, 0x30, 0x50, 0x15, 0x30,
-		0x9a, 0xb0, 0x50, 0x16, 0x30,
-		0x9b, 0x50, 0x58, 0x16, 0x01,
-		0x8a, 0xe2, 0x08, 0x16, 0x21,
-		0x6b, 0x4e, 0x00, 0x83, 0x03,
-		0xe0, 0x00, 0x00, 0x00, 0x30,
-		0x9a, 0x80, 0x70, 0x0e, 0x04,
-		0x8b, 0x88, 0x08, 0x1e, 0x02,
-		0xf0, 0x00, 0x0c, 0x01, 0x81,
-		0xf0, 0x01, 0x80, 0x1f, 0x00,
-		0x9b, 0xd0, 0x78, 0x1e, 0x01,
-		0x3d, 0x42, 0x00, 0x1c, 0x20,
-		0xe0, 0x00, 0x00, 0x00, 0x08,
-		0xfc, 0x10, 0xae, 0x63, 0x98,
-		0xa4, 0x30, 0x0c, 0x04, 0x02,
-		0xd8, 0x03, 0x00, 0x01, 0x00,
-		0xd8, 0x11, 0xc1, 0x42, 0x14,
-		0xa0, 0x18, 0x00, 0x08, 0x00,
-		0x1a, 0x40, 0x00, 0x04, 0x2c,
-		0x33, 0x00, 0x01, 0x2f, 0x20,
-		0xd8, 0x03, 0x00, 0x00, 0x18,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-	}
-};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
deleted file mode 100644
index 4500837..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * IXP2000 MSF network device driver
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/moduleparam.h>
-#include <linux/gfp.h>
-#include <asm/hardware/uengine.h>
-#include <asm/io.h>
-#include "ixp2400_rx.ucode"
-#include "ixp2400_tx.ucode"
-#include "ixpdev_priv.h"
-#include "ixpdev.h"
-#include "pm3386.h"
-
-#define DRV_MODULE_VERSION	"0.2"
-
-static int nds_count;
-static struct net_device **nds;
-static int nds_open;
-static void (*set_port_admin_status)(int port, int up);
-
-static struct ixpdev_rx_desc * const rx_desc =
-	(struct ixpdev_rx_desc *)(IXP2000_SRAM0_VIRT_BASE + RX_BUF_DESC_BASE);
-static struct ixpdev_tx_desc * const tx_desc =
-	(struct ixpdev_tx_desc *)(IXP2000_SRAM0_VIRT_BASE + TX_BUF_DESC_BASE);
-static int tx_pointer;
-
-
-static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct ixpdev_priv *ip = netdev_priv(dev);
-	struct ixpdev_tx_desc *desc;
-	int entry;
-	unsigned long flags;
-
-	if (unlikely(skb->len > PAGE_SIZE)) {
-		/* @@@ Count drops.  */
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
-
-	entry = tx_pointer;
-	tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT;
-
-	desc = tx_desc + entry;
-	desc->pkt_length = skb->len;
-	desc->channel = ip->channel;
-
-	skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr));
-	dev_kfree_skb(skb);
-
-	ixp2000_reg_write(RING_TX_PENDING,
-		TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
-
-	local_irq_save(flags);
-	ip->tx_queue_entries++;
-	if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
-		netif_stop_queue(dev);
-	local_irq_restore(flags);
-
-	return NETDEV_TX_OK;
-}
-
-
-static int ixpdev_rx(struct net_device *dev, int processed, int budget)
-{
-	while (processed < budget) {
-		struct ixpdev_rx_desc *desc;
-		struct sk_buff *skb;
-		void *buf;
-		u32 _desc;
-
-		_desc = ixp2000_reg_read(RING_RX_DONE);
-		if (_desc == 0)
-			return 0;
-
-		desc = rx_desc +
-			((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc));
-		buf = phys_to_virt(desc->buf_addr);
-
-		if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) {
-			printk(KERN_ERR "ixp2000: rx err, length %d\n",
-					desc->pkt_length);
-			goto err;
-		}
-
-		if (desc->channel < 0 || desc->channel >= nds_count) {
-			printk(KERN_ERR "ixp2000: rx err, channel %d\n",
-					desc->channel);
-			goto err;
-		}
-
-		/* @@@ Make FCS stripping configurable.  */
-		desc->pkt_length -= 4;
-
-		if (unlikely(!netif_running(nds[desc->channel])))
-			goto err;
-
-		skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
-		if (likely(skb != NULL)) {
-			skb_copy_to_linear_data(skb, buf, desc->pkt_length);
-			skb_put(skb, desc->pkt_length);
-			skb->protocol = eth_type_trans(skb, nds[desc->channel]);
-
-			netif_receive_skb(skb);
-		}
-
-err:
-		ixp2000_reg_write(RING_RX_PENDING, _desc);
-		processed++;
-	}
-
-	return processed;
-}
-
-/* dev always points to nds[0].  */
-static int ixpdev_poll(struct napi_struct *napi, int budget)
-{
-	struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi);
-	struct net_device *dev = ip->dev;
-	int rx;
-
-	rx = 0;
-	do {
-		ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);
-
-		rx = ixpdev_rx(dev, rx, budget);
-		if (rx >= budget)
-			break;
-	} while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
-
-	napi_complete(napi);
-	ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
-
-	return rx;
-}
-
-static void ixpdev_tx_complete(void)
-{
-	int channel;
-	u32 wake;
-
-	wake = 0;
-	while (1) {
-		struct ixpdev_priv *ip;
-		u32 desc;
-		int entry;
-
-		desc = ixp2000_reg_read(RING_TX_DONE);
-		if (desc == 0)
-			break;
-
-		/* @@@ Check whether entries come back in order.  */
-		entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc);
-		channel = tx_desc[entry].channel;
-
-		if (channel < 0 || channel >= nds_count) {
-			printk(KERN_ERR "ixp2000: txcomp channel index "
-					"out of bounds (%d, %.8i, %d)\n",
-					channel, (unsigned int)desc, entry);
-			continue;
-		}
-
-		ip = netdev_priv(nds[channel]);
-		if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
-			wake |= 1 << channel;
-		ip->tx_queue_entries--;
-	}
-
-	for (channel = 0; wake != 0; channel++) {
-		if (wake & (1 << channel)) {
-			netif_wake_queue(nds[channel]);
-			wake &= ~(1 << channel);
-		}
-	}
-}
-
-static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
-{
-	u32 status;
-
-	status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
-	if (status == 0)
-		return IRQ_NONE;
-
-	/*
-	 * Any of the eight receive units signaled RX?
-	 */
-	if (status & 0x00ff) {
-		struct net_device *dev = nds[0];
-		struct ixpdev_priv *ip = netdev_priv(dev);
-
-		ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
-		if (likely(napi_schedule_prep(&ip->napi))) {
-			__napi_schedule(&ip->napi);
-		} else {
-			printk(KERN_CRIT "ixp2000: irq while polling!!\n");
-		}
-	}
-
-	/*
-	 * Any of the eight transmit units signaled TXdone?
-	 */
-	if (status & 0xff00) {
-		ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
-		ixpdev_tx_complete();
-	}
-
-	return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ixpdev_poll_controller(struct net_device *dev)
-{
-	disable_irq(IRQ_IXP2000_THDA0);
-	ixpdev_interrupt(IRQ_IXP2000_THDA0, dev);
-	enable_irq(IRQ_IXP2000_THDA0);
-}
-#endif
-
-static int ixpdev_open(struct net_device *dev)
-{
-	struct ixpdev_priv *ip = netdev_priv(dev);
-	int err;
-
-	napi_enable(&ip->napi);
-	if (!nds_open++) {
-		err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt,
-					IRQF_SHARED, "ixp2000_eth", nds);
-		if (err) {
-			nds_open--;
-			napi_disable(&ip->napi);
-			return err;
-		}
-
-		ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff);
-	}
-
-	set_port_admin_status(ip->channel, 1);
-	netif_start_queue(dev);
-
-	return 0;
-}
-
-static int ixpdev_close(struct net_device *dev)
-{
-	struct ixpdev_priv *ip = netdev_priv(dev);
-
-	netif_stop_queue(dev);
-	napi_disable(&ip->napi);
-	set_port_admin_status(ip->channel, 0);
-
-	if (!--nds_open) {
-		ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0xffff);
-		free_irq(IRQ_IXP2000_THDA0, nds);
-	}
-
-	return 0;
-}
-
-static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
-{
-	struct ixpdev_priv *ip = netdev_priv(dev);
-
-	pm3386_get_stats(ip->channel, &(dev->stats));
-
-	return &(dev->stats);
-}
-
-static const struct net_device_ops ixpdev_netdev_ops = {
-	.ndo_open		= ixpdev_open,
-	.ndo_stop		= ixpdev_close,
-	.ndo_start_xmit		= ixpdev_xmit,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
-	.ndo_get_stats		= ixpdev_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ixpdev_poll_controller,
-#endif
-};
-
-struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
-{
-	struct net_device *dev;
-	struct ixpdev_priv *ip;
-
-	dev = alloc_etherdev(sizeof_priv);
-	if (dev == NULL)
-		return NULL;
-
-	dev->netdev_ops = &ixpdev_netdev_ops;
-
-	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
-
-	ip = netdev_priv(dev);
-	ip->dev = dev;
-	netif_napi_add(dev, &ip->napi, ixpdev_poll, 64);
-	ip->channel = channel;
-	ip->tx_queue_entries = 0;
-
-	return dev;
-}
-
-int ixpdev_init(int __nds_count, struct net_device **__nds,
-		void (*__set_port_admin_status)(int port, int up))
-{
-	int i;
-	int err;
-
-	BUILD_BUG_ON(RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192);
-
-	printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION);
-
-	nds_count = __nds_count;
-	nds = __nds;
-	set_port_admin_status = __set_port_admin_status;
-
-	for (i = 0; i < RX_BUF_COUNT; i++) {
-		void *buf;
-
-		buf = (void *)get_zeroed_page(GFP_KERNEL);
-		if (buf == NULL) {
-			err = -ENOMEM;
-			while (--i >= 0)
-				free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
-			goto err_out;
-		}
-		rx_desc[i].buf_addr = virt_to_phys(buf);
-		rx_desc[i].buf_length = PAGE_SIZE;
-	}
-
-	/* @@@ Maybe we shouldn't be preallocating TX buffers.  */
-	for (i = 0; i < TX_BUF_COUNT; i++) {
-		void *buf;
-
-		buf = (void *)get_zeroed_page(GFP_KERNEL);
-		if (buf == NULL) {
-			err = -ENOMEM;
-			while (--i >= 0)
-				free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
-			goto err_free_rx;
-		}
-		tx_desc[i].buf_addr = virt_to_phys(buf);
-	}
-
-	/* 256 entries, ring status set means 'empty', base address 0x0000.  */
-	ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
-	ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
-	ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);
-
-	/* 256 entries, ring status set means 'full', base address 0x0400.  */
-	ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
-	ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
-	ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);
-
-	for (i = 0; i < RX_BUF_COUNT; i++) {
-		ixp2000_reg_write(RING_RX_PENDING,
-			RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
-	}
-
-	ixp2000_uengine_load(0, &ixp2400_rx);
-	ixp2000_uengine_start_contexts(0, 0xff);
-
-	/* 256 entries, ring status set means 'empty', base address 0x0800.  */
-	ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
-	ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
-	ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);
-
-	/* 256 entries, ring status set means 'full', base address 0x0c00.  */
-	ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
-	ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
-	ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);
-
-	ixp2000_uengine_load(1, &ixp2400_tx);
-	ixp2000_uengine_start_contexts(1, 0xff);
-
-	for (i = 0; i < nds_count; i++) {
-		err = register_netdev(nds[i]);
-		if (err) {
-			while (--i >= 0)
-				unregister_netdev(nds[i]);
-			goto err_free_tx;
-		}
-	}
-
-	for (i = 0; i < nds_count; i++) {
-		printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), %pM.\n",
-				 nds[i]->name, i, nds[i]->dev_addr);
-	}
-
-	return 0;
-
-err_free_tx:
-	for (i = 0; i < TX_BUF_COUNT; i++)
-		free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
-
-err_free_rx:
-	for (i = 0; i < RX_BUF_COUNT; i++)
-		free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
-
-err_out:
-	return err;
-} 
-
-void ixpdev_deinit(void)
-{
-	int i;
-
-	/* @@@ Flush out pending packets.  */
-
-	for (i = 0; i < nds_count; i++)
-		unregister_netdev(nds[i]);
-
-	ixp2000_uengine_stop_contexts(1, 0xff);
-	ixp2000_uengine_stop_contexts(0, 0xff);
-	ixp2000_uengine_reset(0x3);
-
-	for (i = 0; i < TX_BUF_COUNT; i++)
-		free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
-
-	for (i = 0; i < RX_BUF_COUNT; i++)
-		free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
-}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
deleted file mode 100644
index 391ece6..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * IXP2000 MSF network device driver
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __IXPDEV_H
-#define __IXPDEV_H
-
-struct ixpdev_priv
-{
-	struct net_device *dev;
-	struct napi_struct napi;
-	int	channel;
-	int	tx_queue_entries;
-};
-
-struct net_device *ixpdev_alloc(int channel, int sizeof_priv);
-int ixpdev_init(int num_ports, struct net_device **nds,
-		void (*set_port_admin_status)(int port, int up));
-void ixpdev_deinit(void);
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
deleted file mode 100644
index 86aa08e..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * IXP2000 MSF network device driver
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __IXPDEV_PRIV_H
-#define __IXPDEV_PRIV_H
-
-#define RX_BUF_DESC_BASE	0x00001000
-#define RX_BUF_COUNT		((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc)))
-#define TX_BUF_DESC_BASE	0x00002000
-#define TX_BUF_COUNT		((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc)))
-#define TX_BUF_COUNT_PER_CHAN	(TX_BUF_COUNT / 4)
-
-#define RING_RX_PENDING		((u32 *)IXP2000_SCRATCH_RING_VIRT_BASE)
-#define RING_RX_DONE		((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 4))
-#define RING_TX_PENDING		((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 8))
-#define RING_TX_DONE		((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 12))
-
-#define SCRATCH_REG(x)		((u32 *)(IXP2000_GLOBAL_REG_VIRT_BASE | 0x0800 | (x)))
-#define RING_RX_PENDING_BASE	SCRATCH_REG(0x00)
-#define RING_RX_PENDING_HEAD	SCRATCH_REG(0x04)
-#define RING_RX_PENDING_TAIL	SCRATCH_REG(0x08)
-#define RING_RX_DONE_BASE	SCRATCH_REG(0x10)
-#define RING_RX_DONE_HEAD	SCRATCH_REG(0x14)
-#define RING_RX_DONE_TAIL	SCRATCH_REG(0x18)
-#define RING_TX_PENDING_BASE	SCRATCH_REG(0x20)
-#define RING_TX_PENDING_HEAD	SCRATCH_REG(0x24)
-#define RING_TX_PENDING_TAIL	SCRATCH_REG(0x28)
-#define RING_TX_DONE_BASE	SCRATCH_REG(0x30)
-#define RING_TX_DONE_HEAD	SCRATCH_REG(0x34)
-#define RING_TX_DONE_TAIL	SCRATCH_REG(0x38)
-
-struct ixpdev_rx_desc
-{
-	u32	buf_addr;
-	u32	buf_length;
-	u32	channel;
-	u32	pkt_length;
-};
-
-struct ixpdev_tx_desc
-{
-	u32	buf_addr;
-	u32	pkt_length;
-	u32	channel;
-	u32	unused;
-};
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.c b/drivers/net/ethernet/xscale/ixp2000/pm3386.c
deleted file mode 100644
index e08d3f9..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/pm3386.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Helper functions for the PM3386s on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <asm/io.h>
-#include "pm3386.h"
-
-/*
- * Read from register 'reg' of PM3386 device 'pm'.
- */
-static u16 pm3386_reg_read(int pm, int reg)
-{
-	void *_reg;
-	u16 value;
-
-	_reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
-	if (pm == 1)
-		_reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
-
-	value = *((volatile u16 *)(_reg + (reg << 1)));
-
-//	printk(KERN_INFO "pm3386_reg_read(%d, %.3x) = %.8x\n", pm, reg, value);
-
-	return value;
-}
-
-/*
- * Write to register 'reg' of PM3386 device 'pm', and perform
- * a readback from the identification register.
- */
-static void pm3386_reg_write(int pm, int reg, u16 value)
-{
-	void *_reg;
-	u16 dummy;
-
-//	printk(KERN_INFO "pm3386_reg_write(%d, %.3x, %.8x)\n", pm, reg, value);
-
-	_reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
-	if (pm == 1)
-		_reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
-
-	*((volatile u16 *)(_reg + (reg << 1))) = value;
-
-	dummy = *((volatile u16 *)_reg);
-	__asm__ __volatile__("mov %0, %0" : "+r" (dummy));
-}
-
-/*
- * Read from port 'port' register 'reg', where the registers
- * for the different ports are 'spacing' registers apart.
- */
-static u16 pm3386_port_reg_read(int port, int _reg, int spacing)
-{
-	int reg;
-
-	reg = _reg;
-	if (port & 1)
-		reg += spacing;
-
-	return pm3386_reg_read(port >> 1, reg);
-}
-
-/*
- * Write to port 'port' register 'reg', where the registers
- * for the different ports are 'spacing' registers apart.
- */
-static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
-{
-	int reg;
-
-	reg = _reg;
-	if (port & 1)
-		reg += spacing;
-
-	pm3386_reg_write(port >> 1, reg, value);
-}
-
-int pm3386_secondary_present(void)
-{
-	return pm3386_reg_read(1, 0) == 0x3386;
-}
-
-void pm3386_reset(void)
-{
-	u8 mac[3][6];
-	int secondary;
-
-	secondary = pm3386_secondary_present();
-
-	/* Save programmed MAC addresses.  */
-	pm3386_get_mac(0, mac[0]);
-	pm3386_get_mac(1, mac[1]);
-	if (secondary)
-		pm3386_get_mac(2, mac[2]);
-
-	/* Assert analog and digital reset.  */
-	pm3386_reg_write(0, 0x002, 0x0060);
-	if (secondary)
-		pm3386_reg_write(1, 0x002, 0x0060);
-	mdelay(1);
-
-	/* Deassert analog reset.  */
-	pm3386_reg_write(0, 0x002, 0x0062);
-	if (secondary)
-		pm3386_reg_write(1, 0x002, 0x0062);
-	mdelay(10);
-
-	/* Deassert digital reset.  */
-	pm3386_reg_write(0, 0x002, 0x0063);
-	if (secondary)
-		pm3386_reg_write(1, 0x002, 0x0063);
-	mdelay(10);
-
-	/* Restore programmed MAC addresses.  */
-	pm3386_set_mac(0, mac[0]);
-	pm3386_set_mac(1, mac[1]);
-	if (secondary)
-		pm3386_set_mac(2, mac[2]);
-
-	/* Disable carrier on all ports.  */
-	pm3386_set_carrier(0, 0);
-	pm3386_set_carrier(1, 0);
-	if (secondary)
-		pm3386_set_carrier(2, 0);
-}
-
-static u16 swaph(u16 x)
-{
-	return ((x << 8) | (x >> 8)) & 0xffff;
-}
-
-int pm3386_port_count(void)
-{
-	return 2 + pm3386_secondary_present();
-}
-
-void pm3386_init_port(int port)
-{
-	int pm = port >> 1;
-
-	/*
-	 * Work around ENP2611 bootloader programming MAC address
-	 * in reverse.
-	 */
-	if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 &&
-	    (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) {
-		u16 temp[3];
-
-		temp[0] = pm3386_port_reg_read(port, 0x308, 0x100);
-		temp[1] = pm3386_port_reg_read(port, 0x309, 0x100);
-		temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100);
-		pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2]));
-		pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1]));
-		pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0]));
-	}
-
-	/*
-	 * Initialise narrowbanding mode.  See application note 2010486
-	 * for more information.  (@@@ We also need to issue a reset
-	 * when ROOL or DOOL are detected.)
-	 */
-	pm3386_port_reg_write(port, 0x708, 0x10, 0xd055);
-	udelay(500);
-	pm3386_port_reg_write(port, 0x708, 0x10, 0x5055);
-
-	/*
-	 * SPI-3 ingress block.  Set 64 bytes SPI-3 burst size
-	 * towards SPI-3 bridge.
-	 */
-	pm3386_port_reg_write(port, 0x122, 0x20, 0x0002);
-
-	/*
-	 * Enable ingress protocol checking, and soft reset the
-	 * SPI-3 ingress block.
-	 */
-	pm3386_reg_write(pm, 0x103, 0x0003);
-	while (!(pm3386_reg_read(pm, 0x103) & 0x80))
-		;
-
-	/*
-	 * SPI-3 egress block.  Gather 12288 bytes of the current
-	 * packet in the TX fifo before initiating transmit on the
-	 * SERDES interface.  (Prevents TX underflows.)
-	 */
-	pm3386_port_reg_write(port, 0x221, 0x20, 0x0007);
-
-	/*
-	 * Enforce odd parity from the SPI-3 bridge, and soft reset
-	 * the SPI-3 egress block.
-	 */
-	pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1)));
-	while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c)
-		;
-
-	/*
-	 * EGMAC block.  Set this channels to reject long preambles,
-	 * not send or transmit PAUSE frames, enable preamble checking,
-	 * disable frame length checking, enable FCS appending, enable
-	 * TX frame padding.
-	 */
-	pm3386_port_reg_write(port, 0x302, 0x100, 0x0113);
-
-	/*
-	 * Soft reset the EGMAC block.
-	 */
-	pm3386_port_reg_write(port, 0x301, 0x100, 0x8000);
-	pm3386_port_reg_write(port, 0x301, 0x100, 0x0000);
-
-	/*
-	 * Auto-sense autonegotiation status.
-	 */
-	pm3386_port_reg_write(port, 0x306, 0x100, 0x0100);
-
-	/*
-	 * Allow reception of jumbo frames.
-	 */
-	pm3386_port_reg_write(port, 0x310, 0x100, 9018);
-
-	/*
-	 * Allow transmission of jumbo frames.
-	 */
-	pm3386_port_reg_write(port, 0x336, 0x100, 9018);
-
-	/* @@@ Should set 0x337/0x437 (RX forwarding threshold.)  */
-
-	/*
-	 * Set autonegotiation parameters to 'no PAUSE, full duplex.'
-	 */
-	pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020);
-
-	/*
-	 * Enable and restart autonegotiation.
-	 */
-	pm3386_port_reg_write(port, 0x318, 0x100, 0x0003);
-	pm3386_port_reg_write(port, 0x318, 0x100, 0x0002);
-}
-
-void pm3386_get_mac(int port, u8 *mac)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x308, 0x100);
-	mac[0] = temp & 0xff;
-	mac[1] = (temp >> 8) & 0xff;
-
-	temp = pm3386_port_reg_read(port, 0x309, 0x100);
-	mac[2] = temp & 0xff;
-	mac[3] = (temp >> 8) & 0xff;
-
-	temp = pm3386_port_reg_read(port, 0x30a, 0x100);
-	mac[4] = temp & 0xff;
-	mac[5] = (temp >> 8) & 0xff;
-}
-
-void pm3386_set_mac(int port, u8 *mac)
-{
-	pm3386_port_reg_write(port, 0x308, 0x100, (mac[1] << 8) | mac[0]);
-	pm3386_port_reg_write(port, 0x309, 0x100, (mac[3] << 8) | mac[2]);
-	pm3386_port_reg_write(port, 0x30a, 0x100, (mac[5] << 8) | mac[4]);
-}
-
-static u32 pm3386_get_stat(int port, u16 base)
-{
-	u32 value;
-
-	value = pm3386_port_reg_read(port, base, 0x100);
-	value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16;
-
-	return value;
-}
-
-void pm3386_get_stats(int port, struct net_device_stats *stats)
-{
-	/*
-	 * Snapshot statistics counters.
-	 */
-	pm3386_port_reg_write(port, 0x500, 0x100, 0x0001);
-	while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001)
-		;
-
-	memset(stats, 0, sizeof(*stats));
-
-	stats->rx_packets = pm3386_get_stat(port, 0x510);
-	stats->tx_packets = pm3386_get_stat(port, 0x590);
-	stats->rx_bytes = pm3386_get_stat(port, 0x514);
-	stats->tx_bytes = pm3386_get_stat(port, 0x594);
-	/* @@@ Add other stats.  */
-}
-
-void pm3386_set_carrier(int port, int state)
-{
-	pm3386_port_reg_write(port, 0x703, 0x10, state ? 0x1001 : 0x0000);
-}
-
-int pm3386_is_link_up(int port)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x31a, 0x100);
-	temp = pm3386_port_reg_read(port, 0x31a, 0x100);
-
-	return !!(temp & 0x0002);
-}
-
-void pm3386_enable_rx(int port)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x303, 0x100);
-	temp |= 0x1000;
-	pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-void pm3386_disable_rx(int port)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x303, 0x100);
-	temp &= 0xefff;
-	pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-void pm3386_enable_tx(int port)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x303, 0x100);
-	temp |= 0x4000;
-	pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-void pm3386_disable_tx(int port)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x303, 0x100);
-	temp &= 0xbfff;
-	pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.h b/drivers/net/ethernet/xscale/ixp2000/pm3386.h
deleted file mode 100644
index cc4183d..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/pm3386.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Helper functions for the PM3386s on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __PM3386_H
-#define __PM3386_H
-
-void pm3386_reset(void);
-int pm3386_port_count(void);
-void pm3386_init_port(int port);
-void pm3386_get_mac(int port, u8 *mac);
-void pm3386_set_mac(int port, u8 *mac);
-void pm3386_get_stats(int port, struct net_device_stats *stats);
-void pm3386_set_carrier(int port, int state);
-int pm3386_is_link_up(int port);
-void pm3386_enable_rx(int port);
-void pm3386_disable_rx(int port);
-void pm3386_enable_tx(int port);
-void pm3386_disable_tx(int port);
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 41a8b5a..482648f 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1002,12 +1002,41 @@
 	return phy_start_aneg(port->phydev);
 }
 
+int ixp46x_phc_index = -1;
+
+static int ixp4xx_get_ts_info(struct net_device *dev,
+			      struct ethtool_ts_info *info)
+{
+	if (!cpu_is_ixp46x()) {
+		info->so_timestamping =
+			SOF_TIMESTAMPING_TX_SOFTWARE |
+			SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE;
+		info->phc_index = -1;
+		return 0;
+	}
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = ixp46x_phc_index;
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
+	return 0;
+}
+
 static const struct ethtool_ops ixp4xx_ethtool_ops = {
 	.get_drvinfo = ixp4xx_get_drvinfo,
 	.get_settings = ixp4xx_get_settings,
 	.set_settings = ixp4xx_set_settings,
 	.nway_reset = ixp4xx_nway_reset,
 	.get_link = ethtool_op_get_link,
+	.get_ts_info = ixp4xx_get_ts_info,
 };
 
 
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 168c8f4..d471963 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -113,10 +113,9 @@
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	if (pci_request_regions(pdev, "rrunner")) {
-		ret = -EIO;
+	ret = pci_request_regions(pdev, "rrunner");
+	if (ret < 0)
 		goto out;
-	}
 
 	pci_set_drvdata(pdev, dev);
 
@@ -124,11 +123,8 @@
 
 	spin_lock_init(&rrpriv->lock);
 
-	dev->irq = pdev->irq;
 	dev->netdev_ops = &rr_netdev_ops;
 
-	dev->base_addr = pci_resource_start(pdev, 0);
-
 	/* display version info if adapter is found */
 	if (!version_disp) {
 		/* set display flag to TRUE so that */
@@ -146,16 +142,15 @@
 	pci_set_master(pdev);
 
 	printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
-	       "at 0x%08lx, irq %i, PCI latency %i\n", dev->name,
-	       dev->base_addr, dev->irq, pci_latency);
+	       "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
+	       (unsigned long long)pci_resource_start(pdev, 0),
+	       pdev->irq, pci_latency);
 
 	/*
-	 * Remap the regs into kernel space.
+	 * Remap the MMIO regs into kernel space.
 	 */
-
-	rrpriv->regs = ioremap(dev->base_addr, 0x1000);
-
-	if (!rrpriv->regs){
+	rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
+	if (!rrpriv->regs) {
 		printk(KERN_ERR "%s:  Unable to map I/O register, "
 			"RoadRunner will be disabled.\n", dev->name);
 		ret = -EIO;
@@ -202,8 +197,6 @@
 
 	rr_init(dev);
 
-	dev->base_addr = 0;
-
 	ret = register_netdev(dev);
 	if (ret)
 		goto out;
@@ -217,7 +210,7 @@
 		pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
 				    rrpriv->tx_ring_dma);
 	if (rrpriv->regs)
-		iounmap(rrpriv->regs);
+		pci_iounmap(pdev, rrpriv->regs);
 	if (pdev) {
 		pci_release_regions(pdev);
 		pci_set_drvdata(pdev, NULL);
@@ -231,29 +224,26 @@
 static void __devexit rr_remove_one (struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
+	struct rr_private *rr = netdev_priv(dev);
 
-	if (dev) {
-		struct rr_private *rr = netdev_priv(dev);
-
-		if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){
-			printk(KERN_ERR "%s: trying to unload running NIC\n",
-			       dev->name);
-			writel(HALT_NIC, &rr->regs->HostCtrl);
-		}
-
-		pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
-				    rr->evt_ring_dma);
-		pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
-				    rr->rx_ring_dma);
-		pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
-				    rr->tx_ring_dma);
-		unregister_netdev(dev);
-		iounmap(rr->regs);
-		free_netdev(dev);
-		pci_release_regions(pdev);
-		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
+	if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
+		printk(KERN_ERR "%s: trying to unload running NIC\n",
+		       dev->name);
+		writel(HALT_NIC, &rr->regs->HostCtrl);
 	}
+
+	unregister_netdev(dev);
+	pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
+			    rr->evt_ring_dma);
+	pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
+			    rr->rx_ring_dma);
+	pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
+			    rr->tx_ring_dma);
+	pci_iounmap(pdev, rr->regs);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(dev);
 }
 
 
@@ -1229,9 +1219,9 @@
 	readl(&regs->HostCtrl);
 	spin_unlock_irqrestore(&rrpriv->lock, flags);
 
-	if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
+	if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
 		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
-		       dev->name, dev->irq);
+		       dev->name, pdev->irq);
 		ecode = -EAGAIN;
 		goto error;
 	}
@@ -1338,16 +1328,15 @@
 
 static int rr_close(struct net_device *dev)
 {
-	struct rr_private *rrpriv;
-	struct rr_regs __iomem *regs;
+	struct rr_private *rrpriv = netdev_priv(dev);
+	struct rr_regs __iomem *regs = rrpriv->regs;
+	struct pci_dev *pdev = rrpriv->pci_dev;
 	unsigned long flags;
 	u32 tmp;
 	short i;
 
 	netif_stop_queue(dev);
 
-	rrpriv = netdev_priv(dev);
-	regs = rrpriv->regs;
 
 	/*
 	 * Lock to make sure we are not cleaning up while another CPU
@@ -1386,15 +1375,15 @@
 	rr_raz_tx(rrpriv, dev);
 	rr_raz_rx(rrpriv, dev);
 
-	pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl),
+	pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
 			    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
 	rrpriv->rx_ctrl = NULL;
 
-	pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info),
-			    rrpriv->info, rrpriv->info_dma);
+	pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
+			    rrpriv->info_dma);
 	rrpriv->info = NULL;
 
-	free_irq(dev->irq, dev);
+	free_irq(pdev->irq, dev);
 	spin_unlock_irqrestore(&rrpriv->lock, flags);
 
 	return 0;
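
The rrunner conversion above drops the deprecated dev->base_addr/dev->irq fields in favour of pci_resource_start()/pdev->irq and maps BAR 0 with pci_iomap(), paired with pci_iounmap() on the error and remove paths. A minimal, hypothetical probe sketch of that pattern follows; the function name, the "example" region label and the elided setup are illustrative, not taken from the driver.

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, "example");
	if (ret < 0)
		goto err_disable;

	/* Map the first 4 KiB of BAR 0 instead of ioremap()ing a hand-kept
	 * dev->base_addr; the PCI core tracks the mapping for us. */
	regs = pci_iomap(pdev, 0, 0x1000);
	if (!regs) {
		ret = -EIO;
		goto err_release;
	}

	/* ... set up the device with readl()/writel() on 'regs', request
	 *     pdev->irq, register_netdev(), etc. (elided in this sketch) ... */
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return ret;
}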
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d025c83..8b91947 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -428,6 +428,24 @@
 	return 0;
 }
 
+
+#define RING_AVAIL_PERCENT_HIWATER 20
+#define RING_AVAIL_PERCENT_LOWATER 10
+
+/*
+ * Get the percentage of available bytes to write in the ring.
+ * The return value is in range from 0 to 100.
+ */
+static inline u32 hv_ringbuf_avail_percent(
+		struct hv_ring_buffer_info *ring_info)
+{
+	u32 avail_read, avail_write;
+
+	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
+
+	return avail_write * 100 / ring_info->ring_datasize;
+}
+
 static void netvsc_send_completion(struct hv_device *device,
 				   struct vmpacket_descriptor *packet)
 {
@@ -455,6 +473,8 @@
 		complete(&net_device->channel_init_wait);
 	} else if (nvsp_packet->hdr.msg_type ==
 		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
+		int num_outstanding_sends;
+
 		/* Get the send context */
 		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
 			packet->trans_id;
@@ -463,10 +483,14 @@
 		nvsc_packet->completion.send.send_completion(
 			nvsc_packet->completion.send.send_completion_ctx);
 
-		atomic_dec(&net_device->num_outstanding_sends);
+		num_outstanding_sends =
+			atomic_dec_return(&net_device->num_outstanding_sends);
 
-		if (netif_queue_stopped(ndev) && !net_device->start_remove)
-			netif_wake_queue(ndev);
+		if (netif_queue_stopped(ndev) && !net_device->start_remove &&
+			(hv_ringbuf_avail_percent(&device->channel->outbound)
+			> RING_AVAIL_PERCENT_HIWATER ||
+			num_outstanding_sends < 1))
+				netif_wake_queue(ndev);
 	} else {
 		netdev_err(ndev, "Unknown send completion packet type- "
 			   "%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -519,10 +543,19 @@
 
 	if (ret == 0) {
 		atomic_inc(&net_device->num_outstanding_sends);
+		if (hv_ringbuf_avail_percent(&device->channel->outbound) <
+			RING_AVAIL_PERCENT_LOWATER) {
+			netif_stop_queue(ndev);
+			if (atomic_read(&net_device->
+				num_outstanding_sends) < 1)
+				netif_wake_queue(ndev);
+		}
 	} else if (ret == -EAGAIN) {
 		netif_stop_queue(ndev);
-		if (atomic_read(&net_device->num_outstanding_sends) < 1)
+		if (atomic_read(&net_device->num_outstanding_sends) < 1) {
 			netif_wake_queue(ndev);
+			ret = -ENOSPC;
+		}
 	} else {
 		netdev_err(ndev, "Unable to send packet %p ret %d\n",
 			   packet, ret);
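
The netvsc change above introduces watermark-based transmit flow control: the queue is stopped once less than RING_AVAIL_PERCENT_LOWATER (10%) of the outbound ring is writable, and woken again only when more than RING_AVAIL_PERCENT_HIWATER (20%) is free (or no sends remain outstanding), which avoids toggling the queue state on every completion. A rough sketch of that hysteresis is below; the caller and its completion_path flag are hypothetical, and the outstanding-send check is omitted for brevity.

static void example_tx_flow_control(struct net_device *ndev,
				    struct hv_ring_buffer_info *outbound,
				    bool completion_path)
{
	u32 avail = hv_ringbuf_avail_percent(outbound);

	/* send path: ring nearly full, stop accepting packets */
	if (!completion_path && avail < RING_AVAIL_PERCENT_LOWATER)
		netif_stop_queue(ndev);
	/* completion path: enough space drained, resume the queue */
	else if (completion_path && netif_queue_stopped(ndev) &&
		 avail > RING_AVAIL_PERCENT_HIWATER)
		netif_wake_queue(ndev);
}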
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dd29478..8f8ed33 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -44,6 +44,7 @@
 	/* point back to our device context */
 	struct hv_device *device_ctx;
 	struct delayed_work dwork;
+	struct work_struct work;
 };
 
 
@@ -51,30 +52,22 @@
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 
-struct set_multicast_work {
-	struct work_struct work;
-	struct net_device *net;
-};
-
 static void do_set_multicast(struct work_struct *w)
 {
-	struct set_multicast_work *swk =
-		container_of(w, struct set_multicast_work, work);
-	struct net_device *net = swk->net;
-
-	struct net_device_context *ndevctx = netdev_priv(net);
+	struct net_device_context *ndevctx =
+		container_of(w, struct net_device_context, work);
 	struct netvsc_device *nvdev;
 	struct rndis_device *rdev;
 
 	nvdev = hv_get_drvdata(ndevctx->device_ctx);
-	if (nvdev == NULL)
-		goto out;
+	if (nvdev == NULL || nvdev->ndev == NULL)
+		return;
 
 	rdev = nvdev->extension;
 	if (rdev == NULL)
-		goto out;
+		return;
 
-	if (net->flags & IFF_PROMISC)
+	if (nvdev->ndev->flags & IFF_PROMISC)
 		rndis_filter_set_packet_filter(rdev,
 			NDIS_PACKET_TYPE_PROMISCUOUS);
 	else
@@ -82,21 +75,13 @@
 			NDIS_PACKET_TYPE_BROADCAST |
 			NDIS_PACKET_TYPE_ALL_MULTICAST |
 			NDIS_PACKET_TYPE_DIRECTED);
-
-out:
-	kfree(w);
 }
 
 static void netvsc_set_multicast_list(struct net_device *net)
 {
-	struct set_multicast_work *swk =
-		kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
-	if (swk == NULL)
-		return;
+	struct net_device_context *net_device_ctx = netdev_priv(net);
 
-	swk->net = net;
-	INIT_WORK(&swk->work, do_set_multicast);
-	schedule_work(&swk->work);
+	schedule_work(&net_device_ctx->work);
 }
 
 static int netvsc_open(struct net_device *net)
@@ -125,6 +110,8 @@
 
 	netif_tx_disable(net);
 
+	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
+	cancel_work_sync(&net_device_ctx->work);
 	ret = rndis_filter_close(device_obj);
 	if (ret != 0)
 		netdev_err(net, "unable to close device (ret %d).\n", ret);
@@ -224,9 +211,13 @@
 		net->stats.tx_packets++;
 	} else {
 		kfree(packet);
+		if (ret != -EAGAIN) {
+			dev_kfree_skb_any(skb);
+			net->stats.tx_dropped++;
+		}
 	}
 
-	return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK;
+	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
 }
 
 /*
@@ -335,6 +326,7 @@
 
 	nvdev->start_remove = true;
 	cancel_delayed_work_sync(&ndevctx->dwork);
+	cancel_work_sync(&ndevctx->work);
 	netif_tx_disable(ndev);
 	rndis_filter_device_remove(hdev);
 
@@ -403,6 +395,7 @@
 	net_device_ctx->device_ctx = dev;
 	hv_set_drvdata(dev, net);
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
+	INIT_WORK(&net_device_ctx->work, do_set_multicast);
 
 	net->netdev_ops = &device_ops;
 
@@ -456,6 +449,7 @@
 
 	ndev_ctx = netdev_priv(net);
 	cancel_delayed_work_sync(&ndev_ctx->dwork);
+	cancel_work_sync(&ndev_ctx->work);
 
 	/* Stop outbound asap */
 	netif_tx_disable(net);
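
The netvsc_drv refactor above replaces the per-call kmalloc'd set_multicast_work wrapper with a work_struct embedded in the long-lived per-device context, recovered via container_of() in the handler and flushed with cancel_work_sync() on close/remove so the filter cannot be re-enabled after teardown. A generic sketch of that pattern, with purely illustrative names:

struct example_ctx {
	struct net_device *ndev;
	struct work_struct work;
};

static void example_work_fn(struct work_struct *w)
{
	/* recover the owning context; it outlives any queued work */
	struct example_ctx *ctx = container_of(w, struct example_ctx, work);

	netdev_info(ctx->ndev, "deferred work ran\n");
}

static void example_init(struct example_ctx *ctx)
{
	INIT_WORK(&ctx->work, example_work_fn);	/* once, at setup time */
}

static void example_teardown(struct example_ctx *ctx)
{
	cancel_work_sync(&ctx->work);	/* also waits for a running instance */
}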
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 4351296..510b9c8 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1710,7 +1710,7 @@
 
 /* Flush all packets */
   while ((i--) && (self->txpending))
-    udelay (10000);
+    msleep(10);
 
   spin_lock_irqsave(&self->spinlock, flags);
 
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 725d6b3..eb315b8 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -737,7 +737,7 @@
 	netif_stop_queue(ndev);
 	pm_runtime_put_sync(&self->pdev->dev);
 
-	dev_info(&ndev->dev, "stoped\n");
+	dev_info(&ndev->dev, "stopped\n");
 
 	return 0;
 }
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index e6661b5..256eddf 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -685,7 +685,7 @@
 
 	netif_stop_queue(ndev);
 
-	dev_info(&ndev->dev, "stoped\n");
+	dev_info(&ndev->dev, "stopped\n");
 
 	return 0;
 }
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f975afd..9653ed6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -312,7 +312,8 @@
 	int err;
 
 	if (vlan->port->passthru) {
-		dev_set_promiscuity(lowerdev, 1);
+		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
+			dev_set_promiscuity(lowerdev, 1);
 		goto hash_add;
 	}
 
@@ -344,12 +345,15 @@
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
 
+	dev_uc_unsync(lowerdev, dev);
+	dev_mc_unsync(lowerdev, dev);
+
 	if (vlan->port->passthru) {
-		dev_set_promiscuity(lowerdev, -1);
+		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
+			dev_set_promiscuity(lowerdev, -1);
 		goto hash_del;
 	}
 
-	dev_mc_unsync(lowerdev, dev);
 	if (dev->flags & IFF_ALLMULTI)
 		dev_set_allmulti(lowerdev, -1);
 
@@ -399,10 +403,11 @@
 		dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
 }
 
-static void macvlan_set_multicast_list(struct net_device *dev)
+static void macvlan_set_mac_lists(struct net_device *dev)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 
+	dev_uc_sync(vlan->lowerdev, dev);
 	dev_mc_sync(vlan->lowerdev, dev);
 }
 
@@ -542,6 +547,43 @@
 	return 0;
 }
 
+static int macvlan_fdb_add(struct ndmsg *ndm,
+			   struct net_device *dev,
+			   unsigned char *addr,
+			   u16 flags)
+{
+	struct macvlan_dev *vlan = netdev_priv(dev);
+	int err = -EINVAL;
+
+	if (!vlan->port->passthru)
+		return -EOPNOTSUPP;
+
+	if (is_unicast_ether_addr(addr))
+		err = dev_uc_add_excl(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_add_excl(dev, addr);
+
+	return err;
+}
+
+static int macvlan_fdb_del(struct ndmsg *ndm,
+			   struct net_device *dev,
+			   unsigned char *addr)
+{
+	struct macvlan_dev *vlan = netdev_priv(dev);
+	int err = -EINVAL;
+
+	if (!vlan->port->passthru)
+		return -EOPNOTSUPP;
+
+	if (is_unicast_ether_addr(addr))
+		err = dev_uc_del(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_del(dev, addr);
+
+	return err;
+}
+
 static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *drvinfo)
 {
@@ -572,11 +614,14 @@
 	.ndo_change_mtu		= macvlan_change_mtu,
 	.ndo_change_rx_flags	= macvlan_change_rx_flags,
 	.ndo_set_mac_address	= macvlan_set_mac_address,
-	.ndo_set_rx_mode	= macvlan_set_multicast_list,
+	.ndo_set_rx_mode	= macvlan_set_mac_lists,
 	.ndo_get_stats64	= macvlan_dev_get_stats64,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_vlan_rx_add_vid	= macvlan_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= macvlan_vlan_rx_kill_vid,
+	.ndo_fdb_add		= macvlan_fdb_add,
+	.ndo_fdb_del		= macvlan_fdb_del,
+	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -711,6 +756,9 @@
 	if (data && data[IFLA_MACVLAN_MODE])
 		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
 
+	if (data && data[IFLA_MACVLAN_FLAGS])
+		vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
+
 	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
 		if (port->count)
 			return -EINVAL;
@@ -760,6 +808,16 @@
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	if (data && data[IFLA_MACVLAN_MODE])
 		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+	if (data && data[IFLA_MACVLAN_FLAGS]) {
+		__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
+		bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
+
+		if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
+			dev_set_promiscuity(vlan->lowerdev, -1);
+		else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
+			dev_set_promiscuity(vlan->lowerdev, 1);
+		vlan->flags = flags;
+	}
 	return 0;
 }
 
@@ -773,7 +831,10 @@
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 
-	NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode);
+	if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
+		goto nla_put_failure;
+	if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -781,7 +842,8 @@
 }
 
 static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
-	[IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
+	[IFLA_MACVLAN_MODE]  = { .type = NLA_U32 },
+	[IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
 };
 
 int macvlan_link_register(struct rtnl_link_ops *ops)
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e16f98c..cd802eb 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -39,10 +39,7 @@
 		MII_BCM63XX_IR_SPEED |
 		MII_BCM63XX_IR_LINK) |
 		MII_BCM63XX_IR_EN;
-	err = phy_write(phydev, MII_BCM63XX_IR, reg);
-	if (err < 0)
-		return err;
-	return 0;
+	return phy_write(phydev, MII_BCM63XX_IR, reg);
 }
 
 static int bcm63xx_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 2f774ac..5f59cc0 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -134,12 +134,7 @@
 		return err;
 
 	/* Reconnect the PHY, and enable Autonegotiation */
-	err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
-
-	if (err < 0)
-		return err;
-
-	return 0;
+	return phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
 }
 
 static int dm9161_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index dd7ae19..940b290 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1215,6 +1215,36 @@
 	}
 }
 
+static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
+{
+	struct dp83640_private *dp83640 = dev->priv;
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON) |
+		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+	return 0;
+}
+
 static struct phy_driver dp83640_driver = {
 	.phy_id		= DP83640_PHY_ID,
 	.phy_id_mask	= 0xfffffff0,
@@ -1225,6 +1255,7 @@
 	.remove		= dp83640_remove,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
+	.ts_info	= dp83640_ts_info,
 	.hwtstamp	= dp83640_hwtstamp,
 	.rxtstamp	= dp83640_rxtstamp,
 	.txtstamp	= dp83640_txtstamp,
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index f08c85a..5ac46f5 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -40,6 +40,7 @@
 #define IP1001_PHASE_SEL_MASK		3	/* IP1001 RX/TXPHASE_SEL */
 #define IP1001_APS_ON			11	/* IP1001 APS Mode  bit */
 #define IP101A_G_APS_ON			2	/* IP101A/G APS Mode bit */
+#define IP101A_G_IRQ_CONF_STATUS	0x11	/* Conf Info IRQ & Status Reg */
 
 static int ip175c_config_init(struct phy_device *phydev)
 {
@@ -185,6 +186,15 @@
 	return 0;
 }
 
+static int ip101a_g_ack_interrupt(struct phy_device *phydev)
+{
+	int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
 static struct phy_driver ip175c_driver = {
 	.phy_id		= 0x02430d80,
 	.name		= "ICPlus IP175C",
@@ -204,7 +214,6 @@
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_GBIT_FEATURES | SUPPORTED_Pause |
 			  SUPPORTED_Asym_Pause,
-	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &ip1001_config_init,
 	.config_aneg	= &genphy_config_aneg,
 	.read_status	= &genphy_read_status,
@@ -220,6 +229,7 @@
 	.features	= PHY_BASIC_FEATURES | SUPPORTED_Pause |
 			  SUPPORTED_Asym_Pause,
 	.flags		= PHY_HAS_INTERRUPT,
+	.ack_interrupt	= ip101a_g_ack_interrupt,
 	.config_init	= &ip101a_g_config_init,
 	.config_aneg	= &genphy_config_aneg,
 	.read_status	= &genphy_read_status,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e8b9c53..418928d 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -455,11 +455,7 @@
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-	if (err < 0)
-		return err;
-
-	return 0;
+	return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
 static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -515,11 +511,7 @@
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-	if (err < 0)
-		return err;
-
-	return 0;
+	return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
 static int m88e1149_config_init(struct phy_device *phydev)
@@ -545,11 +537,7 @@
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-	if (err < 0)
-		return err;
-
-	return 0;
+	return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
 static int m88e1145_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 116a2dd..4eb98bc 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -348,7 +348,6 @@
 static struct spi_driver ks8995_driver = {
 	.driver = {
 		.name	    = "spi-ks8995",
-		.bus	     = &spi_bus_type,
 		.owner	   = THIS_MODULE,
 	},
 	.probe	  = ks8995_probe,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 33f8c51..21d7151 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -235,7 +235,7 @@
 /* Prototypes. */
 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
 			struct file *file, unsigned int cmd, unsigned long arg);
-static int ppp_xmit_process(struct ppp *ppp);
+static void ppp_xmit_process(struct ppp *ppp);
 static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
 static void ppp_push(struct ppp *ppp);
 static void ppp_channel_push(struct channel *pch);
@@ -969,8 +969,7 @@
 	put_unaligned_be16(proto, pp);
 
 	skb_queue_tail(&ppp->file.xq, skb);
-	if (!ppp_xmit_process(ppp))
-		netif_stop_queue(dev);
+	ppp_xmit_process(ppp);
 	return NETDEV_TX_OK;
 
  outf:
@@ -1048,11 +1047,10 @@
  * Called to do any work queued up on the transmit side
  * that can now be done.
  */
-static int
+static void
 ppp_xmit_process(struct ppp *ppp)
 {
 	struct sk_buff *skb;
-	int ret = 0;
 
 	ppp_xmit_lock(ppp);
 	if (!ppp->closing) {
@@ -1062,13 +1060,12 @@
 			ppp_send_frame(ppp, skb);
 		/* If there's no work left to do, tell the core net
 		   code that we can accept some more. */
-		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) {
+		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
 			netif_wake_queue(ppp->dev);
-			ret = 1;
-		}
+		else
+			netif_stop_queue(ppp->dev);
 	}
 	ppp_xmit_unlock(ppp);
-	return ret;
 }
 
 static inline struct sk_buff *
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 885dbdd..72b50f5 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -116,8 +116,8 @@
 	int i;
 
 	rcu_read_lock();
-	for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID;
-	     i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) {
+	i = 1;
+	for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
 		sock = rcu_dereference(callid_sock[i]);
 		if (!sock)
 			continue;
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 248a144..89024d5 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -40,4 +40,15 @@
 	  To compile this team mode as a module, choose M here: the module
 	  will be called team_mode_activebackup.
 
+config NET_TEAM_MODE_LOADBALANCE
+	tristate "Load-balance mode support"
+	depends on NET_TEAM
+	---help---
+	  This mode provides load balancing functionality. Tx port selection
+	  is done using a BPF function set up from userspace (bpf_hash_func
+	  option).
+
+	  To compile this team mode as a module, choose M here: the module
+	  will be called team_mode_loadbalance.
+
 endif # NET_TEAM
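
The help text above refers to a BPF function supplied from userspace through the bpf_hash_func option. Judging from the new team_mode_loadbalance.c later in this series, the option payload is a whole number of classic struct sock_filter instructions, and the program's return value is used modulo the count of enabled ports. A purely illustrative program, not taken from the patch, that hashes on the last byte of the destination MAC could look like:

#include <linux/filter.h>

/* illustrative classic BPF program for the bpf_hash_func option */
static struct sock_filter lb_hash_prog[] = {
	/* A = byte at offset 5 (last byte of the destination MAC) */
	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 5),
	/* return A; lb_transmit() uses this value % en_port_count */
	BPF_STMT(BPF_RET | BPF_A, 0),
};

The raw bytes of such an array are what __fprog_create() in team_mode_loadbalance.c validates and converts into a struct sock_fprog before sk_unattached_filter_create() is called.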
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index 85f2028..fb9f4c1 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_NET_TEAM) += team.o
 obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
 obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
+obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8f81805..c61ae35 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -65,7 +65,7 @@
 	return dev_set_mac_address(port_dev, &addr);
 }
 
-int team_port_set_orig_mac(struct team_port *port)
+static int team_port_set_orig_mac(struct team_port *port)
 {
 	return __set_port_mac(port->dev, port->orig.dev_addr);
 }
@@ -76,12 +76,26 @@
 }
 EXPORT_SYMBOL(team_port_set_team_mac);
 
+static void team_refresh_port_linkup(struct team_port *port)
+{
+	port->linkup = port->user.linkup_enabled ? port->user.linkup :
+						   port->state.linkup;
+}
 
 /*******************
  * Options handling
  *******************/
 
-struct team_option *__team_find_option(struct team *team, const char *opt_name)
+struct team_option_inst { /* One for each option instance */
+	struct list_head list;
+	struct team_option *option;
+	struct team_port *port; /* != NULL if per-port */
+	bool changed;
+	bool removed;
+};
+
+static struct team_option *__team_find_option(struct team *team,
+					      const char *opt_name)
 {
 	struct team_option *option;
 
@@ -92,9 +106,121 @@
 	return NULL;
 }
 
-int __team_options_register(struct team *team,
-			    const struct team_option *option,
-			    size_t option_count)
+static int __team_option_inst_add(struct team *team, struct team_option *option,
+				  struct team_port *port)
+{
+	struct team_option_inst *opt_inst;
+
+	opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
+	if (!opt_inst)
+		return -ENOMEM;
+	opt_inst->option = option;
+	opt_inst->port = port;
+	opt_inst->changed = true;
+	opt_inst->removed = false;
+	list_add_tail(&opt_inst->list, &team->option_inst_list);
+	return 0;
+}
+
+static void __team_option_inst_del(struct team_option_inst *opt_inst)
+{
+	list_del(&opt_inst->list);
+	kfree(opt_inst);
+}
+
+static void __team_option_inst_del_option(struct team *team,
+					  struct team_option *option)
+{
+	struct team_option_inst *opt_inst, *tmp;
+
+	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
+		if (opt_inst->option == option)
+			__team_option_inst_del(opt_inst);
+	}
+}
+
+static int __team_option_inst_add_option(struct team *team,
+					 struct team_option *option)
+{
+	struct team_port *port;
+	int err;
+
+	if (!option->per_port)
+		return __team_option_inst_add(team, option, NULL);
+
+	list_for_each_entry(port, &team->port_list, list) {
+		err = __team_option_inst_add(team, option, port);
+		if (err)
+			goto inst_del_option;
+	}
+	return 0;
+
+inst_del_option:
+	__team_option_inst_del_option(team, option);
+	return err;
+}
+
+static void __team_option_inst_mark_removed_option(struct team *team,
+						   struct team_option *option)
+{
+	struct team_option_inst *opt_inst;
+
+	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+		if (opt_inst->option == option) {
+			opt_inst->changed = true;
+			opt_inst->removed = true;
+		}
+	}
+}
+
+static void __team_option_inst_del_port(struct team *team,
+					struct team_port *port)
+{
+	struct team_option_inst *opt_inst, *tmp;
+
+	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
+		if (opt_inst->option->per_port &&
+		    opt_inst->port == port)
+			__team_option_inst_del(opt_inst);
+	}
+}
+
+static int __team_option_inst_add_port(struct team *team,
+				       struct team_port *port)
+{
+	struct team_option *option;
+	int err;
+
+	list_for_each_entry(option, &team->option_list, list) {
+		if (!option->per_port)
+			continue;
+		err = __team_option_inst_add(team, option, port);
+		if (err)
+			goto inst_del_port;
+	}
+	return 0;
+
+inst_del_port:
+	__team_option_inst_del_port(team, port);
+	return err;
+}
+
+static void __team_option_inst_mark_removed_port(struct team *team,
+						 struct team_port *port)
+{
+	struct team_option_inst *opt_inst;
+
+	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+		if (opt_inst->port == port) {
+			opt_inst->changed = true;
+			opt_inst->removed = true;
+		}
+	}
+}
+
+static int __team_options_register(struct team *team,
+				   const struct team_option *option,
+				   size_t option_count)
 {
 	int i;
 	struct team_option **dst_opts;
@@ -107,26 +233,32 @@
 	for (i = 0; i < option_count; i++, option++) {
 		if (__team_find_option(team, option->name)) {
 			err = -EEXIST;
-			goto rollback;
+			goto alloc_rollback;
 		}
 		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
 		if (!dst_opts[i]) {
 			err = -ENOMEM;
-			goto rollback;
+			goto alloc_rollback;
 		}
 	}
 
 	for (i = 0; i < option_count; i++) {
-		dst_opts[i]->changed = true;
-		dst_opts[i]->removed = false;
+		err = __team_option_inst_add_option(team, dst_opts[i]);
+		if (err)
+			goto inst_rollback;
 		list_add_tail(&dst_opts[i]->list, &team->option_list);
 	}
 
 	kfree(dst_opts);
 	return 0;
 
-rollback:
-	for (i = 0; i < option_count; i++)
+inst_rollback:
+	for (i--; i >= 0; i--)
+		__team_option_inst_del_option(team, dst_opts[i]);
+
+	i = option_count - 1;
+alloc_rollback:
+	for (i--; i >= 0; i--)
 		kfree(dst_opts[i]);
 
 	kfree(dst_opts);
@@ -143,10 +275,8 @@
 		struct team_option *del_opt;
 
 		del_opt = __team_find_option(team, option->name);
-		if (del_opt) {
-			del_opt->changed = true;
-			del_opt->removed = true;
-		}
+		if (del_opt)
+			__team_option_inst_mark_removed_option(team, del_opt);
 	}
 }
 
@@ -161,6 +291,7 @@
 
 		del_opt = __team_find_option(team, option->name);
 		if (del_opt) {
+			__team_option_inst_del_option(team, del_opt);
 			list_del(&del_opt->list);
 			kfree(del_opt);
 		}
@@ -193,22 +324,42 @@
 }
 EXPORT_SYMBOL(team_options_unregister);
 
-static int team_option_get(struct team *team, struct team_option *option,
-			   void *arg)
-{
-	return option->getter(team, arg);
-}
-
-static int team_option_set(struct team *team, struct team_option *option,
-			   void *arg)
+static int team_option_port_add(struct team *team, struct team_port *port)
 {
 	int err;
 
-	err = option->setter(team, arg);
+	err = __team_option_inst_add_port(team, port);
+	if (err)
+		return err;
+	__team_options_change_check(team);
+	return 0;
+}
+
+static void team_option_port_del(struct team *team, struct team_port *port)
+{
+	__team_option_inst_mark_removed_port(team, port);
+	__team_options_change_check(team);
+	__team_option_inst_del_port(team, port);
+}
+
+static int team_option_get(struct team *team,
+			   struct team_option_inst *opt_inst,
+			   struct team_gsetter_ctx *ctx)
+{
+	return opt_inst->option->getter(team, ctx);
+}
+
+static int team_option_set(struct team *team,
+			   struct team_option_inst *opt_inst,
+			   struct team_gsetter_ctx *ctx)
+{
+	int err;
+
+	err = opt_inst->option->setter(team, ctx);
 	if (err)
 		return err;
 
-	option->changed = true;
+	opt_inst->changed = true;
 	__team_options_change_check(team);
 	return err;
 }
@@ -408,6 +559,8 @@
  * Rx path frame handler
  ************************/
 
+static bool team_port_enabled(struct team_port *port);
+
 /* note: already called with rcu_read_lock */
 static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
 {
@@ -424,8 +577,12 @@
 
 	port = team_port_get_rcu(skb->dev);
 	team = port->team;
-
-	res = team->ops.receive(team, port, skb);
+	if (!team_port_enabled(port)) {
+		/* allow exact match delivery for disabled ports */
+		res = RX_HANDLER_EXACT;
+	} else {
+		res = team->ops.receive(team, port, skb);
+	}
 	if (res == RX_HANDLER_ANOTHER) {
 		struct team_pcpu_stats *pcpu_stats;
 
@@ -461,17 +618,25 @@
 	return false;
 }
 
-/*
- * Add/delete port to the team port list. Write guarded by rtnl_lock.
- * Takes care of correct port->index setup (might be racy).
- */
-static void team_port_list_add_port(struct team *team,
-				    struct team_port *port)
+static bool team_port_enabled(struct team_port *port)
 {
-	port->index = team->port_count++;
+	return port->index != -1;
+}
+
+/*
+ * Enable/disable port by adding to enabled port hashlist and setting
+ * port->index (might be racy, so a reader could see an incorrect ifindex
+ * when processing an in-flight packet, but that is not a problem). Write
+ * guarded by team->lock.
+ */
+static void team_port_enable(struct team *team,
+			     struct team_port *port)
+{
+	if (team_port_enabled(port))
+		return;
+	port->index = team->en_port_count++;
 	hlist_add_head_rcu(&port->hlist,
 			   team_port_index_hash(team, port->index));
-	list_add_tail_rcu(&port->list, &team->port_list);
 }
 
 static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -479,7 +644,7 @@
 	int i;
 	struct team_port *port;
 
-	for (i = rm_index + 1; i < team->port_count; i++) {
+	for (i = rm_index + 1; i < team->en_port_count; i++) {
 		port = team_get_port_by_index(team, i);
 		hlist_del_rcu(&port->hlist);
 		port->index--;
@@ -488,15 +653,17 @@
 	}
 }
 
-static void team_port_list_del_port(struct team *team,
-				   struct team_port *port)
+static void team_port_disable(struct team *team,
+			      struct team_port *port)
 {
 	int rm_index = port->index;
 
+	if (!team_port_enabled(port))
+		return;
 	hlist_del_rcu(&port->hlist);
-	list_del_rcu(&port->list);
 	__reconstruct_port_hlist(team, rm_index);
-	team->port_count--;
+	team->en_port_count--;
+	port->index = -1;
 }
 
 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -642,7 +809,16 @@
 		goto err_handler_register;
 	}
 
-	team_port_list_add_port(team, port);
+	err = team_option_port_add(team, port);
+	if (err) {
+		netdev_err(dev, "Device %s failed to add per-port options\n",
+			   portname);
+		goto err_option_port_add;
+	}
+
+	port->index = -1;
+	team_port_enable(team, port);
+	list_add_tail_rcu(&port->list, &team->port_list);
 	team_adjust_ops(team);
 	__team_compute_features(team);
 	__team_port_change_check(port, !!netif_carrier_ok(port_dev));
@@ -651,6 +827,9 @@
 
 	return 0;
 
+err_option_port_add:
+	netdev_rx_handler_unregister(port_dev);
+
 err_handler_register:
 	netdev_set_master(port_dev, NULL);
 
@@ -688,8 +867,10 @@
 
 	port->removed = true;
 	__team_port_change_check(port, false);
-	team_port_list_del_port(team, port);
+	team_port_disable(team, port);
+	list_del_rcu(&port->list);
 	team_adjust_ops(team);
+	team_option_port_del(team, port);
 	netdev_rx_handler_unregister(port_dev);
 	netdev_set_master(port_dev, NULL);
 	vlan_vids_del_by_dev(port_dev, dev);
@@ -712,19 +893,66 @@
 
 static const char team_no_mode_kind[] = "*NOMODE*";
 
-static int team_mode_option_get(struct team *team, void *arg)
+static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
 {
-	const char **str = arg;
-
-	*str = team->mode ? team->mode->kind : team_no_mode_kind;
+	ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind;
 	return 0;
 }
 
-static int team_mode_option_set(struct team *team, void *arg)
+static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
 {
-	const char **str = arg;
+	return team_change_mode(team, ctx->data.str_val);
+}
 
-	return team_change_mode(team, *str);
+static int team_port_en_option_get(struct team *team,
+				   struct team_gsetter_ctx *ctx)
+{
+	ctx->data.bool_val = team_port_enabled(ctx->port);
+	return 0;
+}
+
+static int team_port_en_option_set(struct team *team,
+				   struct team_gsetter_ctx *ctx)
+{
+	if (ctx->data.bool_val)
+		team_port_enable(team, ctx->port);
+	else
+		team_port_disable(team, ctx->port);
+	return 0;
+}
+
+static int team_user_linkup_option_get(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	ctx->data.bool_val = ctx->port->user.linkup;
+	return 0;
+}
+
+static int team_user_linkup_option_set(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	ctx->port->user.linkup = ctx->data.bool_val;
+	team_refresh_port_linkup(ctx->port);
+	return 0;
+}
+
+static int team_user_linkup_en_option_get(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	struct team_port *port = ctx->port;
+
+	ctx->data.bool_val = port->user.linkup_enabled;
+	return 0;
+}
+
+static int team_user_linkup_en_option_set(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	struct team_port *port = ctx->port;
+
+	port->user.linkup_enabled = ctx->data.bool_val;
+	team_refresh_port_linkup(ctx->port);
+	return 0;
 }
 
 static const struct team_option team_options[] = {
@@ -734,6 +962,27 @@
 		.getter = team_mode_option_get,
 		.setter = team_mode_option_set,
 	},
+	{
+		.name = "enabled",
+		.type = TEAM_OPTION_TYPE_BOOL,
+		.per_port = true,
+		.getter = team_port_en_option_get,
+		.setter = team_port_en_option_set,
+	},
+	{
+		.name = "user_linkup",
+		.type = TEAM_OPTION_TYPE_BOOL,
+		.per_port = true,
+		.getter = team_user_linkup_option_get,
+		.setter = team_user_linkup_option_set,
+	},
+	{
+		.name = "user_linkup_enabled",
+		.type = TEAM_OPTION_TYPE_BOOL,
+		.per_port = true,
+		.getter = team_user_linkup_en_option_get,
+		.setter = team_user_linkup_en_option_set,
+	},
 };
 
 static int team_init(struct net_device *dev)
@@ -750,12 +999,13 @@
 		return -ENOMEM;
 
 	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
-		INIT_HLIST_HEAD(&team->port_hlist[i]);
+		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
 	INIT_LIST_HEAD(&team->port_list);
 
 	team_adjust_ops(team);
 
 	INIT_LIST_HEAD(&team->option_list);
+	INIT_LIST_HEAD(&team->option_inst_list);
 	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
 	if (err)
 		goto err_options_register;
@@ -1145,10 +1395,7 @@
 	},
 	[TEAM_ATTR_OPTION_CHANGED]		= { .type = NLA_FLAG },
 	[TEAM_ATTR_OPTION_TYPE]			= { .type = NLA_U8 },
-	[TEAM_ATTR_OPTION_DATA] = {
-		.type = NLA_BINARY,
-		.len = TEAM_STRING_MAX_LEN,
-	},
+	[TEAM_ATTR_OPTION_DATA]			= { .type = NLA_BINARY },
 };
 
 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
@@ -1241,46 +1488,86 @@
 {
 	struct nlattr *option_list;
 	void *hdr;
-	struct team_option *option;
+	struct team_option_inst *opt_inst;
+	int err;
 
 	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
 			  TEAM_CMD_OPTIONS_GET);
 	if (IS_ERR(hdr))
 		return PTR_ERR(hdr);
 
-	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
+		goto nla_put_failure;
 	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
 	if (!option_list)
 		return -EMSGSIZE;
 
-	list_for_each_entry(option, &team->option_list, list) {
+	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
 		struct nlattr *option_item;
-		long arg;
+		struct team_option *option = opt_inst->option;
+		struct team_gsetter_ctx ctx;
 
 		/* Include only changed options if fill all mode is not on */
-		if (!fillall && !option->changed)
+		if (!fillall && !opt_inst->changed)
 			continue;
 		option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
 		if (!option_item)
 			goto nla_put_failure;
-		NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
-		if (option->changed) {
-			NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
-			option->changed = false;
+		if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
+			goto nla_put_failure;
+		if (opt_inst->changed) {
+			if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
+				goto nla_put_failure;
+			opt_inst->changed = false;
 		}
-		if (option->removed)
-			NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED);
+		if (opt_inst->removed &&
+		    nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
+			goto nla_put_failure;
+		if (opt_inst->port &&
+		    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
+				opt_inst->port->dev->ifindex))
+			goto nla_put_failure;
+		ctx.port = opt_inst->port;
 		switch (option->type) {
 		case TEAM_OPTION_TYPE_U32:
-			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
-			team_option_get(team, option, &arg);
-			NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
+			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
+				goto nla_put_failure;
+			err = team_option_get(team, opt_inst, &ctx);
+			if (err)
+				goto errout;
+			if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
+					ctx.data.u32_val))
+				goto nla_put_failure;
 			break;
 		case TEAM_OPTION_TYPE_STRING:
-			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
-			team_option_get(team, option, &arg);
-			NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
-				       (char *) arg);
+			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
+				goto nla_put_failure;
+			err = team_option_get(team, opt_inst, &ctx);
+			if (err)
+				goto errout;
+			if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
+					   ctx.data.str_val))
+				goto nla_put_failure;
+			break;
+		case TEAM_OPTION_TYPE_BINARY:
+			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
+				goto nla_put_failure;
+			err = team_option_get(team, opt_inst, &ctx);
+			if (err)
+				goto errout;
+			if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
+				    ctx.data.bin_val.len, ctx.data.bin_val.ptr))
+				goto nla_put_failure;
+			break;
+		case TEAM_OPTION_TYPE_BOOL:
+			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
+				goto nla_put_failure;
+			err = team_option_get(team, opt_inst, &ctx);
+			if (err)
+				goto errout;
+			if (ctx.data.bool_val &&
+			    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
+				goto nla_put_failure;
 			break;
 		default:
 			BUG();
@@ -1292,8 +1579,10 @@
 	return genlmsg_end(skb, hdr);
 
 nla_put_failure:
+	err = -EMSGSIZE;
+errout:
 	genlmsg_cancel(skb, hdr);
-	return -EMSGSIZE;
+	return err;
 }
 
 static int team_nl_fill_options_get_all(struct sk_buff *skb,
@@ -1339,9 +1628,12 @@
 	}
 
 	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
-		struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
+		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
+		struct nlattr *attr_port_ifindex;
+		struct nlattr *attr_data;
 		enum team_option_type opt_type;
-		struct team_option *option;
+		int opt_port_ifindex = 0; /* != 0 for per-port options */
+		struct team_option_inst *opt_inst;
 		char *opt_name;
 		bool opt_found = false;
 
@@ -1349,48 +1641,78 @@
 			err = -EINVAL;
 			goto team_put;
 		}
-		err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
+		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
 				       nl_option, team_nl_option_policy);
 		if (err)
 			goto team_put;
-		if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
-		    !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
-		    !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
+		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
+		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
 			err = -EINVAL;
 			goto team_put;
 		}
-		switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
+		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
 		case NLA_U32:
 			opt_type = TEAM_OPTION_TYPE_U32;
 			break;
 		case NLA_STRING:
 			opt_type = TEAM_OPTION_TYPE_STRING;
 			break;
+		case NLA_BINARY:
+			opt_type = TEAM_OPTION_TYPE_BINARY;
+			break;
+		case NLA_FLAG:
+			opt_type = TEAM_OPTION_TYPE_BOOL;
+			break;
 		default:
 			goto team_put;
 		}
 
-		opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
-		list_for_each_entry(option, &team->option_list, list) {
-			long arg;
-			struct nlattr *opt_data_attr;
+		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
+		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
+			err = -EINVAL;
+			goto team_put;
+		}
 
+		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
+		attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
+		if (attr_port_ifindex)
+			opt_port_ifindex = nla_get_u32(attr_port_ifindex);
+
+		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+			struct team_option *option = opt_inst->option;
+			struct team_gsetter_ctx ctx;
+			int tmp_ifindex;
+
+			tmp_ifindex = opt_inst->port ?
+				      opt_inst->port->dev->ifindex : 0;
 			if (option->type != opt_type ||
-			    strcmp(option->name, opt_name))
+			    strcmp(option->name, opt_name) ||
+			    tmp_ifindex != opt_port_ifindex)
 				continue;
 			opt_found = true;
-			opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
+			ctx.port = opt_inst->port;
 			switch (opt_type) {
 			case TEAM_OPTION_TYPE_U32:
-				arg = nla_get_u32(opt_data_attr);
+				ctx.data.u32_val = nla_get_u32(attr_data);
 				break;
 			case TEAM_OPTION_TYPE_STRING:
-				arg = (long) nla_data(opt_data_attr);
+				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
+					err = -EINVAL;
+					goto team_put;
+				}
+				ctx.data.str_val = nla_data(attr_data);
+				break;
+			case TEAM_OPTION_TYPE_BINARY:
+				ctx.data.bin_val.len = nla_len(attr_data);
+				ctx.data.bin_val.ptr = nla_data(attr_data);
+				break;
+			case TEAM_OPTION_TYPE_BOOL:
+				ctx.data.bool_val = attr_data ? true : false;
 				break;
 			default:
 				BUG();
 			}
-			err = team_option_set(team, option, &arg);
+			err = team_option_set(team, opt_inst, &ctx);
 			if (err)
 				goto team_put;
 		}
@@ -1420,7 +1742,8 @@
 	if (IS_ERR(hdr))
 		return PTR_ERR(hdr);
 
-	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
+		goto nla_put_failure;
 	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
 	if (!port_list)
 		return -EMSGSIZE;
@@ -1434,17 +1757,20 @@
 		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
 		if (!port_item)
 			goto nla_put_failure;
-		NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
+		if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
+			goto nla_put_failure;
 		if (port->changed) {
-			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+			if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
+				goto nla_put_failure;
 			port->changed = false;
 		}
-		if (port->removed)
-			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED);
-		if (port->linkup)
-			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
-		NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
-		NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
+		if ((port->removed &&
+		     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
+		    (port->state.linkup &&
+		     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
+		    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
+		    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
+			goto nla_put_failure;
 		nla_nest_end(skb, port_item);
 	}
 
@@ -1603,23 +1929,24 @@
 {
 	int err;
 
-	if (!port->removed && port->linkup == linkup)
+	if (!port->removed && port->state.linkup == linkup)
 		return;
 
 	port->changed = true;
-	port->linkup = linkup;
+	port->state.linkup = linkup;
+	team_refresh_port_linkup(port);
 	if (linkup) {
 		struct ethtool_cmd ecmd;
 
 		err = __ethtool_get_settings(port->dev, &ecmd);
 		if (!err) {
-			port->speed = ethtool_cmd_speed(&ecmd);
-			port->duplex = ecmd.duplex;
+			port->state.speed = ethtool_cmd_speed(&ecmd);
+			port->state.duplex = ecmd.duplex;
 			goto send_event;
 		}
 	}
-	port->speed = 0;
-	port->duplex = 0;
+	port->state.speed = 0;
+	port->state.duplex = 0;
 
 send_event:
 	err = team_nl_send_event_port_list_get(port->team);
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index f4d960e..fd6bd03 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -59,23 +59,21 @@
 		RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
 }
 
-static int ab_active_port_get(struct team *team, void *arg)
+static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
 {
-	u32 *ifindex = arg;
-
-	*ifindex = 0;
 	if (ab_priv(team)->active_port)
-		*ifindex = ab_priv(team)->active_port->dev->ifindex;
+		ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex;
+	else
+		ctx->data.u32_val = 0;
 	return 0;
 }
 
-static int ab_active_port_set(struct team *team, void *arg)
+static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx)
 {
-	u32 *ifindex = arg;
 	struct team_port *port;
 
-	list_for_each_entry_rcu(port, &team->port_list, list) {
-		if (port->dev->ifindex == *ifindex) {
+	list_for_each_entry(port, &team->port_list, list) {
+		if (port->dev->ifindex == ctx->data.u32_val) {
 			rcu_assign_pointer(ab_priv(team)->active_port, port);
 			return 0;
 		}
@@ -92,12 +90,12 @@
 	},
 };
 
-int ab_init(struct team *team)
+static int ab_init(struct team *team)
 {
 	return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
 }
 
-void ab_exit(struct team *team)
+static void ab_exit(struct team *team)
 {
 	team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
 }
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
new file mode 100644
index 0000000..86e8183
--- /dev/null
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -0,0 +1,174 @@
+/*
+ * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
+ * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/if_team.h>
+
+struct lb_priv {
+	struct sk_filter __rcu *fp;
+	struct sock_fprog *orig_fprog;
+};
+
+static struct lb_priv *lb_priv(struct team *team)
+{
+	return (struct lb_priv *) &team->mode_priv;
+}
+
+static bool lb_transmit(struct team *team, struct sk_buff *skb)
+{
+	struct sk_filter *fp;
+	struct team_port *port;
+	unsigned int hash;
+	int port_index;
+
+	fp = rcu_dereference(lb_priv(team)->fp);
+	if (unlikely(!fp))
+		goto drop;
+	hash = SK_RUN_FILTER(fp, skb);
+	port_index = hash % team->en_port_count;
+	port = team_get_port_by_index_rcu(team, port_index);
+	if (unlikely(!port))
+		goto drop;
+	skb->dev = port->dev;
+	if (dev_queue_xmit(skb))
+		return false;
+	return true;
+
+drop:
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+	if (!lb_priv(team)->orig_fprog) {
+		ctx->data.bin_val.len = 0;
+		ctx->data.bin_val.ptr = NULL;
+		return 0;
+	}
+	ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len *
+				sizeof(struct sock_filter);
+	ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter;
+	return 0;
+}
+
+static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
+			  const void *data)
+{
+	struct sock_fprog *fprog;
+	struct sock_filter *filter = (struct sock_filter *) data;
+
+	if (data_len % sizeof(struct sock_filter))
+		return -EINVAL;
+	fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
+	if (!fprog)
+		return -ENOMEM;
+	fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
+	if (!fprog->filter) {
+		kfree(fprog);
+		return -ENOMEM;
+	}
+	fprog->len = data_len / sizeof(struct sock_filter);
+	*pfprog = fprog;
+	return 0;
+}
+
+static void __fprog_destroy(struct sock_fprog *fprog)
+{
+	kfree(fprog->filter);
+	kfree(fprog);
+}
+
+static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
+{
+	struct sk_filter *fp = NULL;
+	struct sock_fprog *fprog = NULL;
+	int err;
+
+	if (ctx->data.bin_val.len) {
+		err = __fprog_create(&fprog, ctx->data.bin_val.len,
+				     ctx->data.bin_val.ptr);
+		if (err)
+			return err;
+		err = sk_unattached_filter_create(&fp, fprog);
+		if (err) {
+			__fprog_destroy(fprog);
+			return err;
+		}
+	}
+
+	if (lb_priv(team)->orig_fprog) {
+		/* Clear old filter data */
+		__fprog_destroy(lb_priv(team)->orig_fprog);
+		sk_unattached_filter_destroy(lb_priv(team)->fp);
+	}
+
+	rcu_assign_pointer(lb_priv(team)->fp, fp);
+	lb_priv(team)->orig_fprog = fprog;
+	return 0;
+}
+
+static const struct team_option lb_options[] = {
+	{
+		.name = "bpf_hash_func",
+		.type = TEAM_OPTION_TYPE_BINARY,
+		.getter = lb_bpf_func_get,
+		.setter = lb_bpf_func_set,
+	},
+};
+
+static int lb_init(struct team *team)
+{
+	return team_options_register(team, lb_options,
+				     ARRAY_SIZE(lb_options));
+}
+
+static void lb_exit(struct team *team)
+{
+	team_options_unregister(team, lb_options,
+				ARRAY_SIZE(lb_options));
+}
+
+static const struct team_mode_ops lb_mode_ops = {
+	.init			= lb_init,
+	.exit			= lb_exit,
+	.transmit		= lb_transmit,
+};
+
+static struct team_mode lb_mode = {
+	.kind		= "loadbalance",
+	.owner		= THIS_MODULE,
+	.priv_size	= sizeof(struct lb_priv),
+	.ops		= &lb_mode_ops,
+};
+
+static int __init lb_init_module(void)
+{
+	return team_mode_register(&lb_mode);
+}
+
+static void __exit lb_cleanup_module(void)
+{
+	team_mode_unregister(&lb_mode);
+}
+
+module_init(lb_init_module);
+module_exit(lb_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Load-balancing mode for team");
+MODULE_ALIAS("team-mode-loadbalance");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index a0e8f80..6abfbdc 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -50,7 +50,7 @@
 	struct team_port *port;
 	int port_index;
 
-	port_index = rr_priv(team)->sent_packets++ % team->port_count;
+	port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
 	port = team_get_port_by_index_rcu(team, port_index);
 	port = __get_first_port_up(team, port);
 	if (unlikely(!port))
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index b15ac81..0924f57 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1826,18 +1826,6 @@
 	.remove		= __devexit_p(xl_remove_one),
 };
 
-static int __init xl_pci_init (void)
-{
-	return pci_register_driver(&xl_3c359_driver);
-}
-
-
-static void __exit xl_pci_cleanup (void)
-{
-	pci_unregister_driver (&xl_3c359_driver);
-}
-
-module_init(xl_pci_init);
-module_exit(xl_pci_cleanup);
+module_pci_driver(xl_3c359_driver);
 
 MODULE_LICENSE("GPL") ; 
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index 45550d4..ef3bb13 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -98,7 +98,7 @@
 
 config TMS380TR
 	tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
-	depends on PCI || ISA && ISA_DMA_API || MCA
+	depends on PCI || ISA || MCA
 	select FW_LOADER
 	---help---
 	  This driver provides generic support for token ring adapters
@@ -137,7 +137,7 @@
 
 config SKISA
 	tristate "SysKonnect TR4/16 ISA support"
-	depends on TMS380TR && ISA
+	depends on TMS380TR && ISA && ISA_DMA_API
 	help
 	  This tms380 module supports SysKonnect TR4/16 ISA cards.
 
@@ -149,7 +149,7 @@
 
 config PROTEON
 	tristate "Proteon ISA support"
-	depends on TMS380TR && ISA
+	depends on TMS380TR && ISA && ISA_DMA_API
 	help
 	  This tms380 module supports Proteon ISA cards.
 
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 3e4b4f0..97e4c65 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1904,14 +1904,6 @@
   .remove   = __devexit_p(streamer_remove_one),
 };
 
-static int __init streamer_init_module(void) {
-  return pci_register_driver(&streamer_pci_driver);
-}
+module_pci_driver(streamer_pci_driver);
 
-static void __exit streamer_cleanup_module(void) {
-  pci_unregister_driver(&streamer_pci_driver);
-}
-
-module_init(streamer_init_module);
-module_exit(streamer_cleanup_module);
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 0e23474..4d45fe8 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1732,18 +1732,6 @@
 	.remove		= __devexit_p(olympic_remove_one),
 };
 
-static int __init olympic_pci_init(void) 
-{
-	return pci_register_driver(&olympic_driver) ;
-}
-
-static void __exit olympic_pci_cleanup(void)
-{
-	pci_unregister_driver(&olympic_driver) ; 
-}	
-
-
-module_init(olympic_pci_init) ; 
-module_exit(olympic_pci_cleanup) ; 
+module_pci_driver(olympic_driver);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index be4813e..b5e0855 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -254,7 +254,7 @@
 
 	/* Reset the hardware here. Don't forget to set the station address. */
 
-#ifdef CONFIG_ISA
+#if defined(CONFIG_ISA) && defined(CONFIG_ISA_DMA_API)
 	if(dev->dma > 0) 
 	{
 		unsigned long flags=claim_dma_lock();
@@ -1125,8 +1125,8 @@
     
 	del_timer(&tp->timer);
 	tms380tr_disable_interrupts(dev);
-   
-#ifdef CONFIG_ISA
+
+#if defined(CONFIG_ISA) && defined(CONFIG_ISA_DMA_API)
 	if(dev->dma > 0) 
 	{
 		unsigned long flags=claim_dma_lock();
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index fb9918d..90f3fa4 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -233,16 +233,4 @@
 	.remove		= __devexit_p(tms_pci_detach),
 };
 
-static int __init tms_pci_init (void)
-{
-	return pci_register_driver(&tms_pci_driver);
-}
-
-static void __exit tms_pci_rmmod (void)
-{
-	pci_unregister_driver (&tms_pci_driver);
-}
-
-module_init(tms_pci_init);
-module_exit(tms_pci_rmmod);
-
+module_pci_driver(tms_pci_driver);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 552d24b..d316503b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -365,6 +365,27 @@
 	.data		= BIT(4), /* interface whitelist bitmap */
 };
 
+/* Sierra Wireless provide equally useless interface descriptors.
+ * Devices in QMI mode can be switched between two different
+ * configurations:
+ *   a) USB interface #8 is QMI/wwan
+ *   b) USB interfaces #8, #19 and #20 are QMI/wwan
+ *
+ * Both configurations provide a number of other interfaces (serial++),
+ * some of which have the same endpoint configuration as we expect, so
+ * a whitelist or blacklist is necessary.
+ *
+ * FIXME: The below whitelist should include BIT(20).  It does not
+ * because I cannot get it to work...
+ */
+static const struct driver_info	qmi_wwan_sierra = {
+	.description	= "Sierra Wireless wwan/QMI device",
+	.flags		= FLAG_WWAN,
+	.bind		= qmi_wwan_bind_gobi,
+	.unbind		= qmi_wwan_unbind_shared,
+	.manage_power	= qmi_wwan_manage_power,
+	.data		= BIT(8) | BIT(19), /* interface whitelist bitmap */
+};
 
 #define HUAWEI_VENDOR_ID	0x12D1
 #define QMI_GOBI_DEVICE(vend, prod) \
@@ -445,6 +466,15 @@
 		.bInterfaceProtocol = 0xff,
 		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
 	},
+	{	/* Sierra Wireless MC77xx in QMI mode */
+		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+		.idVendor           = 0x1199,
+		.idProduct          = 0x68a2,
+		.bInterfaceClass    = 0xff,
+		.bInterfaceSubClass = 0xff,
+		.bInterfaceProtocol = 0xff,
+		.driver_info        = (unsigned long)&qmi_wwan_sierra,
+	},
 	{QMI_GOBI_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
 	{QMI_GOBI_DEVICE(0x03f0, 0x1f1d)},	/* HP un2400 Gobi Modem Device */
 	{QMI_GOBI_DEVICE(0x03f0, 0x371d)},	/* HP un2430 Mobile Broadband Module */
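
The Sierra Wireless comment above describes using the driver_info data field as an interface-number whitelist bitmap (BIT(8) | BIT(19) here). The check itself lives in the shared bind path, which is not part of this hunk; conceptually it is along these lines, with the helper name being illustrative only:

#include <linux/usb.h>
#include <linux/bitops.h>

/* illustrative only: accept the interface if the whitelist bitmap is
 * empty or has the bit for this interface number set */
static bool qmi_intf_whitelisted(struct usb_interface *intf,
				 unsigned long whitelist)
{
	u8 num = intf->cur_altsetting->desc.bInterfaceNumber;

	return !whitelist || test_bit(num, &whitelist);
}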
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 187d01c..a234948 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1051,6 +1051,7 @@
 	dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
 	dev->net->flags |= IFF_MULTICAST;
 	dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
+	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
 	return 0;
 }
 
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index b7b3f5b..db99536 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -884,6 +884,7 @@
 	.get_drvinfo		= usbnet_get_drvinfo,
 	.get_msglevel		= usbnet_get_msglevel,
 	.set_msglevel		= usbnet_set_msglevel,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4de2760..fa58c78 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -66,12 +66,21 @@
 	/* Host will merge rx buffers for big packets (shake it! shake it!) */
 	bool mergeable_rx_bufs;
 
+	/* enable config space updates */
+	bool config_enable;
+
 	/* Active statistics */
 	struct virtnet_stats __percpu *stats;
 
 	/* Work struct for refilling if we run low on memory. */
 	struct delayed_work refill;
 
+	/* Work struct for config space updates */
+	struct work_struct config_work;
+
+	/* Lock for config space updates */
+	struct mutex config_lock;
+
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 
@@ -626,16 +635,15 @@
 	/* This can happen with OOM and indirect buffers. */
 	if (unlikely(capacity < 0)) {
 		if (likely(capacity == -ENOMEM)) {
-			if (net_ratelimit()) {
+			if (net_ratelimit())
 				dev_warn(&dev->dev,
 					 "TX queue failure: out of memory\n");
-			} else {
+		} else {
 			dev->stats.tx_fifo_errors++;
 			if (net_ratelimit())
 				dev_warn(&dev->dev,
 					 "Unexpected TX queue failure: %d\n",
 					 capacity);
-			}
 		}
 		dev->stats.tx_dropped++;
 		kfree_skb(skb);
@@ -781,6 +789,16 @@
 	return status == VIRTIO_NET_OK;
 }
 
+static void virtnet_ack_link_announce(struct virtnet_info *vi)
+{
+	rtnl_lock();
+	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
+				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
+				  0, 0))
+		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
+	rtnl_unlock();
+}
+
 static int virtnet_close(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -952,20 +970,31 @@
 #endif
 };
 
-static void virtnet_update_status(struct virtnet_info *vi)
+static void virtnet_config_changed_work(struct work_struct *work)
 {
+	struct virtnet_info *vi =
+		container_of(work, struct virtnet_info, config_work);
 	u16 v;
 
+	mutex_lock(&vi->config_lock);
+	if (!vi->config_enable)
+		goto done;
+
 	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
 			      offsetof(struct virtio_net_config, status),
 			      &v) < 0)
-		return;
+		goto done;
+
+	if (v & VIRTIO_NET_S_ANNOUNCE) {
+		netif_notify_peers(vi->dev);
+		virtnet_ack_link_announce(vi);
+	}
 
 	/* Ignore unknown (future) status bits */
 	v &= VIRTIO_NET_S_LINK_UP;
 
 	if (vi->status == v)
-		return;
+		goto done;
 
 	vi->status = v;
 
@@ -976,13 +1005,15 @@
 		netif_carrier_off(vi->dev);
 		netif_stop_queue(vi->dev);
 	}
+done:
+	mutex_unlock(&vi->config_lock);
 }
 
 static void virtnet_config_changed(struct virtio_device *vdev)
 {
 	struct virtnet_info *vi = vdev->priv;
 
-	virtnet_update_status(vi);
+	queue_work(system_nrt_wq, &vi->config_work);
 }
 
 static int init_vqs(struct virtnet_info *vi)
@@ -1076,6 +1107,9 @@
 		goto free;
 
 	INIT_DELAYED_WORK(&vi->refill, refill_work);
+	mutex_init(&vi->config_lock);
+	vi->config_enable = true;
+	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
 	sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
 	sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
 
@@ -1111,7 +1145,7 @@
 	   otherwise get link status from config. */
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
 		netif_carrier_off(dev);
-		virtnet_update_status(vi);
+		queue_work(system_nrt_wq, &vi->config_work);
 	} else {
 		vi->status = VIRTIO_NET_S_LINK_UP;
 		netif_carrier_on(dev);
@@ -1170,10 +1204,17 @@
 {
 	struct virtnet_info *vi = vdev->priv;
 
+	/* Prevent config work handler from accessing the device. */
+	mutex_lock(&vi->config_lock);
+	vi->config_enable = false;
+	mutex_unlock(&vi->config_lock);
+
 	unregister_netdev(vi->dev);
 
 	remove_vq_common(vi);
 
+	flush_work(&vi->config_work);
+
 	free_percpu(vi->stats);
 	free_netdev(vi->dev);
 }
@@ -1183,6 +1224,11 @@
 {
 	struct virtnet_info *vi = vdev->priv;
 
+	/* Prevent config work handler from accessing the device */
+	mutex_lock(&vi->config_lock);
+	vi->config_enable = false;
+	mutex_unlock(&vi->config_lock);
+
 	virtqueue_disable_cb(vi->rvq);
 	virtqueue_disable_cb(vi->svq);
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
@@ -1196,6 +1242,8 @@
 
 	remove_vq_common(vi);
 
+	flush_work(&vi->config_work);
+
 	return 0;
 }
 
@@ -1216,6 +1264,10 @@
 	if (!try_fill_recv(vi, GFP_KERNEL))
 		queue_delayed_work(system_nrt_wq, &vi->refill, 0);
 
+	mutex_lock(&vi->config_lock);
+	vi->config_enable = true;
+	mutex_unlock(&vi->config_lock);
+
 	return 0;
 }
 #endif
@@ -1233,6 +1285,7 @@
 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
+	VIRTIO_NET_F_GUEST_ANNOUNCE,
 };
 
 static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index c676de7..9eb6479 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2055,15 +2055,4 @@
 	.remove		= __devexit_p(dscc4_remove_one),
 };
 
-static int __init dscc4_init_module(void)
-{
-	return pci_register_driver(&dscc4_driver);
-}
-
-static void __exit dscc4_cleanup_module(void)
-{
-	pci_unregister_driver(&dscc4_driver);
-}
-
-module_init(dscc4_init_module);
-module_exit(dscc4_cleanup_module);
+module_pci_driver(dscc4_driver);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index ebb9f24..1a62318 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2483,6 +2483,7 @@
 		pr_err("Control memory remap failed\n");
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
+		iounmap(card->mem);
 		kfree(card);
 		return -ENODEV;
 	}
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 76a8a4a..f5d533a 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1120,7 +1120,7 @@
 {
     lmc_softc_t *sc = dev_to_sc(dev);
 
-    lmc_trace(dev, "lmc_runnig_reset in");
+    lmc_trace(dev, "lmc_running_reset in");
 
     /* stop interrupts */
     /* Clear the interrupt mask */
@@ -1736,18 +1736,7 @@
 	.remove		= __devexit_p(lmc_remove_one),
 };
 
-static int __init init_lmc(void)
-{
-    return pci_register_driver(&lmc_driver);
-}
-
-static void __exit exit_lmc(void)
-{
-    pci_unregister_driver(&lmc_driver);
-}
-
-module_init(init_lmc);
-module_exit(exit_lmc);
+module_pci_driver(lmc_driver);
 
 unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
 {
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index 3f70338..672de18 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -32,8 +32,9 @@
 	  If unsure, it is safe to select M (module).
 
 config WIMAX_IWMC3200_SDIO
-	bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO"
+	bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO (EXPERIMENTAL)"
 	depends on WIMAX_I2400M_SDIO
+	depends on EXPERIMENTAL
 	select IWMC3200TOP
 	help
 	  Select if you have a device based on the Intel Multicom WiMAX
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 29b1e03..713d033 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -695,7 +695,7 @@
 	d_fnstart(3, dev, "(iface %p)\n", iface);
 	rmb();		/* see i2400m->updown's documentation  */
 	if (i2400m->updown == 0) {
-		d_printf(1, dev, "fw was down, no resume neeed\n");
+		d_printf(1, dev, "fw was down, no resume needed\n");
 		goto out;
 	}
 	d_printf(1, dev, "fw was up, resuming\n");
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 8faa129..8c50d9d 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -19,6 +19,7 @@
 #include <linux/nl80211.h>
 #include <linux/platform_device.h>
 #include <linux/etherdevice.h>
+#include <linux/export.h>
 #include <ar231x_platform.h>
 #include "ath5k.h"
 #include "debug.h"
@@ -119,7 +120,7 @@
 	if (res == NULL) {
 		dev_err(&pdev->dev, "no IRQ resource found\n");
 		ret = -ENXIO;
-		goto err_out;
+		goto err_iounmap;
 	}
 
 	irq = res->start;
@@ -128,7 +129,7 @@
 	if (hw == NULL) {
 		dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
 		ret = -ENOMEM;
-		goto err_out;
+		goto err_iounmap;
 	}
 
 	ah = hw->priv;
@@ -185,6 +186,8 @@
  err_free_hw:
 	ieee80211_free_hw(hw);
 	platform_set_drvdata(pdev, NULL);
+ err_iounmap:
+	iounmap(mem);
  err_out:
 	return ret;
 }
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index 6675c92..acc9aa8 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -55,8 +55,9 @@
 		ath6kl_warn("failed to allocate testmode rx skb!\n");
 		return;
 	}
-	NLA_PUT_U32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD);
-	NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf);
+	if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) ||
+	    nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf))
+		goto nla_put_failure;
 	cfg80211_testmode_event(skb, GFP_KERNEL);
 	return;
 
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f4b7334..dfa78e8 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1574,6 +1574,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ieee80211_conf *conf = &hw->conf;
+	bool reset_channel = false;
 
 	ath9k_ps_wakeup(sc);
 	mutex_lock(&sc->mutex);
@@ -1582,6 +1583,12 @@
 		sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
 		if (sc->ps_idle)
 			ath_cancel_work(sc);
+		else
+			/*
+			 * The chip needs a reset to properly wake up from
+			 * full sleep
+			 */
+			reset_channel = ah->chip_fullsleep;
 	}
 
 	/*
@@ -1610,7 +1617,7 @@
 		}
 	}
 
-	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+	if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
 		struct ieee80211_channel *curchan = hw->conf.channel;
 		int pos = curchan->hw_value;
 		int old_pos = -1;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 834e6bc..23eaa1b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1820,6 +1820,7 @@
 	struct ath_frame_info *fi = get_frame_info(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ath_buf *bf;
+	int fragno;
 	u16 seqno;
 
 	bf = ath_tx_get_buffer(sc);
@@ -1831,9 +1832,16 @@
 	ATH_TXBUF_RESET(bf);
 
 	if (tid) {
+		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
 		seqno = tid->seq_next;
 		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
-		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+
+		if (fragno)
+			hdr->seq_ctrl |= cpu_to_le16(fragno);
+
+		if (!ieee80211_has_morefrags(hdr->frame_control))
+			INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+
 		bf->bf_state.seqno = seqno;
 	}
 
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 80b0755..a54fb2d 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -193,7 +193,7 @@
 	.name		= "b43-sdio",
 	.id_table	= b43_sdio_ids,
 	.probe		= b43_sdio_probe,
-	.remove		= b43_sdio_remove,
+	.remove		= __devexit_p(b43_sdio_remove),
 };
 
 int b43_sdio_init(void)
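
Wrapping the remove callback in __devexit_p() matches a __devexit-annotated function: when hotplug support is configured out, the function body may be discarded and the macro evaluates to NULL rather than leaving a dangling pointer in the ops table. A small sketch of the idiom, with placeholder driver names:

#include <linux/init.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

/* Placeholder driver showing the __devexit_p() idiom. */
static int __devinit foo_sdio_probe(struct sdio_func *func,
				    const struct sdio_device_id *id)
{
	return 0;				/* claim the function */
}

static void __devexit foo_sdio_remove(struct sdio_func *func)
{
	/* teardown; this body may be discarded when hotplug is disabled */
}

static struct sdio_driver foo_sdio_driver = {
	.name	= "foo-sdio",
	.probe	= foo_sdio_probe,
	/* evaluates to NULL if the __devexit section is dropped */
	.remove	= __devexit_p(foo_sdio_remove),
};
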
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 231ddf4..7083db7 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7614,6 +7614,7 @@
 {
 	int len_mpdu;
 	struct ieee80211_rx_status rx_status;
+	struct ieee80211_hdr *hdr;
 
 	memset(&rx_status, 0, sizeof(rx_status));
 	prep_mac80211_status(wlc, rxh, p, &rx_status);
@@ -7623,6 +7624,13 @@
 	skb_pull(p, D11_PHY_HDR_LEN);
 	__skb_trim(p, len_mpdu);
 
+	/* unmute transmit */
+	if (wlc->hw->suspended_fifos) {
+		hdr = (struct ieee80211_hdr *)p->data;
+		if (ieee80211_is_beacon(hdr->frame_control))
+			brcms_b_mute(wlc->hw, false);
+	}
+
 	memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
 	ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
 }
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 8193687..c72136c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -344,38 +344,50 @@
 
 static inline void read_register(struct net_device *dev, u32 reg, u32 * val)
 {
-	*val = readl((void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	*val = ioread32(priv->ioaddr + reg);
 	IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val);
 }
 
 static inline void write_register(struct net_device *dev, u32 reg, u32 val)
 {
-	writel(val, (void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	iowrite32(val, priv->ioaddr + reg);
 	IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val);
 }
 
 static inline void read_register_word(struct net_device *dev, u32 reg,
 				      u16 * val)
 {
-	*val = readw((void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	*val = ioread16(priv->ioaddr + reg);
 	IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val);
 }
 
 static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val)
 {
-	*val = readb((void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	*val = ioread8(priv->ioaddr + reg);
 	IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
 }
 
 static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
 {
-	writew(val, (void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	iowrite16(val, priv->ioaddr + reg);
 	IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
 }
 
 static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
 {
-	writeb(val, (void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	iowrite8(val, priv->ioaddr + reg);
 	IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val);
 }
 
@@ -507,13 +519,13 @@
 		read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf);
 }
 
-static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev)
+static bool ipw2100_hw_is_adapter_in_system(struct net_device *dev)
 {
-	return (dev->base_addr &&
-		(readl
-		 ((void __iomem *)(dev->base_addr +
-				   IPW_REG_DOA_DEBUG_AREA_START))
-		 == IPW_DATA_DOA_DEBUG_VALUE));
+	u32 dbg;
+
+	read_register(dev, IPW_REG_DOA_DEBUG_AREA_START, &dbg);
+
+	return dbg == IPW_DATA_DOA_DEBUG_VALUE;
 }
 
 static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord,
@@ -3777,7 +3789,7 @@
 	    IPW2100_ORD(COUNTRY_CODE,
 				"IEEE country code as recv'd from beacon"),
 	    IPW2100_ORD(COUNTRY_CHANNELS,
-				"channels suported by country"),
+				"channels supported by country"),
 	    IPW2100_ORD(RESET_CNT, "adapter resets (warm)"),
 	    IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"),
 	    IPW2100_ORD(ANTENNA_DIVERSITY,
@@ -4066,7 +4078,7 @@
 	ipw2100_firmware.version = 0;
 #endif
 
-	printk(KERN_INFO "%s: Reseting on mode change.\n", priv->net_dev->name);
+	printk(KERN_INFO "%s: Resetting on mode change.\n", priv->net_dev->name);
 	priv->reset_backoff = 0;
 	schedule_reset(priv);
 
@@ -6086,9 +6098,7 @@
 /* Look into using netdev destructor to shutdown libipw? */
 
 static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
-					       void __iomem * base_addr,
-					       unsigned long mem_start,
-					       unsigned long mem_len)
+					       void __iomem * ioaddr)
 {
 	struct ipw2100_priv *priv;
 	struct net_device *dev;
@@ -6100,6 +6110,7 @@
 	priv->ieee = netdev_priv(dev);
 	priv->pci_dev = pci_dev;
 	priv->net_dev = dev;
+	priv->ioaddr = ioaddr;
 
 	priv->ieee->hard_start_xmit = ipw2100_tx;
 	priv->ieee->set_security = shim__set_security;
@@ -6115,10 +6126,6 @@
 	dev->watchdog_timeo = 3 * HZ;
 	dev->irq = 0;
 
-	dev->base_addr = (unsigned long)base_addr;
-	dev->mem_start = mem_start;
-	dev->mem_end = dev->mem_start + mem_len - 1;
-
 	/* NOTE: We don't use the wireless_handlers hook
 	 * in dev as the system will start throwing WX requests
 	 * to us before we're actually initialized and it just
@@ -6219,8 +6226,7 @@
 static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
 				const struct pci_device_id *ent)
 {
-	unsigned long mem_start, mem_len, mem_flags;
-	void __iomem *base_addr = NULL;
+	void __iomem *ioaddr;
 	struct net_device *dev = NULL;
 	struct ipw2100_priv *priv = NULL;
 	int err = 0;
@@ -6229,18 +6235,14 @@
 
 	IPW_DEBUG_INFO("enter\n");
 
-	mem_start = pci_resource_start(pci_dev, 0);
-	mem_len = pci_resource_len(pci_dev, 0);
-	mem_flags = pci_resource_flags(pci_dev, 0);
-
-	if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) {
+	if (!(pci_resource_flags(pci_dev, 0) & IORESOURCE_MEM)) {
 		IPW_DEBUG_INFO("weird - resource type is not memory\n");
 		err = -ENODEV;
-		goto fail;
+		goto out;
 	}
 
-	base_addr = ioremap_nocache(mem_start, mem_len);
-	if (!base_addr) {
+	ioaddr = pci_iomap(pci_dev, 0, 0);
+	if (!ioaddr) {
 		printk(KERN_WARNING DRV_NAME
 		       "Error calling ioremap_nocache.\n");
 		err = -EIO;
@@ -6248,7 +6250,7 @@
 	}
 
 	/* allocate and initialize our net_device */
-	dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len);
+	dev = ipw2100_alloc_device(pci_dev, ioaddr);
 	if (!dev) {
 		printk(KERN_WARNING DRV_NAME
 		       "Error calling ipw2100_alloc_device.\n");
@@ -6383,8 +6385,8 @@
 	priv->status |= STATUS_INITIALIZED;
 
 	mutex_unlock(&priv->action_mutex);
-
-	return 0;
+out:
+	return err;
 
       fail_unlock:
 	mutex_unlock(&priv->action_mutex);
@@ -6413,63 +6415,56 @@
 		pci_set_drvdata(pci_dev, NULL);
 	}
 
-	if (base_addr)
-		iounmap(base_addr);
+	pci_iounmap(pci_dev, ioaddr);
 
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
-
-	return err;
+	goto out;
 }
 
 static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
 {
 	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
-	struct net_device *dev;
+	struct net_device *dev = priv->net_dev;
 
-	if (priv) {
-		mutex_lock(&priv->action_mutex);
+	mutex_lock(&priv->action_mutex);
 
-		priv->status &= ~STATUS_INITIALIZED;
+	priv->status &= ~STATUS_INITIALIZED;
 
-		dev = priv->net_dev;
-		sysfs_remove_group(&pci_dev->dev.kobj,
-				   &ipw2100_attribute_group);
+	sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
 
 #ifdef CONFIG_PM
-		if (ipw2100_firmware.version)
-			ipw2100_release_firmware(priv, &ipw2100_firmware);
+	if (ipw2100_firmware.version)
+		ipw2100_release_firmware(priv, &ipw2100_firmware);
 #endif
-		/* Take down the hardware */
-		ipw2100_down(priv);
+	/* Take down the hardware */
+	ipw2100_down(priv);
 
-		/* Release the mutex so that the network subsystem can
-		 * complete any needed calls into the driver... */
-		mutex_unlock(&priv->action_mutex);
+	/* Release the mutex so that the network subsystem can
+	 * complete any needed calls into the driver... */
+	mutex_unlock(&priv->action_mutex);
 
-		/* Unregister the device first - this results in close()
-		 * being called if the device is open.  If we free storage
-		 * first, then close() will crash. */
-		unregister_netdev(dev);
+	/* Unregister the device first - this results in close()
+	 * being called if the device is open.  If we free storage
+	 * first, then close() will crash.
+	 * FIXME: remove the comment above. */
+	unregister_netdev(dev);
 
-		ipw2100_kill_works(priv);
+	ipw2100_kill_works(priv);
 
-		ipw2100_queues_free(priv);
+	ipw2100_queues_free(priv);
 
-		/* Free potential debugging firmware snapshot */
-		ipw2100_snapshot_free(priv);
+	/* Free potential debugging firmware snapshot */
+	ipw2100_snapshot_free(priv);
 
-		if (dev->irq)
-			free_irq(dev->irq, priv);
+	free_irq(dev->irq, priv);
 
-		if (dev->base_addr)
-			iounmap((void __iomem *)dev->base_addr);
+	pci_iounmap(pci_dev, priv->ioaddr);
 
-		/* wiphy_unregister needs to be here, before free_libipw */
-		wiphy_unregister(priv->ieee->wdev.wiphy);
-		kfree(priv->ieee->bg_band.channels);
-		free_libipw(dev, 0);
-	}
+	/* wiphy_unregister needs to be here, before free_libipw */
+	wiphy_unregister(priv->ieee->wdev.wiphy);
+	kfree(priv->ieee->bg_band.channels);
+	free_libipw(dev, 0);
 
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
@@ -8612,7 +8607,7 @@
 	struct net_device *dev = priv->net_dev;
 	const unsigned char *microcode_data = fw->uc.data;
 	unsigned int microcode_data_left = fw->uc.size;
-	void __iomem *reg = (void __iomem *)dev->base_addr;
+	void __iomem *reg = priv->ioaddr;
 
 	struct symbol_alive_response response;
 	int i, j;
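
The ipw2100 changes above stop parking the MMIO cookie in net_device::base_addr: the mapping now comes from pci_iomap(), is kept in the driver's private struct, all register accesses go through ioread32()/iowrite32(), and teardown uses pci_iounmap(). A rough sketch of that pattern — names are placeholders, and pci_enable_device()/pci_request_regions() handling is omitted:

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/slab.h>

/* Sketch only: MMIO cookie held in driver-private data, not the netdev. */
struct foo_priv {
	void __iomem *ioaddr;
};

static struct foo_priv *foo_map(struct pci_dev *pdev)
{
	struct foo_priv *priv;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
		return NULL;			/* BAR 0 must be MMIO */

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	priv->ioaddr = pci_iomap(pdev, 0, 0);	/* 0 = map the whole BAR */
	if (!priv->ioaddr) {
		kfree(priv);
		return NULL;
	}
	return priv;
}

static void foo_unmap(struct pci_dev *pdev, struct foo_priv *priv)
{
	pci_iounmap(pdev, priv->ioaddr);
	kfree(priv);
}
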
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 18741a4..9731252 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -479,6 +479,7 @@
 #define CAP_PRIVACY_ON          (1<<1)	/* Off = No privacy */
 
 struct ipw2100_priv {
+	void __iomem *ioaddr;
 
 	int stop_hang_check;	/* Set 1 when shutting down to kill hang_check */
 	int stop_rf_kill;	/* Set 1 when shutting down to kill rf_kill */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index f37d315..d57522c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -7024,7 +7024,7 @@
 			    cpu_to_le16(burst_duration);
 	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
 		if (type == IEEE_B) {
-			IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
+			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
 				      type);
 			if (priv->qos_data.qos_enable == 0)
 				active_one = &def_parameters_CCK;
@@ -11829,10 +11829,6 @@
 	net_dev->wireless_data = &priv->wireless_data;
 	net_dev->wireless_handlers = &ipw_wx_handler_def;
 	net_dev->ethtool_ops = &ipw_ethtool_ops;
-	net_dev->irq = pdev->irq;
-	net_dev->base_addr = (unsigned long)priv->hw_base;
-	net_dev->mem_start = pci_resource_start(pdev, 0);
-	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
 
 	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
 	if (err) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index bd92bd8..060aac3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -183,9 +183,10 @@
 			 "Run out of memory for messages to user space ?\n");
 		return;
 	}
-	NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
-	/* the length doesn't include len_n_flags field, so add it manually */
-	NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data);
+	if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
+	    /* the length doesn't include len_n_flags field, so add it manually */
+	    nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
+		goto nla_put_failure;
 	cfg80211_testmode_event(skb, GFP_ATOMIC);
 	return;
 
@@ -313,8 +314,9 @@
 	memcpy(reply_buf, &(pkt->hdr), reply_len);
 	iwl_free_resp(&cmd);
 
-	NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
-	NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf);
+	if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
+	    nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
+		goto nla_put_failure;
 	return cfg80211_testmode_reply(skb);
 
 nla_put_failure:
@@ -378,7 +380,8 @@
 			IWL_ERR(priv, "Memory allocation fail\n");
 			return -ENOMEM;
 		}
-		NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+		if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0)
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -480,10 +483,11 @@
 			IWL_ERR(priv, "Memory allocation fail\n");
 			return -ENOMEM;
 		}
-		NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
-			    IWL_TM_CMD_DEV2APP_SYNC_RSP);
-		NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP,
-			rsp_data_len, rsp_data_ptr);
+		if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+				IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
+		    nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
+			    rsp_data_len, rsp_data_ptr))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0)
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -538,11 +542,12 @@
 				IWL_ERR(priv, "Memory allocation fail\n");
 				return -ENOMEM;
 			}
-			NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
-				IWL_TM_CMD_DEV2APP_EEPROM_RSP);
-			NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
-				priv->cfg->base_params->eeprom_size,
-				priv->eeprom);
+			if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+					IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
+			    nla_put(skb, IWL_TM_ATTR_EEPROM,
+				    priv->cfg->base_params->eeprom_size,
+				    priv->eeprom))
+				goto nla_put_failure;
 			status = cfg80211_testmode_reply(skb);
 			if (status < 0)
 				IWL_ERR(priv, "Error sending msg : %d\n",
@@ -568,8 +573,9 @@
 			IWL_ERR(priv, "Memory allocation fail\n");
 			return -ENOMEM;
 		}
-		NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION,
-			    priv->fw->ucode_ver);
+		if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
+				priv->fw->ucode_ver))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0)
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -584,7 +590,8 @@
 			IWL_ERR(priv, "Memory allocation fail\n");
 			return -ENOMEM;
 		}
-		NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
+		if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0)
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -604,9 +611,10 @@
 			inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
 			data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
 		}
-		NLA_PUT_U32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode);
-		NLA_PUT_U32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size);
-		NLA_PUT_U32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size);
+		if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
+		    nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
+		    nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0)
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -680,9 +688,10 @@
 			iwl_trace_cleanup(priv);
 			return -ENOMEM;
 		}
-		NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR,
-			sizeof(priv->testmode_trace.dma_addr),
-			(u64 *)&priv->testmode_trace.dma_addr);
+		if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
+			    sizeof(priv->testmode_trace.dma_addr),
+			    (u64 *)&priv->testmode_trace.dma_addr))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0) {
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -727,9 +736,10 @@
 			length = priv->testmode_trace.buff_size %
 				DUMP_CHUNK_SIZE;
 
-		NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
-			priv->testmode_trace.trace_addr +
-			(DUMP_CHUNK_SIZE * idx));
+		if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
+			    priv->testmode_trace.trace_addr +
+			    (DUMP_CHUNK_SIZE * idx)))
+			goto nla_put_failure;
 		idx++;
 		cb->args[4] = idx;
 		return 0;
@@ -924,9 +934,10 @@
 			length = priv->testmode_mem.buff_size %
 				DUMP_CHUNK_SIZE;
 
-		NLA_PUT(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
-			priv->testmode_mem.buff_addr +
-			(DUMP_CHUNK_SIZE * idx));
+		if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
+			    priv->testmode_mem.buff_addr +
+			    (DUMP_CHUNK_SIZE * idx)))
+			goto nla_put_failure;
 		idx++;
 		cb->args[4] = idx;
 		return 0;
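
The NLA_PUT()/NLA_PUT_U32() macros used by these testmode paths jumped to a local nla_put_failure label when an attribute did not fit; the conversions above replace them with the nla_put()/nla_put_u32() functions, which return non-zero on failure and leave the branch to the caller. A minimal sketch of the pattern, with placeholder attribute IDs rather than any particular driver's:

#include <net/netlink.h>

/* Placeholder attribute IDs for illustration only. */
enum { FOO_ATTR_CMD = 1, FOO_ATTR_DATA = 2 };

static int foo_fill(struct sk_buff *skb, u32 cmd, const void *buf, int len)
{
	/* nla_put*() return an error when the attribute does not fit,
	 * instead of jumping to nla_put_failure like the old macros did. */
	if (nla_put_u32(skb, FOO_ATTR_CMD, cmd) ||
	    nla_put(skb, FOO_ATTR_DATA, len, buf))
		goto nla_put_failure;

	return 0;	/* caller hands the skb to cfg80211/netlink from here */

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}
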
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index 03f998d..7107ce5 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -1,5 +1,5 @@
 config IWM
-	tristate "Intel Wireless Multicomm 3200 WiFi driver"
+	tristate "Intel Wireless Multicomm 3200 WiFi driver (EXPERIMENTAL)"
 	depends on MMC && EXPERIMENTAL
 	depends on CFG80211
 	select FW_LOADER
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 3fa1ece..2fa879b 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -103,7 +103,7 @@
  * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1
  * in the firmware spec
  */
-static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
+static int lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
 {
 	int ret = -ENOTSUPP;
 
@@ -1411,7 +1411,12 @@
 		goto done;
 	}
 
-	lbs_set_authtype(priv, sme);
+	ret = lbs_set_authtype(priv, sme);
+	if (ret == -ENOTSUPP) {
+		wiphy_err(wiphy, "unsupported authtype 0x%x\n", sme->auth_type);
+		goto done;
+	}
+
 	lbs_set_radio(priv, preamble, 1);
 
 	/* Do the actual association */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3edd473..03c0c6b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -582,11 +582,13 @@
 		goto nla_put_failure;
 	}
 
-	NLA_PUT(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
-		     sizeof(struct mac_address), data->addresses[1].addr);
+	if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
+		    sizeof(struct mac_address), data->addresses[1].addr))
+		goto nla_put_failure;
 
 	/* We get the skb->data */
-	NLA_PUT(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data);
+	if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
+		goto nla_put_failure;
 
 	/* We get the flags for this transmission, and we translate them to
 	   wmediumd flags  */
@@ -597,7 +599,8 @@
 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
 		hwsim_flags |= HWSIM_TX_CTL_NO_ACK;
 
-	NLA_PUT_U32(skb, HWSIM_ATTR_FLAGS, hwsim_flags);
+	if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
+		goto nla_put_failure;
 
 	/* We get the tx control (rate and retries) info*/
 
@@ -606,12 +609,14 @@
 		tx_attempts[i].count = info->status.rates[i].count;
 	}
 
-	NLA_PUT(skb, HWSIM_ATTR_TX_INFO,
-		     sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
-		     tx_attempts);
+	if (nla_put(skb, HWSIM_ATTR_TX_INFO,
+		    sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
+		    tx_attempts))
+		goto nla_put_failure;
 
 	/* We create a cookie to identify this skb */
-	NLA_PUT_U64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb);
+	if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb))
+		goto nla_put_failure;
 
 	genlmsg_end(skb, msg_head);
 	genlmsg_unicast(&init_net, skb, dst_pid);
@@ -1109,7 +1114,8 @@
 						nla_total_size(sizeof(u32)));
 		if (!skb)
 			return -ENOMEM;
-		NLA_PUT_U32(skb, HWSIM_TM_ATTR_PS, hwsim->ps);
+		if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
+			goto nla_put_failure;
 		return cfg80211_testmode_reply(skb);
 	default:
 		return -EOPNOTSUPP;
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 445ff21..2f218f9 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -48,15 +48,15 @@
 #define PCIE_HOST_INT_STATUS_MASK			0xC3C
 #define PCIE_SCRATCH_2_REG				0xC40
 #define PCIE_SCRATCH_3_REG				0xC44
-#define PCIE_SCRATCH_4_REG				0xCC0
-#define PCIE_SCRATCH_5_REG				0xCC4
-#define PCIE_SCRATCH_6_REG				0xCC8
-#define PCIE_SCRATCH_7_REG				0xCCC
-#define PCIE_SCRATCH_8_REG				0xCD0
-#define PCIE_SCRATCH_9_REG				0xCD4
-#define PCIE_SCRATCH_10_REG				0xCD8
-#define PCIE_SCRATCH_11_REG				0xCDC
-#define PCIE_SCRATCH_12_REG				0xCE0
+#define PCIE_SCRATCH_4_REG				0xCD0
+#define PCIE_SCRATCH_5_REG				0xCD4
+#define PCIE_SCRATCH_6_REG				0xCD8
+#define PCIE_SCRATCH_7_REG				0xCDC
+#define PCIE_SCRATCH_8_REG				0xCE0
+#define PCIE_SCRATCH_9_REG				0xCE4
+#define PCIE_SCRATCH_10_REG				0xCE8
+#define PCIE_SCRATCH_11_REG				0xCEC
+#define PCIE_SCRATCH_12_REG				0xCF0
 
 #define CPU_INTR_DNLD_RDY				BIT(0)
 #define CPU_INTR_DOOR_BELL				BIT(1)
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index bac3d03..e1eac83 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -1131,7 +1131,7 @@
 	.name	= "p54usb",
 	.id_table = p54u_table,
 	.probe = p54u_probe,
-	.disconnect = p54u_disconnect,
+	.disconnect = __devexit_p(p54u_disconnect),
 	.pre_reset = p54u_pre_reset,
 	.post_reset = p54u_post_reset,
 #ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 9cda706..0e59ea2 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -115,7 +115,8 @@
 			goto out_sleep;
 		}
 
-		NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
+		if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf))
+			goto nla_put_failure;
 		ret = cfg80211_testmode_reply(skb);
 		if (ret < 0)
 			goto out_sleep;
@@ -177,7 +178,8 @@
 		goto out_free;
 	}
 
-	NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+	if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd))
+		goto nla_put_failure;
 	ret = cfg80211_testmode_reply(skb);
 	if (ret < 0)
 		goto out_free;
@@ -296,7 +298,8 @@
 		goto out;
 	}
 
-	NLA_PUT(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr);
+	if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr))
+		goto nla_put_failure;
 	ret = cfg80211_testmode_reply(skb);
 	if (ret < 0)
 		goto out;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 663b32c..0ebbb19 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1965,7 +1965,7 @@
 	if (xen_initial_domain())
 		return 0;
 
-	if (!xen_platform_pci_unplug)
+	if (xen_hvm_domain() && !xen_platform_pci_unplug)
 		return -ENODEV;
 
 	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index bba8121..bf984b6 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -140,7 +140,7 @@
 	if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
 		return -EINVAL;
 
-	if (gpiospec->args[0] > gc->ngpio)
+	if (gpiospec->args[0] >= gc->ngpio)
 		return -EINVAL;
 
 	if (flags)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8156744..111569c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -967,16 +967,59 @@
 	return 0;
 }
 
+static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
+				     u32 saved_val, int retry)
+{
+	u32 val;
+
+	pci_read_config_dword(pdev, offset, &val);
+	if (val == saved_val)
+		return;
+
+	for (;;) {
+		dev_dbg(&pdev->dev, "restoring config space at offset "
+			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
+		pci_write_config_dword(pdev, offset, saved_val);
+		if (retry-- <= 0)
+			return;
+
+		pci_read_config_dword(pdev, offset, &val);
+		if (val == saved_val)
+			return;
+
+		mdelay(1);
+	}
+}
+
+static void pci_restore_config_space_range(struct pci_dev *pdev,
+					   int start, int end, int retry)
+{
+	int index;
+
+	for (index = end; index >= start; index--)
+		pci_restore_config_dword(pdev, 4 * index,
+					 pdev->saved_config_space[index],
+					 retry);
+}
+
+static void pci_restore_config_space(struct pci_dev *pdev)
+{
+	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
+		pci_restore_config_space_range(pdev, 10, 15, 0);
+		/* Restore BARs before the command register. */
+		pci_restore_config_space_range(pdev, 4, 9, 10);
+		pci_restore_config_space_range(pdev, 0, 3, 0);
+	} else {
+		pci_restore_config_space_range(pdev, 0, 15, 0);
+	}
+}
+
 /** 
  * pci_restore_state - Restore the saved state of a PCI device
  * @dev: - PCI device that we're dealing with
  */
 void pci_restore_state(struct pci_dev *dev)
 {
-	int i;
-	u32 val;
-	int tries;
-
 	if (!dev->state_saved)
 		return;
 
@@ -984,24 +1027,8 @@
 	pci_restore_pcie_state(dev);
 	pci_restore_ats_state(dev);
 
-	/*
-	 * The Base Address register should be programmed before the command
-	 * register(s)
-	 */
-	for (i = 15; i >= 0; i--) {
-		pci_read_config_dword(dev, i * 4, &val);
-		tries = 10;		
-		while (tries && val != dev->saved_config_space[i]) {
-			dev_dbg(&dev->dev, "restoring config "
-				"space at offset %#x (was %#x, writing %#x)\n",
-				i, val, (int)dev->saved_config_space[i]);
-			pci_write_config_dword(dev,i * 4,
-				dev->saved_config_space[i]);
-			pci_read_config_dword(dev, i * 4, &val);
-			mdelay(10);
-			tries--;
-		}
-	}
+	pci_restore_config_space(dev);
+
 	pci_restore_pcix_state(dev);
 	pci_restore_msi_state(dev);
 	pci_restore_iov_state(dev);
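
The rewrite above keeps the long-standing rule that BARs are restored (with a bounded retry) before the command register is re-armed, since enabling memory or I/O decode against stale BAR contents could claim the wrong addresses; it just makes the ordering explicit per index range. Roughly, for a type 0 (normal) header the saved dwords map as follows:

/* Saved config space is an array of 16 dwords; offset = index * 4.
 * For a type 0 header:
 *
 *   index  0- 3 : 0x00-0x0f  vendor/device ID, command/status, class, ...
 *   index  4- 9 : 0x10-0x27  BAR0-BAR5
 *   index 10-15 : 0x28-0x3f  cardbus CIS, subsystem IDs, expansion ROM,
 *                            capabilities pointer, interrupt line/pin
 *
 * pci_restore_config_space() therefore writes indices 10-15 first, then
 * the BARs (with retries), and only then 0-3, so the command register is
 * re-enabled after the BARs already point at the right addresses.
 */
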
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index fd00ff0..d6cc62c 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -290,6 +290,7 @@
 		} else {
 			printk(KERN_DEBUG "enable msix get value %x\n",
 				op.value);
+			err = op.value;
 		}
 	} else {
 		dev_err(&dev->dev, "enable msix get err %x\n", err);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index ec3b8cc..df6296c 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -908,10 +908,6 @@
 	const struct pinctrl_ops *ops = pctldev->desc->pctlops;
 	unsigned selector = 0;
 
-	/* No grouping */
-	if (!ops)
-		return 0;
-
 	mutex_lock(&pinctrl_mutex);
 
 	seq_puts(s, "registered pin groups:\n");
@@ -1225,6 +1221,19 @@
 
 #endif
 
+static int pinctrl_check_ops(struct pinctrl_dev *pctldev)
+{
+	const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+
+	if (!ops ||
+	    !ops->list_groups ||
+	    !ops->get_group_name ||
+	    !ops->get_group_pins)
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
  * pinctrl_register() - register a pin controller device
  * @pctldesc: descriptor for this pin controller
@@ -1256,6 +1265,14 @@
 	INIT_LIST_HEAD(&pctldev->gpio_ranges);
 	pctldev->dev = dev;
 
+	/* check core ops for sanity */
+	ret = pinctrl_check_ops(pctldev);
+	if (ret) {
+		pr_err("%s pinctrl ops lacks necessary functions\n",
+			pctldesc->name);
+		goto out_err;
+	}
+
 	/* If we're implementing pinmuxing, check the ops for sanity */
 	if (pctldesc->pmxops) {
 		ret = pinmux_check_ops(pctldev);
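
With pinctrl_check_ops() in place, a descriptor whose pctlops is missing any of the three group callbacks is rejected at registration time, which is why the debugfs path above can drop its NULL check. A sketch of an ops table that satisfies the new requirement — the group data and names are made up, and the callback signatures follow the pinctrl API as of this series, so check them against <linux/pinctrl/pinctrl.h>:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pinctrl/pinctrl.h>

/* Illustrative single-group table and callbacks; names are invented. */
static const unsigned foo_grp_pins[] = { 0, 1, 2, 3 };

static int foo_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
{
	return selector == 0 ? 0 : -EINVAL;	/* only one group */
}

static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
				       unsigned selector)
{
	return "foo_grp";
}

static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
			      const unsigned **pins, unsigned *num_pins)
{
	*pins = foo_grp_pins;
	*num_pins = ARRAY_SIZE(foo_grp_pins);
	return 0;
}

/* All three callbacks must be set or pinctrl_register() now fails. */
static const struct pinctrl_ops foo_pctlops = {
	.list_groups	= foo_list_groups,
	.get_group_name	= foo_get_group_name,
	.get_group_pins	= foo_get_group_pins,
};
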
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index cd9bc3b..5648dad 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -78,9 +78,13 @@
 	depends on PCH_GBE
 	help
 	  This driver adds support for using the PCH EG20T as a PTP
-	  clock. This clock is only useful if your PTP programs are
-	  getting hardware time stamps on the PTP Ethernet packets
-	  using the SO_TIMESTAMPING API.
+	  clock. The hardware supports time stamping of PTP packets
+	  when using the end-to-end delay (E2E) mechanism. The peer
+	  delay mechanism (P2P) is not supported.
+
+	  This clock is only useful if your PTP programs are getting
+	  hardware time stamps on the PTP Ethernet packets using the
+	  SO_TIMESTAMPING API.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called ptp_pch.
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index f519a13..1e528b5 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -304,6 +304,12 @@
 }
 EXPORT_SYMBOL(ptp_clock_event);
 
+int ptp_clock_index(struct ptp_clock *ptp)
+{
+	return ptp->index;
+}
+EXPORT_SYMBOL(ptp_clock_index);
+
 /* module operations */
 
 static void __exit ptp_exit(void)
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index 6f2782b..e03c406 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -284,6 +284,7 @@
 {
 	free_irq(MASTER_IRQ, &ixp_clock);
 	free_irq(SLAVE_IRQ, &ixp_clock);
+	ixp46x_phc_index = -1;
 	ptp_clock_unregister(ixp_clock.ptp_clock);
 }
 
@@ -302,6 +303,8 @@
 	if (IS_ERR(ixp_clock.ptp_clock))
 		return PTR_ERR(ixp_clock.ptp_clock);
 
+	ixp46x_phc_index = ptp_clock_index(ixp_clock.ptp_clock);
+
 	__raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend);
 	__raw_writel(1, &ixp_clock.regs->trgt_lo);
 	__raw_writel(0, &ixp_clock.regs->trgt_hi);
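
Caching ptp_clock_index() in ixp46x_phc_index lets the matching Ethernet driver tell user space which /dev/ptpN node corresponds to its hardware clock. Below is a hedged sketch of the consumer side, assuming the ethtool get_ts_info/ETHTOOL_GET_TS_INFO time-stamping interface from this same development cycle; the struct and helper names are illustrative and not taken from the ixp4xx driver:

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

/* Hypothetical MAC driver private data holding the registered clock. */
struct foo_priv {
	struct ptp_clock *ptp;
};

static int foo_get_ts_info(struct net_device *ndev,
			   struct ethtool_ts_info *info)
{
	struct foo_priv *priv = netdev_priv(ndev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	/* -1 means "no PHC"; otherwise user space opens /dev/ptp<index> */
	info->phc_index = priv->ptp ? ptp_clock_index(priv->ptp) : -1;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_ALL);
	return 0;
}
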
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 375eb04..08c3311 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -261,6 +261,7 @@
 
 	ns = ((u64) hi) << 32;
 	ns |= lo;
+	ns <<= TICKS_NS_SHIFT;
 
 	return ns;
 }
@@ -277,6 +278,7 @@
 
 	ns = ((u64) hi) << 32;
 	ns |= lo;
+	ns <<= TICKS_NS_SHIFT;
 
 	return ns;
 }
@@ -306,7 +308,7 @@
  *				    traffic on the  ethernet interface
  * @addr:	dress which contain the column separated address to be used.
  */
-static int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
 {
 	s32 i;
 	struct pch_dev *chip = pci_get_drvdata(pdev);
@@ -350,6 +352,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(pch_set_station_address);
 
 /*
  * Interrupt service routine
@@ -649,8 +652,6 @@
 	iowrite32(1, &chip->regs->trgt_lo);
 	iowrite32(0, &chip->regs->trgt_hi);
 	iowrite32(PCH_TSE_TTIPEND, &chip->regs->event);
-	/* Version: IEEE1588 v1 and IEEE1588-2008,  Mode: All Evwnt, Locked  */
-	iowrite32(0x80020000, &chip->regs->ch_control);
 
 	pch_eth_enable_set(chip);
 
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 53969af..81fd606 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -214,7 +214,7 @@
 	{ /* end */ }
 };
 
-static struct platform_driver anatop_regulator = {
+static struct platform_driver anatop_regulator_driver = {
 	.driver = {
 		.name	= "anatop_regulator",
 		.owner  = THIS_MODULE,
@@ -226,13 +226,13 @@
 
 static int __init anatop_regulator_init(void)
 {
-	return platform_driver_register(&anatop_regulator);
+	return platform_driver_register(&anatop_regulator_driver);
 }
 postcore_initcall(anatop_regulator_init);
 
 static void __exit anatop_regulator_exit(void)
 {
-	platform_driver_unregister(&anatop_regulator);
+	platform_driver_unregister(&anatop_regulator_driver);
 }
 module_exit(anatop_regulator_exit);
 
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index 5502923..c9f890b 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -213,7 +213,6 @@
 		.name = "rtc-efi",
 		.owner = THIS_MODULE,
 	},
-	.probe = efi_rtc_probe,
 	.remove = __exit_p(efi_rtc_remove),
 };
 
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 692de73..684ef4b 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -339,8 +339,7 @@
 	dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
 
 	/* Enable the clockwatch on ST Variants */
-	if ((ldata->hw_designer == AMBA_VENDOR_ST) &&
-	    (ldata->hw_revision > 1))
+	if (ldata->hw_designer == AMBA_VENDOR_ST)
 		writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
 		       ldata->base + RTC_CR);
 
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 7f8e6c2..33b6ba0 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -122,6 +122,7 @@
 static int __devinit r9701_probe(struct spi_device *spi)
 {
 	struct rtc_device *rtc;
+	struct rtc_time dt;
 	unsigned char tmp;
 	int res;
 
@@ -132,6 +133,27 @@
 		return -ENODEV;
 	}
 
+	/*
+	 * The device seems to be present. Now check if the registers
+	 * contain invalid values. If so, try to write a default date:
+	 * 2000/1/1 00:00:00
+	 */
+	r9701_get_datetime(&spi->dev, &dt);
+	if (rtc_valid_tm(&dt)) {
+		dev_info(&spi->dev, "trying to repair invalid date/time\n");
+		dt.tm_sec  = 0;
+		dt.tm_min  = 0;
+		dt.tm_hour = 0;
+		dt.tm_mday = 1;
+		dt.tm_mon  = 0;
+		dt.tm_year = 100;
+
+		if (r9701_set_datetime(&spi->dev, &dt)) {
+			dev_err(&spi->dev, "cannot repair RTC register\n");
+			return -ENODEV;
+		}
+	}
+
 	rtc = rtc_device_register("r9701",
 				&spi->dev, &r9701_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtc))
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 9ccea13..3f3a297 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -40,6 +40,10 @@
 	TYPE_S3C64XX,
 };
 
+struct s3c_rtc_drv_data {
+	int cpu_type;
+};
+
 /* I have yet to find an S3C implementation with more than one
  * of these rtc blocks in */
 
@@ -446,10 +450,12 @@
 static inline int s3c_rtc_get_driver_data(struct platform_device *pdev)
 {
 #ifdef CONFIG_OF
+	struct s3c_rtc_drv_data *data;
 	if (pdev->dev.of_node) {
 		const struct of_device_id *match;
 		match = of_match_node(s3c_rtc_dt_match, pdev->dev.of_node);
-		return match->data;
+		data = (struct s3c_rtc_drv_data *) match->data;
+		return data->cpu_type;
 	}
 #endif
 	return platform_get_device_id(pdev)->driver_data;
@@ -664,20 +670,27 @@
 #define s3c_rtc_resume  NULL
 #endif
 
+static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
+	[TYPE_S3C2410] = { TYPE_S3C2410 },
+	[TYPE_S3C2416] = { TYPE_S3C2416 },
+	[TYPE_S3C2443] = { TYPE_S3C2443 },
+	[TYPE_S3C64XX] = { TYPE_S3C64XX },
+};
+
 #ifdef CONFIG_OF
 static const struct of_device_id s3c_rtc_dt_match[] = {
 	{
-		.compatible = "samsung,s3c2410-rtc"
-		.data = TYPE_S3C2410,
+		.compatible = "samsung,s3c2410-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C2410],
 	}, {
-		.compatible = "samsung,s3c2416-rtc"
-		.data = TYPE_S3C2416,
+		.compatible = "samsung,s3c2416-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C2416],
 	}, {
-		.compatible = "samsung,s3c2443-rtc"
-		.data = TYPE_S3C2443,
+		.compatible = "samsung,s3c2443-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C2443],
 	}, {
-		.compatible = "samsung,s3c6410-rtc"
-		.data = TYPE_S3C64XX,
+		.compatible = "samsung,s3c6410-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C64XX],
 	},
 	{},
 };
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 4c2c6df..258abea 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -112,6 +112,7 @@
 #define BIT_RTC_CTRL_REG_TEST_MODE_M             0x10
 #define BIT_RTC_CTRL_REG_SET_32_COUNTER_M        0x20
 #define BIT_RTC_CTRL_REG_GET_TIME_M              0x40
+#define BIT_RTC_CTRL_REG_RTC_V_OPT               0x80
 
 /* RTC_STATUS_REG bitfields */
 #define BIT_RTC_STATUS_REG_RUN_M                 0x02
@@ -235,25 +236,57 @@
 	unsigned char rtc_data[ALL_TIME_REGS + 1];
 	int ret;
 	u8 save_control;
+	u8 rtc_control;
 
 	ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
-	if (ret < 0)
+	if (ret < 0) {
+		dev_err(dev, "%s: reading CTRL_REG, error %d\n", __func__, ret);
 		return ret;
+	}
+	/* for twl6030/32 make sure BIT_RTC_CTRL_REG_GET_TIME_M is clear */
+	if (twl_class_is_6030()) {
+		if (save_control & BIT_RTC_CTRL_REG_GET_TIME_M) {
+			save_control &= ~BIT_RTC_CTRL_REG_GET_TIME_M;
+			ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+			if (ret < 0) {
+				dev_err(dev, "%s clr GET_TIME, error %d\n",
+					__func__, ret);
+				return ret;
+			}
+		}
+	}
 
-	save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;
+	/* Copy RTC counting registers to static registers or latches */
+	rtc_control = save_control | BIT_RTC_CTRL_REG_GET_TIME_M;
 
-	ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
-	if (ret < 0)
+	/* for twl6030/32 enable read access to static shadowed registers */
+	if (twl_class_is_6030())
+		rtc_control |= BIT_RTC_CTRL_REG_RTC_V_OPT;
+
+	ret = twl_rtc_write_u8(rtc_control, REG_RTC_CTRL_REG);
+	if (ret < 0) {
+		dev_err(dev, "%s: writing CTRL_REG, error %d\n", __func__, ret);
 		return ret;
+	}
 
 	ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
 			(rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
 
 	if (ret < 0) {
-		dev_err(dev, "rtc_read_time error %d\n", ret);
+		dev_err(dev, "%s: reading data, error %d\n", __func__, ret);
 		return ret;
 	}
 
+	/* for twl6030 restore original state of rtc control register */
+	if (twl_class_is_6030()) {
+		ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+		if (ret < 0) {
+			dev_err(dev, "%s: restore CTRL_REG, error %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
 	tm->tm_sec = bcd2bin(rtc_data[0]);
 	tm->tm_min = bcd2bin(rtc_data[1]);
 	tm->tm_hour = bcd2bin(rtc_data[2]);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c21871a..bc2e8a7 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2844,6 +2844,7 @@
 	sector_t recid, trkid;
 	unsigned int offs;
 	unsigned int count, count_to_trk_end;
+	int ret;
 
 	basedev = block->base;
 	if (rq_data_dir(req) == READ) {
@@ -2884,8 +2885,8 @@
 
 	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
 	if (IS_ERR(itcw)) {
-		dasd_sfree_request(cqr, startdev);
-		return ERR_PTR(-EINVAL);
+		ret = -EINVAL;
+		goto out_error;
 	}
 	cqr->cpaddr = itcw_get_tcw(itcw);
 	if (prepare_itcw(itcw, first_trk, last_trk,
@@ -2897,8 +2898,8 @@
 		/* Clock not in sync and XRC is enabled.
 		 * Try again later.
 		 */
-		dasd_sfree_request(cqr, startdev);
-		return ERR_PTR(-EAGAIN);
+		ret = -EAGAIN;
+		goto out_error;
 	}
 	len_to_track_end = 0;
 	/*
@@ -2937,8 +2938,10 @@
 					tidaw_flags = 0;
 				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
 							    dst, part_len);
-				if (IS_ERR(last_tidaw))
-					return ERR_PTR(-EINVAL);
+				if (IS_ERR(last_tidaw)) {
+					ret = -EINVAL;
+					goto out_error;
+				}
 				dst += part_len;
 			}
 		}
@@ -2947,8 +2950,10 @@
 			dst = page_address(bv->bv_page) + bv->bv_offset;
 			last_tidaw = itcw_add_tidaw(itcw, 0x00,
 						    dst, bv->bv_len);
-			if (IS_ERR(last_tidaw))
-				return ERR_PTR(-EINVAL);
+			if (IS_ERR(last_tidaw)) {
+				ret = -EINVAL;
+				goto out_error;
+			}
 		}
 	}
 	last_tidaw->flags |= TIDAW_FLAGS_LAST;
@@ -2968,6 +2973,9 @@
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
+out_error:
+	dasd_sfree_request(cqr, startdev);
+	return ERR_PTR(ret);
 }
 
 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 85f4a9a..73bef0b 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -903,7 +903,7 @@
 		goto fail_urdev_put;
 	}
 
-	cdev_init(urd->char_device, &ur_fops);
+	urd->char_device->ops = &ur_fops;
 	urd->char_device->dev = MKDEV(major, minor);
 	urd->char_device->owner = ur_fops.owner;
 
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 453a740..9220861 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -662,7 +662,7 @@
 
 	/* setup Socket parameters */
 	sk = sock->sk;
-	sk->sk_reuse = 1;
+	sk->sk_reuse = SK_CAN_REUSE;
 	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
 	sk->sk_allocation = GFP_ATOMIC;
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2cfcbff..386f0c5 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -835,7 +835,7 @@
 
 	scsi_eh_restore_cmnd(scmd, &ses);
 
-	if (sdrv->eh_action)
+	if (sdrv && sdrv->eh_action)
 		rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
 
 	return rtn;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 31bfba8..9b2901f 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -653,7 +653,7 @@
 			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
 								rx_buf_count);
 			if (t->tx_buf)
-				dma_unmap_single(NULL, t->tx_dma, t->len,
+				dma_unmap_single(&spi->dev, t->tx_dma, t->len,
 								DMA_TO_DEVICE);
 			return -ENOMEM;
 		}
@@ -692,10 +692,10 @@
 	if (spicfg->io_type == SPI_IO_TYPE_DMA) {
 
 		if (t->tx_buf)
-			dma_unmap_single(NULL, t->tx_dma, t->len,
+			dma_unmap_single(&spi->dev, t->tx_dma, t->len,
 								DMA_TO_DEVICE);
 
-		dma_unmap_single(NULL, t->rx_dma, rx_buf_count,
+		dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count,
 							DMA_FROM_DEVICE);
 
 		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
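
dma_unmap_single() has to be given the same struct device the buffer was mapped against; passing NULL only happens to work on some architectures, which is what the spi-davinci hunks above correct. The pairing in isolation, with placeholder names:

#include <linux/dma-mapping.h>

/* Map a transmit buffer against the device doing the DMA and unmap it
 * with the same device pointer. */
static int foo_map_tx(struct device *dev, void *buf, size_t len,
		      dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;
	return 0;
}

static void foo_unmap_tx(struct device *dev, dma_addr_t dma, size_t len)
{
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}
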
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 24cacff..5f748c0 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -139,10 +139,12 @@
 static void fsl_spi_chipselect(struct spi_device *spi, int value)
 {
 	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
-	struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
+	struct fsl_spi_platform_data *pdata;
 	bool pol = spi->mode & SPI_CS_HIGH;
 	struct spi_mpc8xxx_cs	*cs = spi->controller_state;
 
+	pdata = spi->dev.parent->parent->platform_data;
+
 	if (value == BITBANG_CS_INACTIVE) {
 		if (pdata->cs_control)
 			pdata->cs_control(spi, !pol);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 31054e3..570f220 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -83,7 +83,7 @@
 	struct spi_bitbang bitbang;
 
 	struct completion xfer_done;
-	void *base;
+	void __iomem *base;
 	int irq;
 	struct clk *clk;
 	unsigned long spi_clk;
@@ -766,8 +766,12 @@
 	}
 
 	ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
-	if (ret < 0)
-		num_cs = mxc_platform_info->num_chipselect;
+	if (ret < 0) {
+		if (mxc_platform_info)
+			num_cs = mxc_platform_info->num_chipselect;
+		else
+			return ret;
+	}
 
 	master = spi_alloc_master(&pdev->dev,
 			sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
@@ -784,7 +788,7 @@
 
 	for (i = 0; i < master->num_chipselect; i++) {
 		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
-		if (cs_gpio < 0)
+		if (cs_gpio < 0 && mxc_platform_info)
 			cs_gpio = mxc_platform_info->chipselect[i];
 
 		spi_imx->chipselect[i] = cs_gpio;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 96f0da6..09c925a 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2195,7 +2195,6 @@
 	struct pl022 *pl022 = dev_get_drvdata(dev);
 
 	clk_disable(pl022->clk);
-	amba_vcore_disable(pl022->adev);
 
 	return 0;
 }
@@ -2204,7 +2203,6 @@
 {
 	struct pl022 *pl022 = dev_get_drvdata(dev);
 
-	amba_vcore_enable(pl022->adev);
 	clk_enable(pl022->clk);
 
 	return 0;
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 08a3b11..eb1dee2 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -27,13 +27,14 @@
 
 config ANDROID_PERSISTENT_RAM
 	bool
+	depends on HAVE_MEMBLOCK
 	select REED_SOLOMON
 	select REED_SOLOMON_ENC8
 	select REED_SOLOMON_DEC8
 
 config ANDROID_RAM_CONSOLE
 	bool "Android RAM buffer console"
-	depends on !S390 && !UML
+	depends on !S390 && !UML && HAVE_MEMBLOCK
 	select ANDROID_PERSISTENT_RAM
 	default n
 
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 052b43e..b91e4bc 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -55,7 +55,6 @@
 };
 static int lowmem_minfree_size = 4;
 
-static struct task_struct *lowmem_deathpending;
 static unsigned long lowmem_deathpending_timeout;
 
 #define lowmem_print(level, x...)			\
@@ -64,24 +63,6 @@
 			printk(x);			\
 	} while (0)
 
-static int
-task_notify_func(struct notifier_block *self, unsigned long val, void *data);
-
-static struct notifier_block task_nb = {
-	.notifier_call	= task_notify_func,
-};
-
-static int
-task_notify_func(struct notifier_block *self, unsigned long val, void *data)
-{
-	struct task_struct *task = data;
-
-	if (task == lowmem_deathpending)
-		lowmem_deathpending = NULL;
-
-	return NOTIFY_OK;
-}
-
 static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 {
 	struct task_struct *tsk;
@@ -97,19 +78,6 @@
 	int other_file = global_page_state(NR_FILE_PAGES) -
 						global_page_state(NR_SHMEM);
 
-	/*
-	 * If we already have a death outstanding, then
-	 * bail out right away; indicating to vmscan
-	 * that we have nothing further to offer on
-	 * this pass.
-	 *
-	 * Note: Currently you need CONFIG_PROFILING
-	 * for this to work correctly.
-	 */
-	if (lowmem_deathpending &&
-	    time_before_eq(jiffies, lowmem_deathpending_timeout))
-		return 0;
-
 	if (lowmem_adj_size < array_size)
 		array_size = lowmem_adj_size;
 	if (lowmem_minfree_size < array_size)
@@ -148,6 +116,12 @@
 		if (!p)
 			continue;
 
+		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
+		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
+			task_unlock(p);
+			rcu_read_unlock();
+			return 0;
+		}
 		oom_score_adj = p->signal->oom_score_adj;
 		if (oom_score_adj < min_score_adj) {
 			task_unlock(p);
@@ -174,15 +148,9 @@
 		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
 			     selected->pid, selected->comm,
 			     selected_oom_score_adj, selected_tasksize);
-		/*
-		 * If CONFIG_PROFILING is off, then we don't want to stall
-		 * the killer by setting lowmem_deathpending.
-		 */
-#ifdef CONFIG_PROFILING
-		lowmem_deathpending = selected;
 		lowmem_deathpending_timeout = jiffies + HZ;
-#endif
 		send_sig(SIGKILL, selected, 0);
+		set_tsk_thread_flag(selected, TIF_MEMDIE);
 		rem -= selected_tasksize;
 	}
 	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
@@ -198,7 +166,6 @@
 
 static int __init lowmem_init(void)
 {
-	task_handoff_register(&task_nb);
 	register_shrinker(&lowmem_shrinker);
 	return 0;
 }
@@ -206,7 +173,6 @@
 static void __exit lowmem_exit(void)
 {
 	unregister_shrinker(&lowmem_shrinker);
-	task_handoff_unregister(&task_nb);
 }
 
 module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/staging/android/persistent_ram.c b/drivers/staging/android/persistent_ram.c
index e08f257..8d8c1e3 100644
--- a/drivers/staging/android/persistent_ram.c
+++ b/drivers/staging/android/persistent_ram.c
@@ -399,12 +399,12 @@
 struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc)
 {
 	struct persistent_ram_zone *prz;
-	int ret;
+	int ret = -ENOMEM;
 
 	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
 	if (!prz) {
 		pr_err("persistent_ram: failed to allocate persistent ram zone\n");
-		return ERR_PTR(-ENOMEM);
+		goto err;
 	}
 
 	INIT_LIST_HEAD(&prz->node);
@@ -412,13 +412,13 @@
 	ret = persistent_ram_buffer_init(dev_name(dev), prz);
 	if (ret) {
 		pr_err("persistent_ram: failed to initialize buffer\n");
-		return ERR_PTR(ret);
+		goto err;
 	}
 
 	prz->ecc = ecc;
 	ret = persistent_ram_init_ecc(prz, prz->buffer_size);
 	if (ret)
-		return ERR_PTR(ret);
+		goto err;
 
 	if (prz->buffer->sig == PERSISTENT_RAM_SIG) {
 		if (buffer_size(prz) > prz->buffer_size ||
@@ -442,6 +442,9 @@
 	atomic_set(&prz->buffer->size, 0);
 
 	return prz;
+err:
+	kfree(prz);
+	return ERR_PTR(ret);
 }
 
 struct persistent_ram_zone * __init
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index bc723ef..45c522c 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -85,7 +85,7 @@
 	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
 	struct timed_gpio *cur_gpio;
 	struct timed_gpio_data *gpio_data, *gpio_dat;
-	int i, j, ret = 0;
+	int i, ret;
 
 	if (!pdata)
 		return -EBUSY;
@@ -108,18 +108,12 @@
 		gpio_dat->dev.get_time = gpio_get_time;
 		gpio_dat->dev.enable = gpio_enable;
 		ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
-		if (ret >= 0) {
-			ret = timed_output_dev_register(&gpio_dat->dev);
-			if (ret < 0)
-				gpio_free(cur_gpio->gpio);
-		}
+		if (ret < 0)
+			goto err_out;
+		ret = timed_output_dev_register(&gpio_dat->dev);
 		if (ret < 0) {
-			for (j = 0; j < i; j++) {
-				timed_output_dev_unregister(&gpio_data[i].dev);
-				gpio_free(gpio_data[i].gpio);
-			}
-			kfree(gpio_data);
-			return ret;
+			gpio_free(cur_gpio->gpio);
+			goto err_out;
 		}
 
 		gpio_dat->gpio = cur_gpio->gpio;
@@ -131,6 +125,15 @@
 	platform_set_drvdata(pdev, gpio_data);
 
 	return 0;
+
+err_out:
+	while (--i >= 0) {
+		timed_output_dev_unregister(&gpio_data[i].dev);
+		gpio_free(gpio_data[i].gpio);
+	}
+	kfree(gpio_data);
+
+	return ret;
 }
 
 static int timed_gpio_remove(struct platform_device *pdev)
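
The reworked probe registers each GPIO in turn and funnels failures to a single err_out label that unwinds only the entries set up so far, newest first. The idiom in isolation — the item type and helpers below are stubs for illustration:

/* Stub types and helpers; only the unwind structure matters here. */
struct foo_item { int id; };

static int foo_setup_one(struct foo_item *it)     { return 0; }
static void foo_teardown_one(struct foo_item *it) { }

static int foo_setup_all(struct foo_item *items, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = foo_setup_one(&items[i]);
		if (ret < 0)
			goto err_out;
	}
	return 0;

err_out:
	/* i counts successful setups, so only 0..i-1 are torn down,
	 * in reverse order. */
	while (--i >= 0)
		foo_teardown_one(&items[i]);
	return ret;
}
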
diff --git a/drivers/staging/iio/inkern.c b/drivers/staging/iio/inkern.c
index de2c8ea..ef07a02 100644
--- a/drivers/staging/iio/inkern.c
+++ b/drivers/staging/iio/inkern.c
@@ -82,6 +82,7 @@
 			ret = -ENODEV;
 			goto error_ret;
 		}
+		i++;
 	}
 error_ret:
 	mutex_unlock(&iio_map_list_lock);
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index d5ddac3..ebc2d08 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -108,7 +108,8 @@
 static int ak8975_write_data(struct i2c_client *client,
 			     u8 reg, u8 val, u8 mask, u8 shift)
 {
-	struct ak8975_data *data = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct ak8975_data *data = iio_priv(indio_dev);
 	u8 regval;
 	int ret;
 
@@ -159,7 +160,8 @@
  */
 static int ak8975_setup(struct i2c_client *client)
 {
-	struct ak8975_data *data = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct ak8975_data *data = iio_priv(indio_dev);
 	u8 device_id;
 	int ret;
 
@@ -509,6 +511,7 @@
 		goto exit_gpio;
 	}
 	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
 	/* Perform some basic start-of-day setup of the device. */
 	err = ak8975_setup(client);
 	if (err < 0) {
@@ -516,7 +519,6 @@
 		goto exit_free_iio;
 	}
 
-	i2c_set_clientdata(client, indio_dev);
 	data->client = client;
 	mutex_init(&data->lock);
 	data->eoc_irq = client->irq;
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 91dd3da..e00b416 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -521,7 +521,9 @@
 /* Called when we have found a new HMC5843. */
 static void hmc5843_init_client(struct i2c_client *client)
 {
-	struct hmc5843_data *data = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct hmc5843_data *data = iio_priv(indio_dev);
+
 	hmc5843_set_meas_conf(client, data->meas_conf);
 	hmc5843_set_rate(client, data->rate);
 	hmc5843_configure(client, data->operating_mode);
diff --git a/drivers/staging/media/as102/as102_fw.c b/drivers/staging/media/as102/as102_fw.c
index 43ebc43..1075fb1 100644
--- a/drivers/staging/media/as102/as102_fw.c
+++ b/drivers/staging/media/as102/as102_fw.c
@@ -165,7 +165,7 @@
 int as102_fw_upload(struct as10x_bus_adapter_t *bus_adap)
 {
 	int errno = -EFAULT;
-	const struct firmware *firmware;
+	const struct firmware *firmware = NULL;
 	unsigned char *cmd_buf = NULL;
 	char *fw1, *fw2;
 	struct usb_device *dev = bus_adap->usb_dev;
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 3df5b4c..620b8d5 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -803,9 +803,6 @@
 static int pdev_probe(struct platform_device *device)
 {
 	DBG("%s", device->name);
-	if (platform_driver_register(&omap_dmm_driver))
-		dev_err(&device->dev, "DMM registration failed\n");
-
 	return drm_platform_init(&omap_drm_driver, device);
 }
 
@@ -833,6 +830,10 @@
 static int __init omap_drm_init(void)
 {
 	DBG("init");
+	if (platform_driver_register(&omap_dmm_driver)) {
+		/* we can continue on without DMM.. so not fatal */
+		dev_err(NULL, "DMM registration failed\n");
+	}
 	return platform_driver_register(&pdev);
 }
 
diff --git a/drivers/staging/ozwpan/TODO b/drivers/staging/ozwpan/TODO
index f7a9c12..c2d30a71 100644
--- a/drivers/staging/ozwpan/TODO
+++ b/drivers/staging/ozwpan/TODO
@@ -8,5 +8,7 @@
 	- code review by USB developer community.
 	- testing with as many devices as possible.
 
-Please send any patches for this driver to Chris Kelly <ckelly@ozmodevices.com>
+Please send any patches for this driver to
+Rupesh Gujare <rgujare@ozmodevices.com>
+Chris Kelly <ckelly@ozmodevices.com>
 and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/ramster/Kconfig b/drivers/staging/ramster/Kconfig
index 8b57b87..4af1f8d 100644
--- a/drivers/staging/ramster/Kconfig
+++ b/drivers/staging/ramster/Kconfig
@@ -1,10 +1,6 @@
-# Dependency on CONFIG_BROKEN is because there is a commit dependency
-# on a cleancache naming change to be submitted by Konrad Wilk
-# a39c00ded70339603ffe1b0ffdf3ade85bcf009a "Merge branch 'stable/cleancache.v13'
-# into linux-next.  Once this commit is present, BROKEN can be removed
 config RAMSTER
 	bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
-	depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM && BROKEN
+	depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
 	default n
diff --git a/drivers/staging/ramster/cluster/tcp.c b/drivers/staging/ramster/cluster/tcp.c
index 3af1b2c..b9721c1 100644
--- a/drivers/staging/ramster/cluster/tcp.c
+++ b/drivers/staging/ramster/cluster/tcp.c
@@ -2106,7 +2106,7 @@
 	r2net_listen_sock = sock;
 	INIT_WORK(&r2net_listen_work, r2net_accept_many);
 
-	sock->sk->sk_reuse = 1;
+	sock->sk->sk_reuse = SK_CAN_REUSE;
 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
 	if (ret < 0) {
 		printk(KERN_ERR "ramster: Error %d while binding socket at "
diff --git a/drivers/staging/rts_pstor/ms.c b/drivers/staging/rts_pstor/ms.c
index 66341df..f9a4498 100644
--- a/drivers/staging/rts_pstor/ms.c
+++ b/drivers/staging/rts_pstor/ms.c
@@ -3498,7 +3498,8 @@
 
 		log_blk++;
 
-		for (seg_no = 0; seg_no < sizeof(ms_start_idx)/2; seg_no++) {
+		for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1;
+				seg_no++) {
 			if (log_blk < ms_start_idx[seg_no+1])
 				break;
 		}
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index a7feb3e..1dccd93 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -1000,6 +1000,11 @@
 
 	rtsx_init_chip(dev->chip);
 
+	/* set the supported max_lun and max_id for the scsi host
+	 * NOTE: the minimal value of max_id is 1 */
+	host->max_id = 1;
+	host->max_lun = dev->chip->max_lun;
+
 	/* Start up our control thread */
 	th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
 	if (IS_ERR(th)) {
diff --git a/drivers/staging/rts_pstor/rtsx_transport.c b/drivers/staging/rts_pstor/rtsx_transport.c
index 4e3d2c1..9b2e5c9 100644
--- a/drivers/staging/rts_pstor/rtsx_transport.c
+++ b/drivers/staging/rts_pstor/rtsx_transport.c
@@ -335,6 +335,7 @@
 	int sg_cnt, i, resid;
 	int err = 0;
 	long timeleft;
+	struct scatterlist *sg_ptr;
 	u32 val = TRIG_DMA;
 
 	if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
@@ -371,7 +372,7 @@
 	sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
 
 	resid = size;
-
+	sg_ptr = sg;
 	chip->sgi = 0;
 	/* Usually the next entry will be @sg@ + 1, but if this sg element
 	 * is part of a chained scatterlist, it could jump to the start of
@@ -379,14 +380,14 @@
 	 * the proper sg
 	 */
 	for (i = 0; i < *index; i++)
-		sg = sg_next(sg);
+		sg_ptr = sg_next(sg_ptr);
 	for (i = *index; i < sg_cnt; i++) {
 		dma_addr_t addr;
 		unsigned int len;
 		u8 option;
 
-		addr = sg_dma_address(sg);
-		len = sg_dma_len(sg);
+		addr = sg_dma_address(sg_ptr);
+		len = sg_dma_len(sg_ptr);
 
 		RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
 			     (unsigned int)addr, len);
@@ -415,7 +416,7 @@
 		if (!resid)
 			break;
 
-		sg = sg_next(sg);
+		sg_ptr = sg_next(sg_ptr);
 	}
 
 	RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
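
The transport fix keeps the caller's scatterlist head untouched and advances a separate cursor with sg_next(), which is what lets chained tables be followed without losing the base pointer that is still needed later. A generic sketch of the same walk; for_each_sg() encapsulates the identical cursor handling:

#include <linux/scatterlist.h>

/* Walk a (possibly chained) mapped scatterlist without disturbing the
 * head pointer the caller still needs. */
static size_t foo_total_mapped_len(struct scatterlist *sg, int nents)
{
	struct scatterlist *cur = sg;	/* separate cursor */
	size_t total = 0;
	int i;

	for (i = 0; i < nents; i++) {
		total += sg_dma_len(cur);
		cur = sg_next(cur);
	}
	return total;
}
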
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
index ad54c2e..f1701bc 100644
--- a/drivers/staging/sep/sep_main.c
+++ b/drivers/staging/sep/sep_main.c
@@ -3114,7 +3114,7 @@
 			current->pid);
 		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
 				  &call_status->status)) {
-			dev_warn(&sep->pdev->dev,
+			dev_dbg(&sep->pdev->dev,
 				"[PID%d] dcb prep needed before send msg\n",
 				current->pid);
 			error = -EPROTO;
@@ -3122,9 +3122,9 @@
 		}
 
 		if (!arg) {
-			dev_warn(&sep->pdev->dev,
+			dev_dbg(&sep->pdev->dev,
 				"[PID%d] dcb null arg\n", current->pid);
-			error = EINVAL;
+			error = -EINVAL;
 			goto end_function;
 		}
 
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index 9fedc44..573c800 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -35,10 +35,10 @@
 static int vector_num;
 static int level[PIO2_CARDS_MAX];
 static int level_num;
-static const char *variant[PIO2_CARDS_MAX];
+static char *variant[PIO2_CARDS_MAX];
 static int variant_num;
 
-static int loopback;
+static bool loopback;
 
 static int pio2_match(struct vme_dev *);
 static int __devinit pio2_probe(struct vme_dev *);
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index 0ff8d7b..774b0d4 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -655,6 +655,9 @@
         return (false);
     }
 
+    if (uKeyLength > MAX_KEY_LEN)
+	    return false;
+
     pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = true;
     for(ii=0;ii<ETH_ALEN;ii++)
         pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;
diff --git a/drivers/staging/vt6656/ioctl.c b/drivers/staging/vt6656/ioctl.c
index 1463d76..d59456c 100644
--- a/drivers/staging/vt6656/ioctl.c
+++ b/drivers/staging/vt6656/ioctl.c
@@ -565,7 +565,7 @@
 			result = -ENOMEM;
 			break;
 		}
-		pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
+		pNodeList = kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
 		if (pNodeList == NULL) {
 			result = -ENOMEM;
 			break;
@@ -601,6 +601,7 @@
 			}
 		}
 		if (copy_to_user(pReq->data, pNodeList, sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)))) {
+			kfree(pNodeList);
 			result = -EFAULT;
 			break;
 		}
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index 27bb523..ee62a06 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -684,6 +684,9 @@
         return (FALSE);
     }
 
+    if (uKeyLength > MAX_KEY_LEN)
+	    return false;
+
     pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = TRUE;
     for (ii = 0; ii < ETH_ALEN; ii++)
         pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 94d5c35..3650bbf 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -61,7 +61,7 @@
 		}
 		temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
 		/* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
-		if ((temp & 0x88) == 0x80)
+		if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
 			data = 0; /* DDR */
 		else
 			data = 1; /* DDRII */
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 2919924..60d4adf 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -152,6 +152,7 @@
 		pVBInfo->pXGINew_CR97 = &XG20_CR97;
 
 	if (ChipType == XG27) {
+		unsigned char temp;
 		pVBInfo->MCLKData
 			= (struct SiS_MCLKData *) XGI27New_MCLKData;
 		pVBInfo->CR40 = XGI27_cr41;
@@ -162,7 +163,13 @@
 		pVBInfo->pCRDE = XG27_CRDE;
 		pVBInfo->pSR40 = &XG27_SR40;
 		pVBInfo->pSR41 = &XG27_SR41;
+		pVBInfo->SR15 = XG27_SR13;
 
+		/*Z11m DDR*/
+		temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
+		/* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
+		if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
+			pVBInfo->pXGINew_CR97 = &Z11m_CR97;
 	}
 
 	if (ChipType >= XG20) {
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index dddf261..e8d6f67 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -33,6 +33,13 @@
 	{0x5c, 0x23, 0x01, 166}
 };
 
+static unsigned char XG27_SR13[4][8] = {
+	{0x35, 0x45, 0xb1, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR13 */
+	{0x41, 0x51, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR14 */
+	{0x32, 0x32, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR18 */
+	{0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00}  /* SR1B */
+};
+
 static unsigned char XGI340_SR13[4][8] = {
 	{0x35, 0x45, 0xb1, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR13 */
 	{0x41, 0x51, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR14 */
@@ -71,7 +78,7 @@
 	{0x20, 0x40, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 0 CR41 */
 	{0xC4, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 1 CR8A */
 	{0xC4, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 2 CR8B */
-	{0xB5, 0x13, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 3 CR40[7],
+	{0xB3, 0x13, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 3 CR40[7],
 							       CR99[2:0],
 							       CR45[3:0]*/
 	{0xf0, 0xf5, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 4 CR59 */
@@ -2803,6 +2810,8 @@
 static unsigned char XG27_SR40 = 0x04 ;
 static unsigned char XG27_SR41 = 0x00 ;
 
+static unsigned char Z11m_CR97 = 0x80 ;
+
 static struct XGI330_VCLKDataStruct XGI_VCLKData[] = {
 	/* SR2B,SR2C,SR2D */
 	{0x1B, 0xE1,  25}, /* 00 (25.175MHz) */
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 09caa4f..917461c 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -267,33 +267,39 @@
 	return off + obj_idx * class_size;
 }
 
+static void reset_page(struct page *page)
+{
+	clear_bit(PG_private, &page->flags);
+	clear_bit(PG_private_2, &page->flags);
+	set_page_private(page, 0);
+	page->mapping = NULL;
+	page->freelist = NULL;
+	reset_page_mapcount(page);
+}
+
 static void free_zspage(struct page *first_page)
 {
-	struct page *nextp, *tmp;
+	struct page *nextp, *tmp, *head_extra;
 
 	BUG_ON(!is_first_page(first_page));
 	BUG_ON(first_page->inuse);
 
-	nextp = (struct page *)page_private(first_page);
+	head_extra = (struct page *)page_private(first_page);
 
-	clear_bit(PG_private, &first_page->flags);
-	clear_bit(PG_private_2, &first_page->flags);
-	set_page_private(first_page, 0);
-	first_page->mapping = NULL;
-	first_page->freelist = NULL;
-	reset_page_mapcount(first_page);
+	reset_page(first_page);
 	__free_page(first_page);
 
 	/* zspage with only 1 system page */
-	if (!nextp)
+	if (!head_extra)
 		return;
 
-	list_for_each_entry_safe(nextp, tmp, &nextp->lru, lru) {
+	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
 		list_del(&nextp->lru);
-		clear_bit(PG_private_2, &nextp->flags);
-		nextp->index = 0;
+		reset_page(nextp);
 		__free_page(nextp);
 	}
+	reset_page(head_extra);
+	__free_page(head_extra);
 }
 
 /* Initialize a newly allocated zspage */
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 8306579..c5eb3c3 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -122,6 +122,7 @@
 	/* Local sense buffer */
 	unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
 	u32 was_ddp_setup:1;		/* Set only if ddp is setup */
+	u32 aborted:1;			/* Set if aborted by reset or timeout */
 	struct scatterlist *sg;		/* Set only if DDP is setup */
 	u32 sg_cnt;			/* No. of item in scatterlist */
 };
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 62dec97..a375f25 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -121,6 +121,8 @@
 	struct fc_exch *ep;
 	size_t len;
 
+	if (cmd->aborted)
+		return 0;
 	ft_dump_cmd(cmd, __func__);
 	ep = fc_seq_exch(cmd->seq);
 	lport = ep->lp;
@@ -187,6 +189,8 @@
 
 	ft_dump_cmd(cmd, __func__);
 
+	if (cmd->aborted)
+		return 0;
 	ep = fc_seq_exch(cmd->seq);
 	lport = ep->lp;
 	fp = fc_frame_alloc(lport, sizeof(*txrdy));
@@ -252,10 +256,10 @@
 	struct ft_cmd *cmd = arg;
 	struct fc_frame_header *fh;
 
-	if (IS_ERR(fp)) {
+	if (unlikely(IS_ERR(fp))) {
 		/* XXX need to find cmd if queued */
 		cmd->seq = NULL;
-		transport_generic_free_cmd(&cmd->se_cmd, 0);
+		cmd->aborted = true;
 		return;
 	}
 
@@ -399,6 +403,8 @@
 	struct se_tmr_req *tmr = se_cmd->se_tmr_req;
 	enum fcp_resp_rsp_codes code;
 
+	if (cmd->aborted)
+		return 0;
 	switch (tmr->response) {
 	case TMR_FUNCTION_COMPLETE:
 		code = FCP_TMF_CMPL;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index f357039..2948dc9 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -300,6 +300,7 @@
 {
 	struct ft_lport_acl *lacl;
 	struct ft_tpg *tpg;
+	struct workqueue_struct *wq;
 	unsigned long index;
 	int ret;
 
@@ -321,18 +322,20 @@
 	tpg->lport_acl = lacl;
 	INIT_LIST_HEAD(&tpg->lun_list);
 
-	ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
-				tpg, TRANSPORT_TPG_TYPE_NORMAL);
-	if (ret < 0) {
+	wq = alloc_workqueue("tcm_fc", 0, 1);
+	if (!wq) {
 		kfree(tpg);
 		return NULL;
 	}
 
-	tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
-	if (!tpg->workqueue) {
+	ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
+				tpg, TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		destroy_workqueue(wq);
 		kfree(tpg);
 		return NULL;
 	}
+	tpg->workqueue = wq;
 
 	mutex_lock(&ft_lport_lock);
 	list_add_tail(&tpg->list, &lacl->tpg_list);
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 2b693ee..dc7c0db 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -81,6 +81,8 @@
 	void *from;
 	void *to = NULL;
 
+	if (cmd->aborted)
+		return 0;
 	ep = fc_seq_exch(cmd->seq);
 	lport = ep->lp;
 	cmd->seq = lport->tt.seq_start_next(cmd->seq);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 24145c3..6cc4358 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1073,8 +1073,10 @@
 		    (new_serial.close_delay != port->close_delay) ||
 		    (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
 		    ((new_serial.flags & ~ASYNC_USR_MASK) !=
-		     (port->flags & ~ASYNC_USR_MASK)))
+		     (port->flags & ~ASYNC_USR_MASK))) {
+			tty_unlock();
 			return -EPERM;
+		}
 		port->flags = ((port->flags & ~ASYNC_USR_MASK) |
 			       (new_serial.flags & ASYNC_USR_MASK));
 		state->custom_divisor = new_serial.custom_divisor;
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 5b149b4..5c27f7e 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -1572,13 +1572,11 @@
 	do {
 		struct uart_8250_port *up;
 		struct uart_port *port;
-		bool skip;
 
 		up = list_entry(l, struct uart_8250_port, list);
 		port = &up->port;
-		skip = pass_counter && up->port.flags & UPF_IIR_ONCE;
 
-		if (!skip && port->handle_irq(port)) {
+		if (port->handle_irq(port)) {
 			handled = 1;
 			end = NULL;
 		} else if (end == NULL)
@@ -2037,10 +2035,12 @@
 		spin_unlock_irqrestore(&port->lock, flags);
 
 		/*
-		 * If the interrupt is not reasserted, setup a timer to
-		 * kick the UART on a regular basis.
+		 * If the interrupt is not reasserted, or we otherwise
+		 * don't trust the iir, setup a timer to kick the UART
+		 * on a regular basis.
 		 */
-		if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
+		if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) ||
+		    up->port.flags & UPF_BUG_THRE) {
 			up->bugs |= UART_BUG_THRE;
 			pr_debug("ttyS%d - using backup timer\n",
 				 serial_index(port));
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index da2b0b0..858dca8 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1096,7 +1096,7 @@
 			   const struct pciserial_board *board,
 			   struct uart_port *port, int idx)
 {
-	port->flags |= UPF_IIR_ONCE;
+	port->flags |= UPF_BUG_THRE;
 	return skip_tx_en_setup(priv, board, port, idx);
 }
 
@@ -1118,18 +1118,6 @@
 	return pci_default_setup(priv, board, port, idx);
 }
 
-static int try_enable_msi(struct pci_dev *dev)
-{
-	/* use msi if available, but fallback to legacy otherwise */
-	pci_enable_msi(dev);
-	return 0;
-}
-
-static void disable_msi(struct pci_dev *dev)
-{
-	pci_disable_msi(dev);
-}
-
 #define PCI_VENDOR_ID_SBSMODULARIO	0x124B
 #define PCI_SUBVENDOR_ID_SBSMODULARIO	0x124B
 #define PCI_DEVICE_ID_OCTPRO		0x0001
@@ -1249,9 +1237,7 @@
 		.device		= PCI_DEVICE_ID_INTEL_PATSBURG_KT,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
-		.init		= try_enable_msi,
 		.setup		= kt_serial_setup,
-		.exit		= disable_msi,
 	},
 	/*
 	 * ITE
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 665beb6..070b442 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1041,7 +1041,7 @@
 
 config SERIAL_OMAP_CONSOLE
 	bool "Console on OMAP serial port"
-	depends on SERIAL_OMAP
+	depends on SERIAL_OMAP=y
 	select SERIAL_CORE_CONSOLE
 	help
 	  Select this option if you would like to use omap serial port as
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index e790375..1f03309 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -556,7 +556,7 @@
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res_mem)
 		port->mapbase = res_mem->start;
-	else if (platp->mapbase)
+	else if (platp)
 		port->mapbase = platp->mapbase;
 	else
 		return -EINVAL;
@@ -564,7 +564,7 @@
 	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (res_irq)
 		port->irq = res_irq->start;
-	else if (platp->irq)
+	else if (platp)
 		port->irq = platp->irq;
 
 	/* Check platform data first so we can override device node data */
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 0c65c9e..3d569cd 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1946,10 +1946,6 @@
 		goto unmap;
 	}
 
-	/* Ensure interrupts from this UART are masked and cleared */
-	writew(0, uap->port.membase + UART011_IMSC);
-	writew(0xffff, uap->port.membase + UART011_ICR);
-
 	uap->vendor = vendor;
 	uap->lcrh_rx = vendor->lcrh_rx;
 	uap->lcrh_tx = vendor->lcrh_tx;
@@ -1967,6 +1963,10 @@
 	uap->port.line = i;
 	pl011_dma_probe(uap);
 
+	/* Ensure interrupts from this UART are masked and cleared */
+	writew(0, uap->port.membase + UART011_IMSC);
+	writew(0xffff, uap->port.membase + UART011_ICR);
+
 	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
 
 	amba_ports[i] = uap;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index f9a6be7..3d7e1ee 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -389,6 +389,8 @@
 {
 	UART_PUT_CR(port, ATMEL_US_RSTSTA);  /* reset status and receiver */
 
+	UART_PUT_CR(port, ATMEL_US_RXEN);
+
 	if (atmel_use_dma_rx(port)) {
 		/* enable PDC controller */
 		UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
@@ -404,6 +406,8 @@
  */
 static void atmel_stop_rx(struct uart_port *port)
 {
+	UART_PUT_CR(port, ATMEL_US_RXDIS);
+
 	if (atmel_use_dma_rx(port)) {
 		/* disable PDC receive */
 		UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index e6c3dbd..836fe273 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -154,10 +154,9 @@
 		port->x_char = 0;
 		return IRQ_HANDLED;
 	}
-	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
-		clps711xuart_stop_tx(port);
-		return IRQ_HANDLED;
-	}
+
+	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+		goto disable_tx_irq;
 
 	count = port->fifosize >> 1;
 	do {
@@ -171,8 +170,11 @@
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
 
-	if (uart_circ_empty(xmit))
-		clps711xuart_stop_tx(port);
+	if (uart_circ_empty(xmit)) {
+	disable_tx_irq:
+		disable_irq_nosync(TX_IRQ(port));
+		tx_enabled(port) = 0;
+	}
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 0121486..d00b38e 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1381,29 +1381,24 @@
 		return -ENODEV;
 	}
 
-	if (!request_mem_region(mem->start, resource_size(mem),
+	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
 				pdev->dev.driver->name)) {
 		dev_err(&pdev->dev, "memory region already claimed\n");
 		return -EBUSY;
 	}
 
 	dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
-	if (!dma_rx) {
-		ret = -EINVAL;
-		goto err;
-	}
+	if (!dma_rx)
+		return -ENXIO;
 
 	dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
-	if (!dma_tx) {
-		ret = -EINVAL;
-		goto err;
-	}
+	if (!dma_tx)
+		return -ENXIO;
 
-	up = kzalloc(sizeof(*up), GFP_KERNEL);
-	if (up == NULL) {
-		ret = -ENOMEM;
-		goto do_release_region;
-	}
+	up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
+	if (!up)
+		return -ENOMEM;
+
 	up->pdev = pdev;
 	up->port.dev = &pdev->dev;
 	up->port.type = PORT_OMAP;
@@ -1423,16 +1418,17 @@
 		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
 								up->port.line);
 		ret = -ENODEV;
-		goto err;
+		goto err_port_line;
 	}
 
 	sprintf(up->name, "OMAP UART%d", up->port.line);
 	up->port.mapbase = mem->start;
-	up->port.membase = ioremap(mem->start, resource_size(mem));
+	up->port.membase = devm_ioremap(&pdev->dev, mem->start,
+						resource_size(mem));
 	if (!up->port.membase) {
 		dev_err(&pdev->dev, "can't ioremap UART\n");
 		ret = -ENOMEM;
-		goto err;
+		goto err_ioremap;
 	}
 
 	up->port.flags = omap_up_info->flags;
@@ -1478,16 +1474,19 @@
 
 	ret = uart_add_one_port(&serial_omap_reg, &up->port);
 	if (ret != 0)
-		goto do_release_region;
+		goto err_add_port;
 
 	pm_runtime_put(&pdev->dev);
 	platform_set_drvdata(pdev, up);
 	return 0;
-err:
+
+err_add_port:
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+err_ioremap:
+err_port_line:
 	dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
 				pdev->id, __func__, ret);
-do_release_region:
-	release_mem_region(mem->start, resource_size(mem));
 	return ret;
 }
 
@@ -1499,8 +1498,6 @@
 		pm_runtime_disable(&up->pdev->dev);
 		uart_remove_one_port(&serial_omap_reg, &up->port);
 		pm_qos_remove_request(&up->pm_qos_request);
-
-		kfree(up);
 	}
 
 	platform_set_drvdata(dev, NULL);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 08b9962..c2816f4 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -210,6 +210,7 @@
 #define CMITC_UARTCLK   192000000 /* 192.0000 MHz */
 #define FRI2_64_UARTCLK  64000000 /*  64.0000 MHz */
 #define FRI2_48_UARTCLK  48000000 /*  48.0000 MHz */
+#define NTC1_UARTCLK     64000000 /*  64.0000 MHz */
 
 struct pch_uart_buffer {
 	unsigned char *buf;
@@ -384,6 +385,12 @@
 	if (cmp && strstr(cmp, "Fish River Island II"))
 		return FRI2_48_UARTCLK;
 
+	/* Kontron COMe-mTT10 (nanoETXexpress-TT) */
+	cmp = dmi_get_system_info(DMI_BOARD_NAME);
+	if (cmp && (strstr(cmp, "COMe-mTT") ||
+		    strstr(cmp, "nanoETXexpress-TT")))
+		return NTC1_UARTCLK;
+
 	return DEFAULT_UARTCLK;
 }
 
@@ -1440,9 +1447,11 @@
 			__func__);
 		return -EOPNOTSUPP;
 #endif
-		priv->use_dma = 1;
 		priv->use_dma_flag = 1;
 		dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n");
+		if (!priv->use_dma)
+			pch_request_dma(port);
+		priv->use_dma = 1;
 	}
 
 	return 0;
@@ -1651,6 +1660,7 @@
 	}
 
 	pci_enable_msi(pdev);
+	pci_set_master(pdev);
 
 	iobase = pci_resource_start(pdev, 0);
 	mapbase = pci_resource_start(pdev, 1);
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index de249d2..d8b0aee 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -982,6 +982,7 @@
 
 	ucon &= ucon_mask;
 	wr_regl(port, S3C2410_UCON,  ucon | cfg->ucon);
+	wr_regl(port, S3C2410_ULCON, cfg->ulcon);
 
 	/* reset both fifos */
 	wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index bf461cf..3158e17 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -355,9 +355,6 @@
 		WARN(1, "Invalid register access\n");
 }
 
-#define sci_in(up, offset)		(up->serial_in(up, offset))
-#define sci_out(up, offset, value)	(up->serial_out(up, offset, value))
-
 static int sci_probe_regmap(struct plat_sci_port *cfg)
 {
 	switch (cfg->type) {
@@ -422,9 +419,9 @@
 	int c;
 
 	do {
-		status = sci_in(port, SCxSR);
+		status = serial_port_in(port, SCxSR);
 		if (status & SCxSR_ERRORS(port)) {
-			sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
+			serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
 			continue;
 		}
 		break;
@@ -433,11 +430,11 @@
 	if (!(status & SCxSR_RDxF(port)))
 		return NO_POLL_CHAR;
 
-	c = sci_in(port, SCxRDR);
+	c = serial_port_in(port, SCxRDR);
 
 	/* Dummy read */
-	sci_in(port, SCxSR);
-	sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+	serial_port_in(port, SCxSR);
+	serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
 
 	return c;
 }
@@ -448,11 +445,11 @@
 	unsigned short status;
 
 	do {
-		status = sci_in(port, SCxSR);
+		status = serial_port_in(port, SCxSR);
 	} while (!(status & SCxSR_TDxE(port)));
 
-	sci_out(port, SCxTDR, c);
-	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
+	serial_port_out(port, SCxTDR, c);
+	serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
 }
 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
 
@@ -480,10 +477,10 @@
 	    ((!(cflag & CRTSCTS)))) {
 		unsigned short status;
 
-		status = sci_in(port, SCSPTR);
+		status = serial_port_in(port, SCSPTR);
 		status &= ~SCSPTR_CTSIO;
 		status |= SCSPTR_RTSIO;
-		sci_out(port, SCSPTR, status); /* Set RTS = 1 */
+		serial_port_out(port, SCSPTR, status); /* Set RTS = 1 */
 	}
 }
 
@@ -493,13 +490,13 @@
 
 	reg = sci_getreg(port, SCTFDR);
 	if (reg->size)
-		return sci_in(port, SCTFDR) & 0xff;
+		return serial_port_in(port, SCTFDR) & 0xff;
 
 	reg = sci_getreg(port, SCFDR);
 	if (reg->size)
-		return sci_in(port, SCFDR) >> 8;
+		return serial_port_in(port, SCFDR) >> 8;
 
-	return !(sci_in(port, SCxSR) & SCI_TDRE);
+	return !(serial_port_in(port, SCxSR) & SCI_TDRE);
 }
 
 static int sci_txroom(struct uart_port *port)
@@ -513,13 +510,13 @@
 
 	reg = sci_getreg(port, SCRFDR);
 	if (reg->size)
-		return sci_in(port, SCRFDR) & 0xff;
+		return serial_port_in(port, SCRFDR) & 0xff;
 
 	reg = sci_getreg(port, SCFDR);
 	if (reg->size)
-		return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1);
+		return serial_port_in(port, SCFDR) & ((port->fifosize << 1) - 1);
 
-	return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
+	return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
 }
 
 /*
@@ -547,14 +544,14 @@
 	unsigned short ctrl;
 	int count;
 
-	status = sci_in(port, SCxSR);
+	status = serial_port_in(port, SCxSR);
 	if (!(status & SCxSR_TDxE(port))) {
-		ctrl = sci_in(port, SCSCR);
+		ctrl = serial_port_in(port, SCSCR);
 		if (uart_circ_empty(xmit))
 			ctrl &= ~SCSCR_TIE;
 		else
 			ctrl |= SCSCR_TIE;
-		sci_out(port, SCSCR, ctrl);
+		serial_port_out(port, SCSCR, ctrl);
 		return;
 	}
 
@@ -573,27 +570,27 @@
 			break;
 		}
 
-		sci_out(port, SCxTDR, c);
+		serial_port_out(port, SCxTDR, c);
 
 		port->icount.tx++;
 	} while (--count > 0);
 
-	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
+	serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
 	if (uart_circ_empty(xmit)) {
 		sci_stop_tx(port);
 	} else {
-		ctrl = sci_in(port, SCSCR);
+		ctrl = serial_port_in(port, SCSCR);
 
 		if (port->type != PORT_SCI) {
-			sci_in(port, SCxSR); /* Dummy read */
-			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
+			serial_port_in(port, SCxSR); /* Dummy read */
+			serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
 		}
 
 		ctrl |= SCSCR_TIE;
-		sci_out(port, SCSCR, ctrl);
+		serial_port_out(port, SCSCR, ctrl);
 	}
 }
 
@@ -608,7 +605,7 @@
 	unsigned short status;
 	unsigned char flag;
 
-	status = sci_in(port, SCxSR);
+	status = serial_port_in(port, SCxSR);
 	if (!(status & SCxSR_RDxF(port)))
 		return;
 
@@ -621,7 +618,7 @@
 			break;
 
 		if (port->type == PORT_SCI) {
-			char c = sci_in(port, SCxRDR);
+			char c = serial_port_in(port, SCxRDR);
 			if (uart_handle_sysrq_char(port, c) ||
 			    sci_port->break_flag)
 				count = 0;
@@ -629,9 +626,9 @@
 				tty_insert_flip_char(tty, c, TTY_NORMAL);
 		} else {
 			for (i = 0; i < count; i++) {
-				char c = sci_in(port, SCxRDR);
+				char c = serial_port_in(port, SCxRDR);
 
-				status = sci_in(port, SCxSR);
+				status = serial_port_in(port, SCxSR);
 #if defined(CONFIG_CPU_SH3)
 				/* Skip "chars" during break */
 				if (sci_port->break_flag) {
@@ -672,8 +669,8 @@
 			}
 		}
 
-		sci_in(port, SCxSR); /* dummy read */
-		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+		serial_port_in(port, SCxSR); /* dummy read */
+		serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
 
 		copied += count;
 		port->icount.rx += count;
@@ -683,8 +680,8 @@
 		/* Tell the rest of the system the news. New characters! */
 		tty_flip_buffer_push(tty);
 	} else {
-		sci_in(port, SCxSR); /* dummy read */
-		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+		serial_port_in(port, SCxSR); /* dummy read */
+		serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
 	}
 }
 
@@ -726,7 +723,7 @@
 static int sci_handle_errors(struct uart_port *port)
 {
 	int copied = 0;
-	unsigned short status = sci_in(port, SCxSR);
+	unsigned short status = serial_port_in(port, SCxSR);
 	struct tty_struct *tty = port->state->port.tty;
 	struct sci_port *s = to_sci_port(port);
 
@@ -804,8 +801,8 @@
 	if (!reg->size)
 		return 0;
 
-	if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
-		sci_out(port, SCLSR, 0);
+	if ((serial_port_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
+		serial_port_out(port, SCLSR, 0);
 
 		port->icount.overrun++;
 
@@ -822,7 +819,7 @@
 static int sci_handle_breaks(struct uart_port *port)
 {
 	int copied = 0;
-	unsigned short status = sci_in(port, SCxSR);
+	unsigned short status = serial_port_in(port, SCxSR);
 	struct tty_struct *tty = port->state->port.tty;
 	struct sci_port *s = to_sci_port(port);
 
@@ -859,8 +856,8 @@
 	struct sci_port *s = to_sci_port(port);
 
 	if (s->chan_rx) {
-		u16 scr = sci_in(port, SCSCR);
-		u16 ssr = sci_in(port, SCxSR);
+		u16 scr = serial_port_in(port, SCSCR);
+		u16 ssr = serial_port_in(port, SCxSR);
 
 		/* Disable future Rx interrupts */
 		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
@@ -869,9 +866,9 @@
 		} else {
 			scr &= ~SCSCR_RIE;
 		}
-		sci_out(port, SCSCR, scr);
+		serial_port_out(port, SCSCR, scr);
 		/* Clear current interrupt */
-		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
+		serial_port_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
 		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
 			jiffies, s->rx_timeout);
 		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
@@ -909,15 +906,15 @@
 	if (port->type == PORT_SCI) {
 		if (sci_handle_errors(port)) {
 			/* discard character in rx buffer */
-			sci_in(port, SCxSR);
-			sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+			serial_port_in(port, SCxSR);
+			serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
 		}
 	} else {
 		sci_handle_fifo_overrun(port);
 		sci_rx_interrupt(irq, ptr);
 	}
 
-	sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
+	serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
 
 	/* Kick the transmission */
 	sci_tx_interrupt(irq, ptr);
@@ -931,7 +928,7 @@
 
 	/* Handle BREAKs */
 	sci_handle_breaks(port);
-	sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));
+	serial_port_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));
 
 	return IRQ_HANDLED;
 }
@@ -955,8 +952,8 @@
 	struct sci_port *s = to_sci_port(port);
 	irqreturn_t ret = IRQ_NONE;
 
-	ssr_status = sci_in(port, SCxSR);
-	scr_status = sci_in(port, SCSCR);
+	ssr_status = serial_port_in(port, SCxSR);
+	scr_status = serial_port_in(port, SCSCR);
 	err_enabled = scr_status & port_rx_irq_mask(port);
 
 	/* Tx Interrupt */
@@ -1170,7 +1167,7 @@
 
 static unsigned int sci_tx_empty(struct uart_port *port)
 {
-	unsigned short status = sci_in(port, SCxSR);
+	unsigned short status = serial_port_in(port, SCxSR);
 	unsigned short in_tx_fifo = sci_txfill(port);
 
 	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
@@ -1198,7 +1195,7 @@
 		 */
 		reg = sci_getreg(port, SCFCR);
 		if (reg->size)
-			sci_out(port, SCFCR, sci_in(port, SCFCR) | 1);
+			serial_port_out(port, SCFCR, serial_port_in(port, SCFCR) | 1);
 	}
 }
 
@@ -1240,8 +1237,8 @@
 	} else {
 		s->cookie_tx = -EINVAL;
 		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
-			u16 ctrl = sci_in(port, SCSCR);
-			sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
+			u16 ctrl = serial_port_in(port, SCSCR);
+			serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
 		}
 	}
 
@@ -1494,13 +1491,13 @@
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
-		u16 new, scr = sci_in(port, SCSCR);
+		u16 new, scr = serial_port_in(port, SCSCR);
 		if (s->chan_tx)
 			new = scr | 0x8000;
 		else
 			new = scr & ~0x8000;
 		if (new != scr)
-			sci_out(port, SCSCR, new);
+			serial_port_out(port, SCSCR, new);
 	}
 
 	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
@@ -1512,8 +1509,8 @@
 
 	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
 		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
-		ctrl = sci_in(port, SCSCR);
-		sci_out(port, SCSCR, ctrl | SCSCR_TIE);
+		ctrl = serial_port_in(port, SCSCR);
+		serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
 	}
 }
 
@@ -1522,40 +1519,40 @@
 	unsigned short ctrl;
 
 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
-	ctrl = sci_in(port, SCSCR);
+	ctrl = serial_port_in(port, SCSCR);
 
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x8000;
 
 	ctrl &= ~SCSCR_TIE;
 
-	sci_out(port, SCSCR, ctrl);
+	serial_port_out(port, SCSCR, ctrl);
 }
 
 static void sci_start_rx(struct uart_port *port)
 {
 	unsigned short ctrl;
 
-	ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
+	ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
 
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x4000;
 
-	sci_out(port, SCSCR, ctrl);
+	serial_port_out(port, SCSCR, ctrl);
 }
 
 static void sci_stop_rx(struct uart_port *port)
 {
 	unsigned short ctrl;
 
-	ctrl = sci_in(port, SCSCR);
+	ctrl = serial_port_in(port, SCSCR);
 
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x4000;
 
 	ctrl &= ~port_rx_irq_mask(port);
 
-	sci_out(port, SCSCR, ctrl);
+	serial_port_out(port, SCSCR, ctrl);
 }
 
 static void sci_enable_ms(struct uart_port *port)
@@ -1589,13 +1586,13 @@
 {
 	struct sci_port *s = (struct sci_port *)arg;
 	struct uart_port *port = &s->port;
-	u16 scr = sci_in(port, SCSCR);
+	u16 scr = serial_port_in(port, SCSCR);
 
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
 		scr &= ~0x4000;
 		enable_irq(s->cfg->irqs[1]);
 	}
-	sci_out(port, SCSCR, scr | SCSCR_RIE);
+	serial_port_out(port, SCSCR, scr | SCSCR_RIE);
 	dev_dbg(port->dev, "DMA Rx timed out\n");
 	schedule_work(&s->work_rx);
 }
@@ -1776,14 +1773,14 @@
 	unsigned int status;
 
 	do {
-		status = sci_in(port, SCxSR);
+		status = serial_port_in(port, SCxSR);
 	} while (!(status & SCxSR_TEND(port)));
 
-	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
+	serial_port_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
 
 	reg = sci_getreg(port, SCFCR);
 	if (reg->size)
-		sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+		serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
 }
 
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
@@ -1812,7 +1809,7 @@
 
 	sci_reset(port);
 
-	smr_val = sci_in(port, SCSMR) & 3;
+	smr_val = serial_port_in(port, SCSMR) & 3;
 
 	if ((termios->c_cflag & CSIZE) == CS7)
 		smr_val |= 0x40;
@@ -1825,19 +1822,19 @@
 
 	uart_update_timeout(port, termios->c_cflag, baud);
 
-	sci_out(port, SCSMR, smr_val);
+	serial_port_out(port, SCSMR, smr_val);
 
 	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
 		s->cfg->scscr);
 
 	if (t > 0) {
 		if (t >= 256) {
-			sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
+			serial_port_out(port, SCSMR, (serial_port_in(port, SCSMR) & ~3) | 1);
 			t >>= 2;
 		} else
-			sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3);
+			serial_port_out(port, SCSMR, serial_port_in(port, SCSMR) & ~3);
 
-		sci_out(port, SCBRR, t);
+		serial_port_out(port, SCBRR, t);
 		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
 	}
 
@@ -1845,7 +1842,7 @@
 
 	reg = sci_getreg(port, SCFCR);
 	if (reg->size) {
-		unsigned short ctrl = sci_in(port, SCFCR);
+		unsigned short ctrl = serial_port_in(port, SCFCR);
 
 		if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
 			if (termios->c_cflag & CRTSCTS)
@@ -1861,10 +1858,10 @@
 		 */
 		ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
 
-		sci_out(port, SCFCR, ctrl);
+		serial_port_out(port, SCFCR, ctrl);
 	}
 
-	sci_out(port, SCSCR, s->cfg->scscr);
+	serial_port_out(port, SCSCR, s->cfg->scscr);
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	/*
@@ -2166,7 +2163,7 @@
 
 	/* wait until fifo is empty and last bit has been transmitted */
 	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
-	while ((sci_in(port, SCxSR) & bits) != bits)
+	while ((serial_port_in(port, SCxSR) & bits) != bits)
 		cpu_relax();
 
 	sci_port_disable(sci_port);
@@ -2260,12 +2257,12 @@
 	if (uart_console(port)) {
 		struct plat_sci_reg *reg;
 
-		sci_port->saved_smr = sci_in(port, SCSMR);
-		sci_port->saved_brr = sci_in(port, SCBRR);
+		sci_port->saved_smr = serial_port_in(port, SCSMR);
+		sci_port->saved_brr = serial_port_in(port, SCBRR);
 
 		reg = sci_getreg(port, SCFCR);
 		if (reg->size)
-			sci_port->saved_fcr = sci_in(port, SCFCR);
+			sci_port->saved_fcr = serial_port_in(port, SCFCR);
 		else
 			sci_port->saved_fcr = 0;
 	}
@@ -2279,13 +2276,13 @@
 
 	if (uart_console(port)) {
 		sci_reset(port);
-		sci_out(port, SCSMR, sci_port->saved_smr);
-		sci_out(port, SCBRR, sci_port->saved_brr);
+		serial_port_out(port, SCSMR, sci_port->saved_smr);
+		serial_port_out(port, SCBRR, sci_port->saved_brr);
 
 		if (sci_port->saved_fcr)
-			sci_out(port, SCFCR, sci_port->saved_fcr);
+			serial_port_out(port, SCFCR, sci_port->saved_fcr);
 
-		sci_out(port, SCSCR, sci_port->cfg->scscr);
+		serial_port_out(port, SCSCR, sci_port->cfg->scscr);
 	}
 	return 0;
 }
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index a1a2d36..4c22a15 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -20,10 +20,10 @@
     defined(CONFIG_ARCH_SH7372) || \
     defined(CONFIG_ARCH_R8A7740)
 
-# define SCxSR_RDxF_CLEAR(port)	 (sci_in(port, SCxSR) & 0xfffc)
-# define SCxSR_ERROR_CLEAR(port) (sci_in(port, SCxSR) & 0xfd73)
-# define SCxSR_TDxE_CLEAR(port)	 (sci_in(port, SCxSR) & 0xffdf)
-# define SCxSR_BREAK_CLEAR(port) (sci_in(port, SCxSR) & 0xffe3)
+# define SCxSR_RDxF_CLEAR(port)	 (serial_port_in(port, SCxSR) & 0xfffc)
+# define SCxSR_ERROR_CLEAR(port) (serial_port_in(port, SCxSR) & 0xfd73)
+# define SCxSR_TDxE_CLEAR(port)	 (serial_port_in(port, SCxSR) & 0xffdf)
+# define SCxSR_BREAK_CLEAR(port) (serial_port_in(port, SCxSR) & 0xffe3)
 #else
 # define SCxSR_RDxF_CLEAR(port)	 (((port)->type == PORT_SCI) ? 0xbc : 0x00fc)
 # define SCxSR_ERROR_CLEAR(port) (((port)->type == PORT_SCI) ? 0xc4 : 0x0073)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 3bdd4b1..2156188 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2932,11 +2932,10 @@
 	gotoxy(vc, vc->vc_x, vc->vc_y);
 	csi_J(vc, 0);
 	update_screen(vc);
-	pr_info("Console: %s %s %dx%d",
+	pr_info("Console: %s %s %dx%d\n",
 		vc->vc_can_do_color ? "colour" : "mono",
 		display_desc, vc->vc_cols, vc->vc_rows);
 	printable = 1;
-	printk("\n");
 
 	console_unlock();
 
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index cbd8f5f..76316a3 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -2,14 +2,6 @@
 # USB device configuration
 #
 
-menuconfig USB_SUPPORT
-	bool "USB support"
-	depends on HAS_IOMEM
-	default y
-	---help---
-	  This option adds core support for Universal Serial Bus (USB).
-	  You will also need drivers from the following menu to make use of it.
-
 # many non-PCI SOC chips embed OHCI
 config USB_ARCH_HAS_OHCI
 	boolean
@@ -63,6 +55,14 @@
 	boolean
 	default PCI
 
+menuconfig USB_SUPPORT
+	bool "USB support"
+	depends on HAS_IOMEM
+	default y
+	---help---
+	  This option adds core support for Universal Serial Bus (USB).
+	  You will also need drivers from the following menu to make use of it.
+
 if USB_SUPPORT
 
 config USB_COMMON
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f8e2d6d..9a56635 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1189,8 +1189,13 @@
 	if (status == 0) {
 		status = usb_suspend_device(udev, msg);
 
-		/* Again, ignore errors during system sleep transitions */
-		if (!PMSG_IS_AUTO(msg))
+		/*
+		 * Ignore errors from non-root-hub devices during
+		 * system sleep transitions.  For the most part,
+		 * these devices should go to low power anyway when
+		 * the entire bus is suspended.
+		 */
+		if (udev->parent && !PMSG_IS_AUTO(msg))
 			status = 0;
 	}
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 9d7fc9a..140d3e1 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1978,6 +1978,18 @@
 	if (status == 0) {
 		usb_set_device_state(rhdev, USB_STATE_SUSPENDED);
 		hcd->state = HC_STATE_SUSPENDED;
+
+		/* Did we race with a root-hub wakeup event? */
+		if (rhdev->do_remote_wakeup) {
+			char	buffer[6];
+
+			status = hcd->driver->hub_status_data(hcd, buffer);
+			if (status != 0) {
+				dev_dbg(&rhdev->dev, "suspend raced with wakeup event\n");
+				hcd_bus_resume(rhdev, PMSG_AUTO_RESUME);
+				status = -EBUSY;
+			}
+		}
 	} else {
 		spin_lock_irq(&hcd_root_hub_lock);
 		if (!HCD_DEAD(hcd)) {
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 28664eb..ec6c97d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1667,7 +1667,6 @@
 {
 	struct usb_device	*udev = *pdev;
 	int			i;
-	struct usb_hcd		*hcd = bus_to_hcd(udev->bus);
 
 	/* mark the device as inactive, so any further urb submissions for
 	 * this device (and any of its children) will fail immediately.
@@ -1690,9 +1689,7 @@
 	 * so that the hardware is now fully quiesced.
 	 */
 	dev_dbg (&udev->dev, "unregistering device\n");
-	mutex_lock(hcd->bandwidth_mutex);
 	usb_disable_device(udev, 0);
-	mutex_unlock(hcd->bandwidth_mutex);
 	usb_hcd_synchronize_unlinks(udev);
 
 	usb_remove_ep_devs(&udev->ep0);
@@ -3163,6 +3160,22 @@
 	if (retval)
 		goto fail;
 
+	/*
+	 * Some superspeed devices have finished link training and are
+	 * attached to a superspeed hub port, but the device descriptor
+	 * obtained from them shows they aren't superspeed devices.
+	 * Warm-resetting the port they are attached to can fix them.
+	 */
+	if ((udev->speed == USB_SPEED_SUPER) &&
+			(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
+		dev_err(&udev->dev, "got a wrong device descriptor, "
+				"warm reset device\n");
+		hub_port_reset(hub, port1, udev,
+				HUB_BH_RESET_TIME, true);
+		retval = -EINVAL;
+		goto fail;
+	}
+
 	if (udev->descriptor.bMaxPacketSize0 == 0xff ||
 			udev->speed == USB_SPEED_SUPER)
 		i = 512;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b3bdfed..ca717da 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -308,7 +308,8 @@
 				retval = usb_unlink_urb(io->urbs [i]);
 				if (retval != -EINPROGRESS &&
 				    retval != -ENODEV &&
-				    retval != -EBUSY)
+				    retval != -EBUSY &&
+				    retval != -EIDRM)
 					dev_err(&io->dev->dev,
 						"%s, unlink --> %d\n",
 						__func__, retval);
@@ -317,7 +318,6 @@
 		}
 		spin_lock(&io->lock);
 	}
-	urb->dev = NULL;
 
 	/* on the last completion, signal usb_sg_wait() */
 	io->bytes += urb->actual_length;
@@ -524,7 +524,6 @@
 		case -ENXIO:	/* hc didn't queue this one */
 		case -EAGAIN:
 		case -ENOMEM:
-			io->urbs[i]->dev = NULL;
 			retval = 0;
 			yield();
 			break;
@@ -542,7 +541,6 @@
 
 			/* fail any uncompleted urbs */
 		default:
-			io->urbs[i]->dev = NULL;
 			io->urbs[i]->status = retval;
 			dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
 				__func__, retval);
@@ -593,7 +591,10 @@
 			if (!io->urbs [i]->dev)
 				continue;
 			retval = usb_unlink_urb(io->urbs [i]);
-			if (retval != -EINPROGRESS && retval != -EBUSY)
+			if (retval != -EINPROGRESS
+					&& retval != -ENODEV
+					&& retval != -EBUSY
+					&& retval != -EIDRM)
 				dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
 					__func__, retval);
 		}
@@ -1135,8 +1136,6 @@
  * Deallocates hcd/hardware state for the endpoints (nuking all or most
  * pending urbs) and usbcore state for the interfaces, so that usbcore
  * must usb_set_configuration() before any interfaces could be used.
- *
- * Must be called with hcd->bandwidth_mutex held.
  */
 void usb_disable_device(struct usb_device *dev, int skip_ep0)
 {
@@ -1189,7 +1188,9 @@
 			usb_disable_endpoint(dev, i + USB_DIR_IN, false);
 		}
 		/* Remove endpoints from the host controller internal state */
+		mutex_lock(hcd->bandwidth_mutex);
 		usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+		mutex_unlock(hcd->bandwidth_mutex);
 		/* Second pass: remove endpoint pointers */
 	}
 	for (i = skip_ep0; i < 16; ++i) {
@@ -1749,7 +1750,6 @@
 	/* if it's already configured, clear out old state first.
 	 * getting rid of old interfaces means unbinding their drivers.
 	 */
-	mutex_lock(hcd->bandwidth_mutex);
 	if (dev->state != USB_STATE_ADDRESS)
 		usb_disable_device(dev, 1);	/* Skip ep0 */
 
@@ -1762,6 +1762,7 @@
 	 * host controller will not allow submissions to dropped endpoints.  If
 	 * this call fails, the device state is unchanged.
 	 */
+	mutex_lock(hcd->bandwidth_mutex);
 	ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
 	if (ret < 0) {
 		mutex_unlock(hcd->bandwidth_mutex);
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 7239a73..cd9b3a2 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -539,6 +539,10 @@
  * never submitted, or it was unlinked before, or the hardware is already
  * finished with it), even if the completion handler has not yet run.
  *
+ * The URB must not be deallocated while this routine is running.  In
+ * particular, when a driver calls this routine, it must ensure that the
+ * completion handler cannot deallocate the URB.
+ *
  * Unlinking and Endpoint Queues:
  *
  * [The behaviors and guarantees described below do not apply to virtual
@@ -603,6 +607,10 @@
  * with error -EPERM.  Thus even if the URB's completion handler always
  * tries to resubmit, it will not succeed and the URB will become idle.
  *
+ * The URB must not be deallocated while this routine is running.  In
+ * particular, when a driver calls this routine, it must ensure that the
+ * completion handler cannot deallocate the URB.
+ *
  * This routine may not be used in an interrupt context (such as a bottom
  * half or a completion handler), or when holding a spinlock, or in other
  * situations where the caller can't schedule().
@@ -640,6 +648,10 @@
  * with error -EPERM.  Thus even if the URB's completion handler always
  * tries to resubmit, it will not succeed and the URB will become idle.
  *
+ * The URB must not be deallocated while this routine is running.  In
+ * particular, when a driver calls this routine, it must ensure that the
+ * completion handler cannot deallocate the URB.
+ *
  * This routine may not be used in an interrupt context (such as a bottom
  * half or a completion handler), or when holding a spinlock, or in other
  * situations where the caller can't schedule().
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 7bd815a..99b58d84 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -206,11 +206,11 @@
 
 	for (i = 0; i < dwc->num_event_buffers; i++) {
 		evt = dwc->ev_buffs[i];
-		if (evt) {
+		if (evt)
 			dwc3_free_one_event_buffer(dwc, evt);
-			dwc->ev_buffs[i] = NULL;
-		}
 	}
+
+	kfree(dwc->ev_buffs);
 }
 
 /**
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 25910e2..3584a16 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -353,6 +353,9 @@
 
 			dwc->test_mode_nr = wIndex >> 8;
 			dwc->test_mode = true;
+			break;
+		default:
+			return -EINVAL;
 		}
 		break;
 
@@ -559,15 +562,20 @@
 	length = trb->size & DWC3_TRB_SIZE_MASK;
 
 	if (dwc->ep0_bounced) {
+		unsigned transfer_size = ur->length;
+		unsigned maxp = ep0->endpoint.maxpacket;
+
+		transfer_size += (maxp - (transfer_size % maxp));
 		transferred = min_t(u32, ur->length,
-				ep0->endpoint.maxpacket - length);
+				transfer_size - length);
 		memcpy(ur->buf, dwc->ep0_bounce, transferred);
 		dwc->ep0_bounced = false;
 	} else {
 		transferred = ur->length - length;
-		ur->actual += transferred;
 	}
 
+	ur->actual += transferred;
+
 	if ((epnum & 1) && ur->actual < ur->length) {
 		/* for some reason we did not get everything out */
 
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 0c935d7..9d7bcd9 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1863,8 +1863,8 @@
 			mod_timer(&udc->vbus_timer,
 				  jiffies + VBUS_POLL_TIMEOUT);
 		} else {
-			if (request_irq(udc->board.vbus_pin, at91_vbus_irq,
-					0, driver_name, udc)) {
+			if (request_irq(gpio_to_irq(udc->board.vbus_pin),
+					at91_vbus_irq, 0, driver_name, udc)) {
 				DBG("request vbus irq %d failed\n",
 				    udc->board.vbus_pin);
 				retval = -EBUSY;
@@ -1886,7 +1886,7 @@
 	return 0;
 fail4:
 	if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled)
-		free_irq(udc->board.vbus_pin, udc);
+		free_irq(gpio_to_irq(udc->board.vbus_pin), udc);
 fail3:
 	if (gpio_is_valid(udc->board.vbus_pin))
 		gpio_free(udc->board.vbus_pin);
@@ -1924,7 +1924,7 @@
 	device_init_wakeup(&pdev->dev, 0);
 	remove_debug_file(udc);
 	if (gpio_is_valid(udc->board.vbus_pin)) {
-		free_irq(udc->board.vbus_pin, udc);
+		free_irq(gpio_to_irq(udc->board.vbus_pin), udc);
 		gpio_free(udc->board.vbus_pin);
 	}
 	free_irq(udc->udp_irq, udc);
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 1cbba70..f52cb1a 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -712,7 +712,7 @@
 	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
 		struct ffs_function *func = ffs->func;
 		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
-	} else if (gadget->ops->ioctl) {
+	} else if (gadget && gadget->ops->ioctl) {
 		ret = gadget->ops->ioctl(gadget, code, value);
 	} else {
 		ret = -ENOTTY;
@@ -1382,6 +1382,7 @@
 		ffs->ep0req = NULL;
 		ffs->gadget = NULL;
 		ffs_data_put(ffs);
+		clear_bit(FFS_FL_BOUND, &ffs->flags);
 	}
 }
 
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 7b1cf18..5234365 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -500,6 +500,7 @@
 			if (buf) {
 				memcpy(req->buf, buf, n);
 				req->complete = rndis_response_complete;
+				req->context = rndis;
 				rndis_free_response(rndis->config, buf);
 				value = n;
 			}
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 5f94e79..55abfb6 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -730,7 +730,7 @@
 		: (1 << (ep_index(ep)));
 
 	/* check if the pipe is empty */
-	if (!(list_empty(&ep->queue))) {
+	if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) {
 		/* Add td to the end */
 		struct fsl_req *lastreq;
 		lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
@@ -918,10 +918,6 @@
 		return -ENOMEM;
 	}
 
-	/* Update ep0 state */
-	if ((ep_index(ep) == 0))
-		udc->ep0_state = DATA_STATE_XMIT;
-
 	/* irq handler advances the queue */
 	if (req != NULL)
 		list_add_tail(&req->queue, &ep->queue);
@@ -1279,7 +1275,8 @@
 		udc->ep0_dir = USB_DIR_OUT;
 
 	ep = &udc->eps[0];
-	udc->ep0_state = WAIT_FOR_OUT_STATUS;
+	if (udc->ep0_state != DATA_STATE_XMIT)
+		udc->ep0_state = WAIT_FOR_OUT_STATUS;
 
 	req->ep = ep;
 	req->req.length = 0;
@@ -1384,6 +1381,9 @@
 
 	list_add_tail(&req->queue, &ep->queue);
 	udc->ep0_state = DATA_STATE_XMIT;
+	if (ep0_prime_status(udc, EP_DIR_OUT))
+		ep0stall(udc);
+
 	return;
 stall:
 	ep0stall(udc);
@@ -1492,6 +1492,14 @@
 		spin_lock(&udc->lock);
 		udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
 				?  DATA_STATE_XMIT : DATA_STATE_RECV;
+		/*
+		 * If the data stage is IN, send status prime immediately.
+		 * See the USB 2.0 spec, chapter 8.5.3.3, for details.
+		 */
+		if (udc->ep0_state == DATA_STATE_XMIT)
+			if (ep0_prime_status(udc, EP_DIR_OUT))
+				ep0stall(udc);
+
 	} else {
 		/* No data phase, IN status from gadget */
 		udc->ep0_dir = USB_DIR_IN;
@@ -1520,9 +1528,8 @@
 
 	switch (udc->ep0_state) {
 	case DATA_STATE_XMIT:
-		/* receive status phase */
-		if (ep0_prime_status(udc, EP_DIR_OUT))
-			ep0stall(udc);
+		/* already primed at setup_received_irq */
+		udc->ep0_state = WAIT_FOR_OUT_STATUS;
 		break;
 	case DATA_STATE_RECV:
 		/* send status phase */
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index 331cd67..a85eaf4 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -161,7 +161,7 @@
 static struct ffs_data *gfs_ffs_data;
 static unsigned long gfs_registered;
 
-static int  gfs_init(void)
+static int __init gfs_init(void)
 {
 	ENTER();
 
@@ -169,7 +169,7 @@
 }
 module_init(gfs_init);
 
-static void  gfs_exit(void)
+static void __exit gfs_exit(void)
 {
 	ENTER();
 
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 8793f32..e58b164 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1574,7 +1574,6 @@
 	DBG (dev, "%s %d\n", __func__, dev->state);
 
 	/* dev->state must prevent interference */
-restart:
 	spin_lock_irq (&dev->lock);
 	while (!list_empty(&dev->epfiles)) {
 		struct ep_data	*ep;
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 69295ba..105b206 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -340,7 +340,7 @@
 	/* currently we allocate TX FIFOs for all possible endpoints,
 	 * and assume that they are all the same size. */
 
-	for (ep = 0; ep <= 15; ep++) {
+	for (ep = 1; ep <= 15; ep++) {
 		val = addr;
 		val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT;
 		addr += size;
@@ -741,7 +741,7 @@
 	/* write size / packets */
 	writel(epsize, hsotg->regs + epsize_reg);
 
-	if (using_dma(hsotg)) {
+	if (using_dma(hsotg) && !continuing) {
 		unsigned int dma_reg;
 
 		/* write DMA address to control register, buffer already
@@ -1696,10 +1696,12 @@
 	reg |= mpsval;
 	writel(reg, regs + S3C_DIEPCTL(ep));
 
-	reg = readl(regs + S3C_DOEPCTL(ep));
-	reg &= ~S3C_DxEPCTL_MPS_MASK;
-	reg |= mpsval;
-	writel(reg, regs + S3C_DOEPCTL(ep));
+	if (ep) {
+		reg = readl(regs + S3C_DOEPCTL(ep));
+		reg &= ~S3C_DxEPCTL_MPS_MASK;
+		reg |= mpsval;
+		writel(reg, regs + S3C_DOEPCTL(ep));
+	}
 
 	return;
 
@@ -1919,7 +1921,8 @@
 		    ints & S3C_DIEPMSK_TxFIFOEmpty) {
 			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
 				__func__, idx);
-			s3c_hsotg_trytx(hsotg, hs_ep);
+			if (!using_dma(hsotg))
+				s3c_hsotg_trytx(hsotg, hs_ep);
 		}
 	}
 }
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 56da49f..2fa9865 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -264,8 +264,8 @@
 	if (udc_is_newstyle(udc)) {
 		udc->driver->disconnect(udc->gadget);
 		udc->driver->unbind(udc->gadget);
-		usb_gadget_udc_stop(udc->gadget, udc->driver);
 		usb_gadget_disconnect(udc->gadget);
+		usb_gadget_udc_stop(udc->gadget, udc->driver);
 	} else {
 		usb_gadget_stop(udc->gadget, udc->driver);
 	}
@@ -411,8 +411,12 @@
 	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);
 
 	if (sysfs_streq(buf, "connect")) {
+		if (udc_is_newstyle(udc))
+			usb_gadget_udc_start(udc->gadget, udc->driver);
 		usb_gadget_connect(udc->gadget);
 	} else if (sysfs_streq(buf, "disconnect")) {
+		if (udc_is_newstyle(udc))
+			usb_gadget_udc_stop(udc->gadget, udc->driver);
 		usb_gadget_disconnect(udc->gadget);
 	} else {
 		dev_err(dev, "unsupported command '%s'\n", buf);
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index d776adb..0cdf89d 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -543,11 +543,11 @@
 	return ret;
 }
 
+/* called with queue->irqlock held. */
 static struct uvc_buffer *
 uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
 {
 	struct uvc_buffer *nextbuf;
-	unsigned long flags;
 
 	if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
 	    buf->buf.length != buf->buf.bytesused) {
@@ -556,14 +556,12 @@
 		return buf;
 	}
 
-	spin_lock_irqsave(&queue->irqlock, flags);
 	list_del(&buf->queue);
 	if (!list_empty(&queue->irqqueue))
 		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
 					   queue);
 	else
 		nextbuf = NULL;
-	spin_unlock_irqrestore(&queue->irqlock, flags);
 
 	buf->buf.sequence = queue->sequence++;
 	do_gettimeofday(&buf->buf.timestamp);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 3e73451..d0a84bd 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -218,6 +218,9 @@
 	u32 portsc;
 	struct usb_hcd *hcd = ehci_to_hcd(ehci);
 	void __iomem *non_ehci = hcd->regs;
+	struct fsl_usb2_platform_data *pdata;
+
+	pdata = hcd->self.controller->platform_data;
 
 	portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
 	portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW);
@@ -234,7 +237,9 @@
 		/* fall through */
 	case FSL_USB2_PHY_UTMI:
 		/* enable UTMI PHY */
-		setbits32(non_ehci + FSL_SOC_USB_CTRL, CTRL_UTMI_PHY_EN);
+		if (pdata->have_sysif_regs)
+			setbits32(non_ehci + FSL_SOC_USB_CTRL,
+				  CTRL_UTMI_PHY_EN);
 		portsc |= PORT_PTS_UTMI;
 		break;
 	case FSL_USB2_PHY_NONE:
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 057cdda..4a3bc5b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -347,6 +347,8 @@
 	if (ehci->debug)
 		dbgp_external_startup();
 
+	ehci->port_c_suspend = ehci->suspended_ports =
+			ehci->resuming_ports = 0;
 	return retval;
 }
 
@@ -856,8 +858,13 @@
 		goto dead;
 	}
 
+	/*
+	 * We don't use STS_FLR, but some controllers don't like it to
+	 * remain on, so mask it out along with the other status bits.
+	 */
+	masked_status = status & (INTR_MASK | STS_FLR);
+
 	/* Shared IRQ? */
-	masked_status = status & INTR_MASK;
 	if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
 		spin_unlock(&ehci->lock);
 		return IRQ_NONE;
@@ -908,7 +915,7 @@
 		pcd_status = status;
 
 		/* resume root hub? */
-		if (!(cmd & CMD_RUN))
+		if (ehci->rh_state == EHCI_RH_SUSPENDED)
 			usb_hcd_resume_root_hub(hcd);
 
 		/* get per-port change detect bits */
@@ -939,6 +946,7 @@
 			 * like usb_port_resume() does.
 			 */
 			ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
+			set_bit(i, &ehci->resuming_ports);
 			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
 			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
 		}
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 256fbd4..38fe076 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -223,15 +223,10 @@
 	 * remote wakeup, we must fail the suspend.
 	 */
 	if (hcd->self.root_hub->do_remote_wakeup) {
-		port = HCS_N_PORTS(ehci->hcs_params);
-		while (port--) {
-			if (ehci->reset_done[port] != 0) {
-				spin_unlock_irq(&ehci->lock);
-				ehci_dbg(ehci, "suspend failed because "
-						"port %d is resuming\n",
-						port + 1);
-				return -EBUSY;
-			}
+		if (ehci->resuming_ports) {
+			spin_unlock_irq(&ehci->lock);
+			ehci_dbg(ehci, "suspend failed because a port is resuming\n");
+			return -EBUSY;
 		}
 	}
 
@@ -554,16 +549,12 @@
 ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
 {
 	struct ehci_hcd	*ehci = hcd_to_ehci (hcd);
-	u32		temp, status = 0;
+	u32		temp, status;
 	u32		mask;
 	int		ports, i, retval = 1;
 	unsigned long	flags;
 	u32		ppcd = 0;
 
-	/* if !USB_SUSPEND, root hub timers won't get shut down ... */
-	if (ehci->rh_state != EHCI_RH_RUNNING)
-		return 0;
-
 	/* init status to no-changes */
 	buf [0] = 0;
 	ports = HCS_N_PORTS (ehci->hcs_params);
@@ -572,6 +563,11 @@
 		retval++;
 	}
 
+	/* Inform the core about resumes-in-progress by returning
+	 * a non-zero value even if there are no status changes.
+	 */
+	status = ehci->resuming_ports;
+
 	/* Some boards (mostly VIA?) report bogus overcurrent indications,
 	 * causing massive log spam unless we completely ignore them.  It
 	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
@@ -846,6 +842,7 @@
 				ehci_writel(ehci,
 					temp & ~(PORT_RWC_BITS | PORT_RESUME),
 					status_reg);
+				clear_bit(wIndex, &ehci->resuming_ports);
 				retval = handshake(ehci, status_reg,
 					   PORT_RESUME, 0, 2000 /* 2msec */);
 				if (retval != 0) {
@@ -864,6 +861,7 @@
 					ehci->reset_done[wIndex])) {
 			status |= USB_PORT_STAT_C_RESET << 16;
 			ehci->reset_done [wIndex] = 0;
+			clear_bit(wIndex, &ehci->resuming_ports);
 
 			/* force reset to complete */
 			ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET),
@@ -884,8 +882,10 @@
 					ehci_readl(ehci, status_reg));
 		}
 
-		if (!(temp & (PORT_RESUME|PORT_RESET)))
+		if (!(temp & (PORT_RESUME|PORT_RESET))) {
 			ehci->reset_done[wIndex] = 0;
+			clear_bit(wIndex, &ehci->resuming_ports);
+		}
 
 		/* transfer dedicated ports to the companion hc */
 		if ((temp & PORT_CONNECT) &&
@@ -920,6 +920,7 @@
 			status |= USB_PORT_STAT_SUSPEND;
 		} else if (test_bit(wIndex, &ehci->suspended_ports)) {
 			clear_bit(wIndex, &ehci->suspended_ports);
+			clear_bit(wIndex, &ehci->resuming_ports);
 			ehci->reset_done[wIndex] = 0;
 			if (temp & PORT_PE)
 				set_bit(wIndex, &ehci->port_c_suspend);
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index bba9850..5c78f9e 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -42,6 +42,7 @@
 #include <plat/usb.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
+#include <linux/gpio.h>
 
 /* EHCI Register Set */
 #define EHCI_INSNREG04					(0xA0)
@@ -191,6 +192,19 @@
 		}
 	}
 
+	if (pdata->phy_reset) {
+		if (gpio_is_valid(pdata->reset_gpio_port[0]))
+			gpio_request_one(pdata->reset_gpio_port[0],
+					 GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+		if (gpio_is_valid(pdata->reset_gpio_port[1]))
+			gpio_request_one(pdata->reset_gpio_port[1],
+					 GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+		/* Hold the PHY in RESET for enough time till DIR is high */
+		udelay(10);
+	}
+
 	pm_runtime_enable(dev);
 	pm_runtime_get_sync(dev);
 
@@ -237,6 +251,19 @@
 	/* root ports should always stay powered */
 	ehci_port_power(omap_ehci, 1);
 
+	if (pdata->phy_reset) {
+		/* Hold the PHY in RESET for enough time till
+		 * PHY is settled and ready
+		 */
+		udelay(10);
+
+		if (gpio_is_valid(pdata->reset_gpio_port[0]))
+			gpio_set_value(pdata->reset_gpio_port[0], 1);
+
+		if (gpio_is_valid(pdata->reset_gpio_port[1]))
+			gpio_set_value(pdata->reset_gpio_port[1], 1);
+	}
+
 	return 0;
 
 err_add_hcd:
@@ -259,8 +286,9 @@
  */
 static int ehci_hcd_omap_remove(struct platform_device *pdev)
 {
-	struct device *dev	= &pdev->dev;
-	struct usb_hcd *hcd	= dev_get_drvdata(dev);
+	struct device *dev				= &pdev->dev;
+	struct usb_hcd *hcd				= dev_get_drvdata(dev);
+	struct ehci_hcd_omap_platform_data *pdata	= dev->platform_data;
 
 	usb_remove_hcd(hcd);
 	disable_put_regulator(dev->platform_data);
@@ -269,6 +297,13 @@
 	pm_runtime_put_sync(dev);
 	pm_runtime_disable(dev);
 
+	if (pdata->phy_reset) {
+		if (gpio_is_valid(pdata->reset_gpio_port[0]))
+			gpio_free(pdata->reset_gpio_port[0]);
+
+		if (gpio_is_valid(pdata->reset_gpio_port[1]))
+			gpio_free(pdata->reset_gpio_port[1]);
+	}
 	return 0;
 }
 
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 3de48a2..8618336 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -224,6 +224,7 @@
 		temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
 		/* start resume signalling */
 		ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+		set_bit(wIndex-1, &ehci->resuming_ports);
 
 		spin_unlock_irqrestore(&ehci->lock, flags);
 		msleep(20);
@@ -236,6 +237,7 @@
 			pr_err("%s: timeout waiting for SUSPEND\n", __func__);
 
 		ehci->reset_done[wIndex-1] = 0;
+		clear_bit(wIndex-1, &ehci->resuming_ports);
 
 		tegra->port_resuming = 1;
 		goto done;
@@ -729,7 +731,6 @@
 		err = -ENODEV;
 		goto fail;
 	}
-	set_irq_flags(irq, IRQF_VALID);
 
 #ifdef CONFIG_USB_OTG_UTILS
 	if (pdata->operating_mode == TEGRA_USB_OTG) {
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 8f9acbc..2694ed6 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -117,6 +117,8 @@
 			the change-suspend feature turned on */
 	unsigned long		suspended_ports;	/* which ports are
 			suspended */
+	unsigned long		resuming_ports;		/* which ports have
+			started to resume */
 
 	/* per-HC memory pools (could be per-bus, but ...) */
 	struct dma_pool		*qh_pool;	/* qh per active urb */
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 09f597a..13ebeca 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -94,7 +94,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
+static void __devexit usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
 
 /* configure so an HC device and id are always provided */
 /* always called with process context; sleeping is OK */
@@ -108,7 +108,7 @@
  * then invokes the start() method for the HCD associated with it
  * through the hotplug entry's driver_data.
  */
-static int usb_hcd_at91_probe(const struct hc_driver *driver,
+static int __devinit usb_hcd_at91_probe(const struct hc_driver *driver,
 			struct platform_device *pdev)
 {
 	int retval;
@@ -203,7 +203,7 @@
  * context, "rmmod" or something similar.
  *
  */
-static void usb_hcd_at91_remove(struct usb_hcd *hcd,
+static void __devexit usb_hcd_at91_remove(struct usb_hcd *hcd,
 				struct platform_device *pdev)
 {
 	usb_remove_hcd(hcd);
@@ -545,7 +545,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
+static int __devinit ohci_hcd_at91_drv_probe(struct platform_device *pdev)
 {
 	struct at91_usbh_data	*pdata;
 	int			i;
@@ -620,7 +620,7 @@
 	return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
 }
 
-static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
+static int __devexit ohci_hcd_at91_drv_remove(struct platform_device *pdev)
 {
 	struct at91_usbh_data	*pdata = pdev->dev.platform_data;
 	int			i;
@@ -696,7 +696,7 @@
 
 static struct platform_driver ohci_hcd_at91_driver = {
 	.probe		= ohci_hcd_at91_drv_probe,
-	.remove		= ohci_hcd_at91_drv_remove,
+	.remove		= __devexit_p(ohci_hcd_at91_drv_remove),
 	.shutdown	= usb_hcd_platform_shutdown,
 	.suspend	= ohci_hcd_at91_drv_suspend,
 	.resume		= ohci_hcd_at91_drv_resume,
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 11de5f1..32dada8 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -825,9 +825,13 @@
 		}
 	}
 
-	/* Disable any BIOS SMIs */
-	writel(XHCI_LEGACY_DISABLE_SMI,
-			base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+	/* Mask off (turn off) any enabled SMIs */
+	val &= XHCI_LEGACY_DISABLE_SMI;
+	/* Mask all SMI events bits, RW1C */
+	val |= XHCI_LEGACY_SMI_EVENTS;
+	/* Disable any BIOS SMIs and clear all SMI events*/
+	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
 
 	if (usb_is_intel_switchable_xhci(pdev))
 		usb_enable_xhci_ports(pdev);
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 045cde4..768d542 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -196,11 +196,12 @@
 	status = get_hub_status_data(uhci, buf);
 
 	switch (uhci->rh_state) {
-	    case UHCI_RH_SUSPENDING:
 	    case UHCI_RH_SUSPENDED:
 		/* if port change, ask to be resumed */
-		if (status || uhci->resuming_ports)
+		if (status || uhci->resuming_ports) {
+			status = 1;
 			usb_hcd_resume_root_hub(hcd);
+		}
 		break;
 
 	    case UHCI_RH_AUTO_STOPPED:
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index e9b0f04..4b436f5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -119,7 +119,7 @@
 	xhci_dbg(xhci, "  Event Interrupts %s\n",
 			(temp & CMD_EIE) ? "enabled " : "disabled");
 	xhci_dbg(xhci, "  Host System Error Interrupts %s\n",
-			(temp & CMD_EIE) ? "enabled " : "disabled");
+			(temp & CMD_HSEIE) ? "enabled " : "disabled");
 	xhci_dbg(xhci, "  HC has %sfinished light reset\n",
 			(temp & CMD_LRESET) ? "not " : "");
 }
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index c7f3312..377f424 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -62,8 +62,9 @@
 /* USB Legacy Support Control and Status Register  - section 7.1.2 */
 /* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
 #define XHCI_LEGACY_CONTROL_OFFSET	(0x04)
-/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
-#define	XHCI_LEGACY_DISABLE_SMI		((0x3 << 1) + (0xff << 5) + (0x7 << 17))
+/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define	XHCI_LEGACY_DISABLE_SMI		((0x7 << 1) + (0xff << 5) + (0x7 << 17))
+#define XHCI_LEGACY_SMI_EVENTS		(0x7 << 29)
 
 /* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */
 #define XHCI_L1C               (1 << 16)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index cae4c6f..68eaa90 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1796,11 +1796,6 @@
 	int i;
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
-	if (xhci->ir_set) {
-		xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-		xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
-		xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
-	}
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		dma_free_coherent(&pdev->dev, size,
@@ -1812,7 +1807,6 @@
 	xhci->event_ring = NULL;
 	xhci_dbg(xhci, "Freed event ring\n");
 
-	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
@@ -1841,7 +1835,6 @@
 	xhci->medium_streams_pool = NULL;
 	xhci_dbg(xhci, "Freed medium stream array pool\n");
 
-	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 	if (xhci->dcbaa)
 		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
@@ -2459,6 +2452,8 @@
 
 fail:
 	xhci_warn(xhci, "Couldn't initialize memory\n");
+	xhci_halt(xhci);
+	xhci_reset(xhci);
 	xhci_mem_cleanup(xhci);
 	return -ENOMEM;
 }
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ef98b38..7a856a7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -95,6 +95,8 @@
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
 		xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
 	}
+	if (pdev->vendor == PCI_VENDOR_ID_VIA)
+		xhci->quirks |= XHCI_RESET_ON_RESUME;
 }
 
 /* called during probe() after chip reset completes */
@@ -326,7 +328,7 @@
 	return pci_register_driver(&xhci_pci_driver);
 }
 
-void __exit xhci_unregister_pci(void)
+void xhci_unregister_pci(void)
 {
 	pci_unregister_driver(&xhci_pci_driver);
 }
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6bd9d53..3d9422f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2417,7 +2417,7 @@
 		u32 irq_pending;
 		/* Acknowledge the PCI interrupt */
 		irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-		irq_pending |= 0x3;
+		irq_pending |= IMAN_IP;
 		xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
 	}
 
@@ -2734,7 +2734,7 @@
 				urb->dev->speed == USB_SPEED_FULL)
 			urb->interval /= 8;
 	}
-	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
 }
 
 /*
@@ -3514,7 +3514,7 @@
 	}
 	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
 
-	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
 }
 
 /****		Command Ring Operations		****/
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index e1963d4..36641a7 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -106,6 +106,9 @@
 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 	if (!ret)
 		xhci->xhc_state |= XHCI_STATE_HALTED;
+	else
+		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
+				XHCI_MAX_HALT_USEC);
 	return ret;
 }
 
@@ -664,11 +667,11 @@
 	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
 	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
 	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
-	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
 	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
 	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
 	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
 }
 
 static void xhci_restore_registers(struct xhci_hcd *xhci)
@@ -677,10 +680,11 @@
 	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
 	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
 	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
-	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
-	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
 	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
 	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
 }
 
 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 91074fd..3d69c4b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -205,6 +205,10 @@
 #define CMD_PM_INDEX	(1 << 11)
 /* bits 12:31 are reserved (and should be preserved on writes). */
 
+/* IMAN - Interrupt Management Register */
+#define IMAN_IP		(1 << 1)
+#define IMAN_IE		(1 << 0)
+
 /* USBSTS - USB status - status bitmasks */
 /* HC not running - set to 1 when run/stop bit is cleared. */
 #define STS_HALT	XHCI_STS_HALT
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 959145b..9dcb68f 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -423,7 +423,7 @@
 	unsigned		i;
 	unsigned		size = max;
 
-	sg = kmalloc(nents * sizeof *sg, GFP_KERNEL);
+	sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
 	if (!sg)
 		return NULL;
 	sg_init_table(sg, nents);
@@ -904,6 +904,9 @@
 	struct ctrl_ctx		context;
 	int			i;
 
+	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
+		return -EOPNOTSUPP;
+
 	spin_lock_init(&context.lock);
 	context.dev = dev;
 	init_completion(&context.complete);
@@ -1981,8 +1984,6 @@
 
 	/* queued control messaging */
 	case 10:
-		if (param->sglen == 0)
-			break;
 		retval = 0;
 		dev_info(&intf->dev,
 				"TEST 10:  queue %d control calls, %d times\n",
@@ -2276,6 +2277,8 @@
 			if (status < 0) {
 				WARNING(dev, "couldn't get endpoints, %d\n",
 						status);
+				kfree(dev->buf);
+				kfree(dev);
 				return status;
 			}
 			/* may find bulk or ISO pipes */
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 897edda..7020146 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -99,9 +99,7 @@
 	usb_put_dev(dev->udev);
 	if (dev->cntl_urb) {
 		usb_kill_urb(dev->cntl_urb);
-		if (dev->cntl_req)
-			usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
-				dev->cntl_req, dev->cntl_urb->setup_dma);
+		kfree(dev->cntl_req);
 		if (dev->cntl_buffer)
 			usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
 				dev->cntl_buffer, dev->cntl_urb->transfer_dma);
@@ -234,9 +232,7 @@
 	}
 
 	/* allocate buffer for control req */
-	dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
-					   GFP_KERNEL,
-					   &dev->cntl_urb->setup_dma);
+	dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL);
 	if (!dev->cntl_req) {
 		err("Could not allocate cntl_req");
 		goto error;
@@ -286,7 +282,7 @@
 			 usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr),
 			 dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
 			 dev, 1);
-	dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+	dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 	if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
 		retval = -EIO;
 		err("Could not submitting URB");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 0f8b829..66aaccf 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -137,6 +137,9 @@
 	int	i = 0;
 	u8	r;
 	u8	power;
+	int	ret;
+
+	pm_runtime_get_sync(phy->io_dev);
 
 	/* Make sure the transceiver is not in low power mode */
 	power = musb_readb(addr, MUSB_POWER);
@@ -154,15 +157,22 @@
 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
 				& MUSB_ULPI_REG_CMPLT)) {
 		i++;
-		if (i == 10000)
-			return -ETIMEDOUT;
+		if (i == 10000) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
 
 	}
 	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
 	r &= ~MUSB_ULPI_REG_CMPLT;
 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
 
-	return musb_readb(addr, MUSB_ULPI_REG_DATA);
+	ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
+
+out:
+	pm_runtime_put(phy->io_dev);
+
+	return ret;
 }
 
 static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
@@ -171,6 +181,9 @@
 	int	i = 0;
 	u8	r = 0;
 	u8	power;
+	int	ret = 0;
+
+	pm_runtime_get_sync(phy->io_dev);
 
 	/* Make sure the transceiver is not in low power mode */
 	power = musb_readb(addr, MUSB_POWER);
@@ -184,15 +197,20 @@
 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
 				& MUSB_ULPI_REG_CMPLT)) {
 		i++;
-		if (i == 10000)
-			return -ETIMEDOUT;
+		if (i == 10000) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
 	}
 
 	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
 	r &= ~MUSB_ULPI_REG_CMPLT;
 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
 
-	return 0;
+out:
+	pm_runtime_put(phy->io_dev);
+
+	return ret;
 }
 #else
 #define musb_ulpi_read		NULL
@@ -1904,14 +1922,17 @@
 
 	if (!musb->isr) {
 		status = -ENODEV;
-		goto fail3;
+		goto fail2;
 	}
 
 	if (!musb->xceiv->io_ops) {
+		musb->xceiv->io_dev = musb->controller;
 		musb->xceiv->io_priv = musb->mregs;
 		musb->xceiv->io_ops = &musb_ulpi_access;
 	}
 
+	pm_runtime_get_sync(musb->controller);
+
 #ifndef CONFIG_MUSB_PIO_ONLY
 	if (use_dma && dev->dma_mask) {
 		struct dma_controller	*c;
@@ -2023,6 +2044,8 @@
 		goto fail5;
 #endif
 
+	pm_runtime_put(musb->controller);
+
 	dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
 			({char *s;
 			 switch (musb->board_mode) {
@@ -2047,6 +2070,9 @@
 		musb_gadget_cleanup(musb);
 
 fail3:
+	pm_runtime_put_sync(musb->controller);
+
+fail2:
 	if (musb->irq_wake)
 		device_init_wakeup(dev, 0);
 	musb_platform_exit(musb);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 79cb0af..ef8d744 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2098,7 +2098,7 @@
 	}
 
 	/* turn off DMA requests, discard state, stop polling ... */
-	if (is_in) {
+	if (ep->epnum && is_in) {
 		/* giveback saves bulk toggle */
 		csr = musb_h_flush_rxfifo(ep, 0);
 
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 2ae0bb3..c7785e8 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -282,7 +282,8 @@
 
 static int omap2430_musb_init(struct musb *musb)
 {
-	u32 l, status = 0;
+	u32 l;
+	int status = 0;
 	struct device *dev = musb->controller;
 	struct musb_hdrc_platform_data *plat = dev->platform_data;
 	struct omap_musb_board_data *data = plat->board_data;
@@ -301,7 +302,7 @@
 
 	status = pm_runtime_get_sync(dev);
 	if (status < 0) {
-		dev_err(dev, "pm_runtime_get_sync FAILED");
+		dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
 		goto err1;
 	}
 
@@ -333,6 +334,7 @@
 
 	setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
 
+	pm_runtime_put_noidle(musb->controller);
 	return 0;
 
 err1:
@@ -452,14 +454,14 @@
 		goto err2;
 	}
 
+	pm_runtime_enable(&pdev->dev);
+
 	ret = platform_device_add(musb);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register musb device\n");
 		goto err2;
 	}
 
-	pm_runtime_enable(&pdev->dev);
-
 	return 0;
 
 err2:
@@ -478,7 +480,6 @@
 
 	platform_device_del(glue->musb);
 	platform_device_put(glue->musb);
-	pm_runtime_put(&pdev->dev);
 	kfree(glue);
 
 	return 0;
@@ -491,11 +492,13 @@
 	struct omap2430_glue		*glue = dev_get_drvdata(dev);
 	struct musb			*musb = glue_to_musb(glue);
 
-	musb->context.otg_interfsel = musb_readl(musb->mregs,
-						OTG_INTERFSEL);
+	if (musb) {
+		musb->context.otg_interfsel = musb_readl(musb->mregs,
+				OTG_INTERFSEL);
 
-	omap2430_low_level_exit(musb);
-	usb_phy_set_suspend(musb->xceiv, 1);
+		omap2430_low_level_exit(musb);
+		usb_phy_set_suspend(musb->xceiv, 1);
+	}
 
 	return 0;
 }
@@ -505,11 +508,13 @@
 	struct omap2430_glue		*glue = dev_get_drvdata(dev);
 	struct musb			*musb = glue_to_musb(glue);
 
-	omap2430_low_level_init(musb);
-	musb_writel(musb->mregs, OTG_INTERFSEL,
-					musb->context.otg_interfsel);
+	if (musb) {
+		omap2430_low_level_init(musb);
+		musb_writel(musb->mregs, OTG_INTERFSEL,
+				musb->context.otg_interfsel);
 
-	usb_phy_set_suspend(musb->xceiv, 0);
+		usb_phy_set_suspend(musb->xceiv, 0);
+	}
 
 	return 0;
 }
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 7f547dc..ed8adb0 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -60,8 +60,6 @@
 		retval = -ENODEV;
 		goto exit;
 	}
-	if (port->dev_state != PORT_REGISTERING)
-		goto exit;
 
 	driver = port->serial->type;
 	if (driver->port_probe) {
@@ -98,9 +96,6 @@
 	if (!port)
 		return -ENODEV;
 
-	if (port->dev_state != PORT_UNREGISTERING)
-		return retval;
-
 	device_remove_file(&port->dev, &dev_attr_port_number);
 
 	driver = port->serial->type;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 0310e2d..ec30f95 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -287,7 +287,8 @@
 	/* Issue the request, attempting to read 'size' bytes */
 	result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
 				request, REQTYPE_DEVICE_TO_HOST, 0x0000,
-				port_priv->bInterfaceNumber, buf, size, 300);
+				port_priv->bInterfaceNumber, buf, size,
+				USB_CTRL_GET_TIMEOUT);
 
 	/* Convert data into an array of integers */
 	for (i = 0; i < length; i++)
@@ -340,12 +341,14 @@
 		result = usb_control_msg(serial->dev,
 				usb_sndctrlpipe(serial->dev, 0),
 				request, REQTYPE_HOST_TO_DEVICE, 0x0000,
-				port_priv->bInterfaceNumber, buf, size, 300);
+				port_priv->bInterfaceNumber, buf, size,
+				USB_CTRL_SET_TIMEOUT);
 	} else {
 		result = usb_control_msg(serial->dev,
 				usb_sndctrlpipe(serial->dev, 0),
 				request, REQTYPE_HOST_TO_DEVICE, data[0],
-				port_priv->bInterfaceNumber, NULL, 0, 300);
+				port_priv->bInterfaceNumber, NULL, 0,
+				USB_CTRL_SET_TIMEOUT);
 	}
 
 	kfree(buf);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ff8605b..02e7f2d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -75,7 +75,8 @@
 	unsigned long last_dtr_rts;	/* saved modem control outputs */
 	struct async_icount	icount;
 	wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
-	char prev_status, diff_status;        /* Used for TIOCMIWAIT */
+	char prev_status;        /* Used for TIOCMIWAIT */
+	bool dev_gone;        /* Used to abort TIOCMIWAIT */
 	char transmit_empty;	/* If transmitter is empty or not */
 	struct usb_serial_port *port;
 	__u16 interface;	/* FT2232C, FT2232H or FT4232H port interface
@@ -1681,6 +1682,7 @@
 	init_waitqueue_head(&priv->delta_msr_wait);
 
 	priv->flags = ASYNC_LOW_LATENCY;
+	priv->dev_gone = false;
 
 	if (quirk && quirk->port_probe)
 		quirk->port_probe(priv);
@@ -1839,6 +1841,9 @@
 
 	dbg("%s", __func__);
 
+	priv->dev_gone = true;
+	wake_up_interruptible_all(&priv->delta_msr_wait);
+
 	remove_sysfs_attrs(port);
 
 	kref_put(&priv->kref, ftdi_sio_priv_release);
@@ -1982,17 +1987,19 @@
 	   N.B. packet may be processed more than once, but differences
 	   are only processed once.  */
 	status = packet[0] & FTDI_STATUS_B0_MASK;
-	if (status & FTDI_RS0_CTS)
-		priv->icount.cts++;
-	if (status & FTDI_RS0_DSR)
-		priv->icount.dsr++;
-	if (status & FTDI_RS0_RI)
-		priv->icount.rng++;
-	if (status & FTDI_RS0_RLSD)
-		priv->icount.dcd++;
 	if (status != priv->prev_status) {
-		priv->diff_status |= status ^ priv->prev_status;
-		wake_up_interruptible(&priv->delta_msr_wait);
+		char diff_status = status ^ priv->prev_status;
+
+		if (diff_status & FTDI_RS0_CTS)
+			priv->icount.cts++;
+		if (diff_status & FTDI_RS0_DSR)
+			priv->icount.dsr++;
+		if (diff_status & FTDI_RS0_RI)
+			priv->icount.rng++;
+		if (diff_status & FTDI_RS0_RLSD)
+			priv->icount.dcd++;
+
+		wake_up_interruptible_all(&priv->delta_msr_wait);
 		priv->prev_status = status;
 	}
 
@@ -2395,15 +2402,12 @@
 	 */
 	case TIOCMIWAIT:
 		cprev = priv->icount;
-		while (1) {
+		while (!priv->dev_gone) {
 			interruptible_sleep_on(&priv->delta_msr_wait);
 			/* see if a signal did it */
 			if (signal_pending(current))
 				return -ERESTARTSYS;
 			cnow = priv->icount;
-			if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
-			    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
-				return -EIO; /* no change => error */
 			if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
 			    ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
 			    ((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
@@ -2412,7 +2416,7 @@
 			}
 			cprev = cnow;
 		}
-		/* not reached */
+		return -EIO;
 		break;
 	case TIOCSERGETLSR:
 		return get_lsr_info(port, (struct serial_struct __user *)arg);
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 6e1622f..08d16e8 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -27,8 +27,8 @@
 
 /* Product information. */
 #define FOCUS_VENDOR_ID			0x0C2E
-#define FOCUS_PRODUCT_ID		0x0720
-#define FOCUS_PRODUCT_ID_UNI		0x0710
+#define FOCUS_PRODUCT_ID_BI		0x0720
+#define FOCUS_PRODUCT_ID_UNI		0x0700
 
 #define METROUSB_SET_REQUEST_TYPE	0x40
 #define METROUSB_SET_MODEM_CTRL_REQUEST	10
@@ -47,7 +47,7 @@
 
 /* Device table list. */
 static struct usb_device_id id_table[] = {
-	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID) },
+	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
 	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
 	{ }, /* Terminating entry. */
 };
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 836cfa9..f4465cc 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -708,6 +708,7 @@
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index ff4a174..a1a9062 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -420,7 +420,7 @@
 	control = priv->line_control;
 	if ((cflag & CBAUD) == B0)
 		priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
-	else
+	else if ((old_termios->c_cflag & CBAUD) == B0)
 		priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
 	if (control != priv->line_control) {
 		control = priv->line_control;
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index f14465a..8c8bf80 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -221,7 +221,7 @@
 };
 
 /* 'blacklist' of interfaces not served by this driver */
-static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
+static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11, 19, 20 };
 static const struct sierra_iface_info direct_ip_interface_blacklist = {
 	.infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
 	.ifaceinfo = direct_ip_non_serial_ifaces,
@@ -298,6 +298,9 @@
 	/* Sierra Wireless HSPA Non-Composite Device */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
 	{ USB_DEVICE(0x1199, 0x6893) },	/* Sierra Wireless Device */
+	{ USB_DEVICE(0x1199, 0x68A2),   /* Sierra Wireless MC77xx in QMI mode */
+	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+	},
 	{ USB_DEVICE(0x1199, 0x68A3), 	/* Sierra Wireless Direct IP modems */
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 69230f0..97355a1 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1059,6 +1059,12 @@
 		serial->attached = 1;
 	}
 
+	/* Avoid race with tty_open and serial_install by setting the
+	 * disconnected flag and not clearing it until all ports have been
+	 * registered.
+	 */
+	serial->disconnected = 1;
+
 	if (get_free_serial(serial, num_ports, &minor) == NULL) {
 		dev_err(&interface->dev, "No more free serial devices\n");
 		goto probe_error;
@@ -1070,19 +1076,16 @@
 		port = serial->port[i];
 		dev_set_name(&port->dev, "ttyUSB%d", port->number);
 		dbg ("%s - registering %s", __func__, dev_name(&port->dev));
-		port->dev_state = PORT_REGISTERING;
 		device_enable_async_suspend(&port->dev);
 
 		retval = device_add(&port->dev);
-		if (retval) {
+		if (retval)
 			dev_err(&port->dev, "Error registering port device, "
 				"continuing\n");
-			port->dev_state = PORT_UNREGISTERED;
-		} else {
-			port->dev_state = PORT_REGISTERED;
-		}
 	}
 
+	serial->disconnected = 0;
+
 	usb_serial_console_init(debug, minor);
 
 exit:
@@ -1124,22 +1127,8 @@
 			}
 			kill_traffic(port);
 			cancel_work_sync(&port->work);
-			if (port->dev_state == PORT_REGISTERED) {
-
-				/* Make sure the port is bound so that the
-				 * driver's port_remove method is called.
-				 */
-				if (!port->dev.driver) {
-					int rc;
-
-					port->dev.driver =
-							&serial->type->driver;
-					rc = device_bind_driver(&port->dev);
-				}
-				port->dev_state = PORT_UNREGISTERING;
+			if (device_is_registered(&port->dev))
 				device_del(&port->dev);
-				port->dev_state = PORT_UNREGISTERED;
-			}
 		}
 	}
 	serial->type->disconnect(serial);
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index c18538e..2653e73 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -132,6 +132,35 @@
 #undef COMPLIANT_DEV
 #undef USUAL_DEV
 
+#ifdef CONFIG_LOCKDEP
+
+static struct lock_class_key us_interface_key[USB_MAXINTERFACES];
+
+static void us_set_lock_class(struct mutex *mutex,
+		struct usb_interface *intf)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct usb_host_config *config = udev->actconfig;
+	int i;
+
+	for (i = 0; i < config->desc.bNumInterfaces; i++) {
+		if (config->interface[i] == intf)
+			break;
+	}
+
+	BUG_ON(i == config->desc.bNumInterfaces);
+
+	lockdep_set_class(mutex, &us_interface_key[i]);
+}
+
+#else
+
+static void us_set_lock_class(struct mutex *mutex,
+		struct usb_interface *intf)
+{
+}
+
+#endif
 
 #ifdef CONFIG_PM	/* Minimal support for suspend and resume */
 
@@ -895,6 +924,7 @@
 	*pus = us = host_to_us(host);
 	memset(us, 0, sizeof(struct us_data));
 	mutex_init(&(us->dev_mutex));
+	us_set_lock_class(&us->dev_mutex, intf);
 	init_completion(&us->cmnd_ready);
 	init_completion(&(us->notify));
 	init_waitqueue_head(&us->delay_wait);
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 66797e9..810c90a 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -645,7 +645,8 @@
 		dev_err(dev, "NEEP: URB error %d\n", urb->status);
 	}
 	result = usb_submit_urb(urb, GFP_ATOMIC);
-	if (result < 0) {
+	if (result < 0 && result != -ENODEV && result != -EPERM) {
+		/* ignoring unrecoverable errors */
 		dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
 			result);
 		goto error;
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
index a269937..8cb71bb 100644
--- a/drivers/uwb/neh.c
+++ b/drivers/uwb/neh.c
@@ -107,6 +107,7 @@
 	u8 evt_type;
 	__le16 evt;
 	u8 context;
+	u8 completed;
 	uwb_rc_cmd_cb_f cb;
 	void *arg;
 
@@ -409,6 +410,7 @@
 	struct device *dev = &rc->uwb_dev.dev;
 	struct uwb_rc_neh *neh;
 	struct uwb_rceb *notif;
+	unsigned long flags;
 
 	if (rceb->bEventContext == 0) {
 		notif = kmalloc(size, GFP_ATOMIC);
@@ -422,7 +424,11 @@
 	} else {
 		neh = uwb_rc_neh_lookup(rc, rceb);
 		if (neh) {
-			del_timer_sync(&neh->timer);
+			spin_lock_irqsave(&rc->neh_lock, flags);
+			/* to guard against a timeout */
+			neh->completed = 1;
+			del_timer(&neh->timer);
+			spin_unlock_irqrestore(&rc->neh_lock, flags);
 			uwb_rc_neh_cb(neh, rceb, size);
 		} else
 			dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
@@ -568,6 +574,10 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&rc->neh_lock, flags);
+	if (neh->completed) {
+		spin_unlock_irqrestore(&rc->neh_lock, flags);
+		return;
+	}
 	if (neh->context)
 		__uwb_rc_neh_rm(rc, neh);
 	else
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f0da2c3..1f21d2a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -238,7 +238,7 @@
 
 				vq->heads[vq->upend_idx].len = len;
 				ubuf->callback = vhost_zerocopy_callback;
-				ubuf->arg = vq->ubufs;
+				ubuf->ctx = vq->ubufs;
 				ubuf->desc = vq->upend_idx;
 				msg.msg_control = ubuf;
 				msg.msg_controllen = sizeof(ubuf);
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index fc9a1d7..3de00d9 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -155,7 +155,7 @@
 
 	vhost_test_stop(n, &private);
 	vhost_test_flush(n);
-	vhost_dev_cleanup(&n->dev);
+	vhost_dev_cleanup(&n->dev, false);
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_test_flush(n);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 947f00d..51e4c1e 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1598,10 +1598,9 @@
 	kfree(ubufs);
 }
 
-void vhost_zerocopy_callback(void *arg)
+void vhost_zerocopy_callback(struct ubuf_info *ubuf)
 {
-	struct ubuf_info *ubuf = arg;
-	struct vhost_ubuf_ref *ubufs = ubuf->arg;
+	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
 	struct vhost_virtqueue *vq = ubufs->vq;
 
 	/* set len = 1 to mark this desc buffers done DMA */
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 8dcf4cc..8de1fd5 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -188,7 +188,7 @@
 
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 		    unsigned int log_num, u64 len);
-void vhost_zerocopy_callback(void *arg);
+void vhost_zerocopy_callback(struct ubuf_info *);
 int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
 
 #define vq_err(vq, fmt, ...) do {                                  \
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index befcbd8..ffbce45 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -499,7 +499,8 @@
 	au1100fb_fix.mmio_start = regs_res->start;
 	au1100fb_fix.mmio_len = resource_size(regs_res);
 
-	if (!devm_request_mem_region(au1100fb_fix.mmio_start,
+	if (!devm_request_mem_region(&dev->dev,
+				     au1100fb_fix.mmio_start,
 				     au1100fb_fix.mmio_len,
 				     DRIVER_NAME)) {
 		print_err("fail to lock memory region at 0x%08lx",
@@ -516,7 +517,7 @@
 	fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres *
 		  	(fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS;
 
-	fbdev->fb_mem = dmam_alloc_coherent(&dev->dev, &dev->dev,
+	fbdev->fb_mem = dmam_alloc_coherent(&dev->dev,
 					    PAGE_ALIGN(fbdev->fb_len),
 					    &fbdev->fb_phys, GFP_KERNEL);
 	if (!fbdev->fb_mem) {
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 3e9a773..7ca79f0 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1724,7 +1724,7 @@
 		/* Allocate the framebuffer to the maximum screen size */
 		fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;
 
-		fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev, &dev->dev,
+		fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev,
 				PAGE_ALIGN(fbdev->fb_len),
 				&fbdev->fb_phys, GFP_KERNEL);
 		if (!fbdev->fb_mem) {
diff --git a/drivers/video/kyro/STG4000Reg.h b/drivers/video/kyro/STG4000Reg.h
index 5d62698..50f4670 100644
--- a/drivers/video/kyro/STG4000Reg.h
+++ b/drivers/video/kyro/STG4000Reg.h
@@ -73,210 +73,210 @@
 /* Register Table */
 typedef struct {
 	/* 0h  */
-	volatile unsigned long Thread0Enable;	/* 0x0000 */
-	volatile unsigned long Thread1Enable;	/* 0x0004 */
-	volatile unsigned long Thread0Recover;	/* 0x0008 */
-	volatile unsigned long Thread1Recover;	/* 0x000C */
-	volatile unsigned long Thread0Step;	/* 0x0010 */
-	volatile unsigned long Thread1Step;	/* 0x0014 */
-	volatile unsigned long VideoInStatus;	/* 0x0018 */
-	volatile unsigned long Core2InSignStart;	/* 0x001C */
-	volatile unsigned long Core1ResetVector;	/* 0x0020 */
-	volatile unsigned long Core1ROMOffset;	/* 0x0024 */
-	volatile unsigned long Core1ArbiterPriority;	/* 0x0028 */
-	volatile unsigned long VideoInControl;	/* 0x002C */
-	volatile unsigned long VideoInReg0CtrlA;	/* 0x0030 */
-	volatile unsigned long VideoInReg0CtrlB;	/* 0x0034 */
-	volatile unsigned long VideoInReg1CtrlA;	/* 0x0038 */
-	volatile unsigned long VideoInReg1CtrlB;	/* 0x003C */
-	volatile unsigned long Thread0Kicker;	/* 0x0040 */
-	volatile unsigned long Core2InputSign;	/* 0x0044 */
-	volatile unsigned long Thread0ProgCtr;	/* 0x0048 */
-	volatile unsigned long Thread1ProgCtr;	/* 0x004C */
-	volatile unsigned long Thread1Kicker;	/* 0x0050 */
-	volatile unsigned long GPRegister1;	/* 0x0054 */
-	volatile unsigned long GPRegister2;	/* 0x0058 */
-	volatile unsigned long GPRegister3;	/* 0x005C */
-	volatile unsigned long GPRegister4;	/* 0x0060 */
-	volatile unsigned long SerialIntA;	/* 0x0064 */
+	volatile u32 Thread0Enable;	/* 0x0000 */
+	volatile u32 Thread1Enable;	/* 0x0004 */
+	volatile u32 Thread0Recover;	/* 0x0008 */
+	volatile u32 Thread1Recover;	/* 0x000C */
+	volatile u32 Thread0Step;	/* 0x0010 */
+	volatile u32 Thread1Step;	/* 0x0014 */
+	volatile u32 VideoInStatus;	/* 0x0018 */
+	volatile u32 Core2InSignStart;	/* 0x001C */
+	volatile u32 Core1ResetVector;	/* 0x0020 */
+	volatile u32 Core1ROMOffset;	/* 0x0024 */
+	volatile u32 Core1ArbiterPriority;	/* 0x0028 */
+	volatile u32 VideoInControl;	/* 0x002C */
+	volatile u32 VideoInReg0CtrlA;	/* 0x0030 */
+	volatile u32 VideoInReg0CtrlB;	/* 0x0034 */
+	volatile u32 VideoInReg1CtrlA;	/* 0x0038 */
+	volatile u32 VideoInReg1CtrlB;	/* 0x003C */
+	volatile u32 Thread0Kicker;	/* 0x0040 */
+	volatile u32 Core2InputSign;	/* 0x0044 */
+	volatile u32 Thread0ProgCtr;	/* 0x0048 */
+	volatile u32 Thread1ProgCtr;	/* 0x004C */
+	volatile u32 Thread1Kicker;	/* 0x0050 */
+	volatile u32 GPRegister1;	/* 0x0054 */
+	volatile u32 GPRegister2;	/* 0x0058 */
+	volatile u32 GPRegister3;	/* 0x005C */
+	volatile u32 GPRegister4;	/* 0x0060 */
+	volatile u32 SerialIntA;	/* 0x0064 */
 
-	volatile unsigned long Fill0[6];	/* GAP 0x0068 - 0x007C */
+	volatile u32 Fill0[6];	/* GAP 0x0068 - 0x007C */
 
-	volatile unsigned long SoftwareReset;	/* 0x0080 */
-	volatile unsigned long SerialIntB;	/* 0x0084 */
+	volatile u32 SoftwareReset;	/* 0x0080 */
+	volatile u32 SerialIntB;	/* 0x0084 */
 
-	volatile unsigned long Fill1[37];	/* GAP 0x0088 - 0x011C */
+	volatile u32 Fill1[37];	/* GAP 0x0088 - 0x011C */
 
-	volatile unsigned long ROMELQV;	/* 0x011C */
-	volatile unsigned long WLWH;	/* 0x0120 */
-	volatile unsigned long ROMELWL;	/* 0x0124 */
+	volatile u32 ROMELQV;	/* 0x011C */
+	volatile u32 WLWH;	/* 0x0120 */
+	volatile u32 ROMELWL;	/* 0x0124 */
 
-	volatile unsigned long dwFill_1;	/* GAP 0x0128 */
+	volatile u32 dwFill_1;	/* GAP 0x0128 */
 
-	volatile unsigned long IntStatus;	/* 0x012C */
-	volatile unsigned long IntMask;	/* 0x0130 */
-	volatile unsigned long IntClear;	/* 0x0134 */
+	volatile u32 IntStatus;	/* 0x012C */
+	volatile u32 IntMask;	/* 0x0130 */
+	volatile u32 IntClear;	/* 0x0134 */
 
-	volatile unsigned long Fill2[6];	/* GAP 0x0138 - 0x014C */
+	volatile u32 Fill2[6];	/* GAP 0x0138 - 0x014C */
 
-	volatile unsigned long ROMGPIOA;	/* 0x0150 */
-	volatile unsigned long ROMGPIOB;	/* 0x0154 */
-	volatile unsigned long ROMGPIOC;	/* 0x0158 */
-	volatile unsigned long ROMGPIOD;	/* 0x015C */
+	volatile u32 ROMGPIOA;	/* 0x0150 */
+	volatile u32 ROMGPIOB;	/* 0x0154 */
+	volatile u32 ROMGPIOC;	/* 0x0158 */
+	volatile u32 ROMGPIOD;	/* 0x015C */
 
-	volatile unsigned long Fill3[2];	/* GAP 0x0160 - 0x0168 */
+	volatile u32 Fill3[2];	/* GAP 0x0160 - 0x0168 */
 
-	volatile unsigned long AGPIntID;	/* 0x0168 */
-	volatile unsigned long AGPIntClassCode;	/* 0x016C */
-	volatile unsigned long AGPIntBIST;	/* 0x0170 */
-	volatile unsigned long AGPIntSSID;	/* 0x0174 */
-	volatile unsigned long AGPIntPMCSR;	/* 0x0178 */
-	volatile unsigned long VGAFrameBufBase;	/* 0x017C */
-	volatile unsigned long VGANotify;	/* 0x0180 */
-	volatile unsigned long DACPLLMode;	/* 0x0184 */
-	volatile unsigned long Core1VideoClockDiv;	/* 0x0188 */
-	volatile unsigned long AGPIntStat;	/* 0x018C */
+	volatile u32 AGPIntID;	/* 0x0168 */
+	volatile u32 AGPIntClassCode;	/* 0x016C */
+	volatile u32 AGPIntBIST;	/* 0x0170 */
+	volatile u32 AGPIntSSID;	/* 0x0174 */
+	volatile u32 AGPIntPMCSR;	/* 0x0178 */
+	volatile u32 VGAFrameBufBase;	/* 0x017C */
+	volatile u32 VGANotify;	/* 0x0180 */
+	volatile u32 DACPLLMode;	/* 0x0184 */
+	volatile u32 Core1VideoClockDiv;	/* 0x0188 */
+	volatile u32 AGPIntStat;	/* 0x018C */
 
 	/*
-	   volatile unsigned long Fill4[0x0400/4 - 0x0190/4]; //GAP 0x0190 - 0x0400
-	   volatile unsigned long Fill5[0x05FC/4 - 0x0400/4]; //GAP 0x0400 - 0x05FC Fog Table
-	   volatile unsigned long Fill6[0x0604/4 - 0x0600/4]; //GAP 0x0600 - 0x0604
-	   volatile unsigned long Fill7[0x0680/4 - 0x0608/4]; //GAP 0x0608 - 0x0680
-	   volatile unsigned long Fill8[0x07FC/4 - 0x0684/4]; //GAP 0x0684 - 0x07FC
+	   volatile u32 Fill4[0x0400/4 - 0x0190/4]; //GAP 0x0190 - 0x0400
+	   volatile u32 Fill5[0x05FC/4 - 0x0400/4]; //GAP 0x0400 - 0x05FC Fog Table
+	   volatile u32 Fill6[0x0604/4 - 0x0600/4]; //GAP 0x0600 - 0x0604
+	   volatile u32 Fill7[0x0680/4 - 0x0608/4]; //GAP 0x0608 - 0x0680
+	   volatile u32 Fill8[0x07FC/4 - 0x0684/4]; //GAP 0x0684 - 0x07FC
 	 */
-	volatile unsigned long Fill4[412];	/* 0x0190 - 0x07FC */
+	volatile u32 Fill4[412];	/* 0x0190 - 0x07FC */
 
-	volatile unsigned long TACtrlStreamBase;	/* 0x0800 */
-	volatile unsigned long TAObjDataBase;	/* 0x0804 */
-	volatile unsigned long TAPtrDataBase;	/* 0x0808 */
-	volatile unsigned long TARegionDataBase;	/* 0x080C */
-	volatile unsigned long TATailPtrBase;	/* 0x0810 */
-	volatile unsigned long TAPtrRegionSize;	/* 0x0814 */
-	volatile unsigned long TAConfiguration;	/* 0x0818 */
-	volatile unsigned long TAObjDataStartAddr;	/* 0x081C */
-	volatile unsigned long TAObjDataEndAddr;	/* 0x0820 */
-	volatile unsigned long TAXScreenClip;	/* 0x0824 */
-	volatile unsigned long TAYScreenClip;	/* 0x0828 */
-	volatile unsigned long TARHWClamp;	/* 0x082C */
-	volatile unsigned long TARHWCompare;	/* 0x0830 */
-	volatile unsigned long TAStart;	/* 0x0834 */
-	volatile unsigned long TAObjReStart;	/* 0x0838 */
-	volatile unsigned long TAPtrReStart;	/* 0x083C */
-	volatile unsigned long TAStatus1;	/* 0x0840 */
-	volatile unsigned long TAStatus2;	/* 0x0844 */
-	volatile unsigned long TAIntStatus;	/* 0x0848 */
-	volatile unsigned long TAIntMask;	/* 0x084C */
+	volatile u32 TACtrlStreamBase;	/* 0x0800 */
+	volatile u32 TAObjDataBase;	/* 0x0804 */
+	volatile u32 TAPtrDataBase;	/* 0x0808 */
+	volatile u32 TARegionDataBase;	/* 0x080C */
+	volatile u32 TATailPtrBase;	/* 0x0810 */
+	volatile u32 TAPtrRegionSize;	/* 0x0814 */
+	volatile u32 TAConfiguration;	/* 0x0818 */
+	volatile u32 TAObjDataStartAddr;	/* 0x081C */
+	volatile u32 TAObjDataEndAddr;	/* 0x0820 */
+	volatile u32 TAXScreenClip;	/* 0x0824 */
+	volatile u32 TAYScreenClip;	/* 0x0828 */
+	volatile u32 TARHWClamp;	/* 0x082C */
+	volatile u32 TARHWCompare;	/* 0x0830 */
+	volatile u32 TAStart;	/* 0x0834 */
+	volatile u32 TAObjReStart;	/* 0x0838 */
+	volatile u32 TAPtrReStart;	/* 0x083C */
+	volatile u32 TAStatus1;	/* 0x0840 */
+	volatile u32 TAStatus2;	/* 0x0844 */
+	volatile u32 TAIntStatus;	/* 0x0848 */
+	volatile u32 TAIntMask;	/* 0x084C */
 
-	volatile unsigned long Fill5[235];	/* GAP 0x0850 - 0x0BF8 */
+	volatile u32 Fill5[235];	/* GAP 0x0850 - 0x0BF8 */
 
-	volatile unsigned long TextureAddrThresh;	/* 0x0BFC */
-	volatile unsigned long Core1Translation;	/* 0x0C00 */
-	volatile unsigned long TextureAddrReMap;	/* 0x0C04 */
-	volatile unsigned long RenderOutAGPRemap;	/* 0x0C08 */
-	volatile unsigned long _3DRegionReadTrans;	/* 0x0C0C */
-	volatile unsigned long _3DPtrReadTrans;	/* 0x0C10 */
-	volatile unsigned long _3DParamReadTrans;	/* 0x0C14 */
-	volatile unsigned long _3DRegionReadThresh;	/* 0x0C18 */
-	volatile unsigned long _3DPtrReadThresh;	/* 0x0C1C */
-	volatile unsigned long _3DParamReadThresh;	/* 0x0C20 */
-	volatile unsigned long _3DRegionReadAGPRemap;	/* 0x0C24 */
-	volatile unsigned long _3DPtrReadAGPRemap;	/* 0x0C28 */
-	volatile unsigned long _3DParamReadAGPRemap;	/* 0x0C2C */
-	volatile unsigned long ZBufferAGPRemap;	/* 0x0C30 */
-	volatile unsigned long TAIndexAGPRemap;	/* 0x0C34 */
-	volatile unsigned long TAVertexAGPRemap;	/* 0x0C38 */
-	volatile unsigned long TAUVAddrTrans;	/* 0x0C3C */
-	volatile unsigned long TATailPtrCacheTrans;	/* 0x0C40 */
-	volatile unsigned long TAParamWriteTrans;	/* 0x0C44 */
-	volatile unsigned long TAPtrWriteTrans;	/* 0x0C48 */
-	volatile unsigned long TAParamWriteThresh;	/* 0x0C4C */
-	volatile unsigned long TAPtrWriteThresh;	/* 0x0C50 */
-	volatile unsigned long TATailPtrCacheAGPRe;	/* 0x0C54 */
-	volatile unsigned long TAParamWriteAGPRe;	/* 0x0C58 */
-	volatile unsigned long TAPtrWriteAGPRe;	/* 0x0C5C */
-	volatile unsigned long SDRAMArbiterConf;	/* 0x0C60 */
-	volatile unsigned long SDRAMConf0;	/* 0x0C64 */
-	volatile unsigned long SDRAMConf1;	/* 0x0C68 */
-	volatile unsigned long SDRAMConf2;	/* 0x0C6C */
-	volatile unsigned long SDRAMRefresh;	/* 0x0C70 */
-	volatile unsigned long SDRAMPowerStat;	/* 0x0C74 */
+	volatile u32 TextureAddrThresh;	/* 0x0BFC */
+	volatile u32 Core1Translation;	/* 0x0C00 */
+	volatile u32 TextureAddrReMap;	/* 0x0C04 */
+	volatile u32 RenderOutAGPRemap;	/* 0x0C08 */
+	volatile u32 _3DRegionReadTrans;	/* 0x0C0C */
+	volatile u32 _3DPtrReadTrans;	/* 0x0C10 */
+	volatile u32 _3DParamReadTrans;	/* 0x0C14 */
+	volatile u32 _3DRegionReadThresh;	/* 0x0C18 */
+	volatile u32 _3DPtrReadThresh;	/* 0x0C1C */
+	volatile u32 _3DParamReadThresh;	/* 0x0C20 */
+	volatile u32 _3DRegionReadAGPRemap;	/* 0x0C24 */
+	volatile u32 _3DPtrReadAGPRemap;	/* 0x0C28 */
+	volatile u32 _3DParamReadAGPRemap;	/* 0x0C2C */
+	volatile u32 ZBufferAGPRemap;	/* 0x0C30 */
+	volatile u32 TAIndexAGPRemap;	/* 0x0C34 */
+	volatile u32 TAVertexAGPRemap;	/* 0x0C38 */
+	volatile u32 TAUVAddrTrans;	/* 0x0C3C */
+	volatile u32 TATailPtrCacheTrans;	/* 0x0C40 */
+	volatile u32 TAParamWriteTrans;	/* 0x0C44 */
+	volatile u32 TAPtrWriteTrans;	/* 0x0C48 */
+	volatile u32 TAParamWriteThresh;	/* 0x0C4C */
+	volatile u32 TAPtrWriteThresh;	/* 0x0C50 */
+	volatile u32 TATailPtrCacheAGPRe;	/* 0x0C54 */
+	volatile u32 TAParamWriteAGPRe;	/* 0x0C58 */
+	volatile u32 TAPtrWriteAGPRe;	/* 0x0C5C */
+	volatile u32 SDRAMArbiterConf;	/* 0x0C60 */
+	volatile u32 SDRAMConf0;	/* 0x0C64 */
+	volatile u32 SDRAMConf1;	/* 0x0C68 */
+	volatile u32 SDRAMConf2;	/* 0x0C6C */
+	volatile u32 SDRAMRefresh;	/* 0x0C70 */
+	volatile u32 SDRAMPowerStat;	/* 0x0C74 */
 
-	volatile unsigned long Fill6[2];	/* GAP 0x0C78 - 0x0C7C */
+	volatile u32 Fill6[2];	/* GAP 0x0C78 - 0x0C7C */
 
-	volatile unsigned long RAMBistData;	/* 0x0C80 */
-	volatile unsigned long RAMBistCtrl;	/* 0x0C84 */
-	volatile unsigned long FIFOBistKey;	/* 0x0C88 */
-	volatile unsigned long RAMBistResult;	/* 0x0C8C */
-	volatile unsigned long FIFOBistResult;	/* 0x0C90 */
+	volatile u32 RAMBistData;	/* 0x0C80 */
+	volatile u32 RAMBistCtrl;	/* 0x0C84 */
+	volatile u32 FIFOBistKey;	/* 0x0C88 */
+	volatile u32 RAMBistResult;	/* 0x0C8C */
+	volatile u32 FIFOBistResult;	/* 0x0C90 */
 
 	/*
-	   volatile unsigned long Fill11[0x0CBC/4 - 0x0C94/4]; //GAP 0x0C94 - 0x0CBC
-	   volatile unsigned long Fill12[0x0CD0/4 - 0x0CC0/4]; //GAP 0x0CC0 - 0x0CD0 3DRegisters
+	   volatile u32 Fill11[0x0CBC/4 - 0x0C94/4]; //GAP 0x0C94 - 0x0CBC
+	   volatile u32 Fill12[0x0CD0/4 - 0x0CC0/4]; //GAP 0x0CC0 - 0x0CD0 3DRegisters
 	 */
 
-	volatile unsigned long Fill7[16];	/* 0x0c94 - 0x0cd0 */
+	volatile u32 Fill7[16];	/* 0x0c94 - 0x0cd0 */
 
-	volatile unsigned long SDRAMAddrSign;	/* 0x0CD4 */
-	volatile unsigned long SDRAMDataSign;	/* 0x0CD8 */
-	volatile unsigned long SDRAMSignConf;	/* 0x0CDC */
+	volatile u32 SDRAMAddrSign;	/* 0x0CD4 */
+	volatile u32 SDRAMDataSign;	/* 0x0CD8 */
+	volatile u32 SDRAMSignConf;	/* 0x0CDC */
 
 	/* DWFILL; //GAP 0x0CE0 */
-	volatile unsigned long dwFill_2;
+	volatile u32 dwFill_2;
 
-	volatile unsigned long ISPSignature;	/* 0x0CE4 */
+	volatile u32 ISPSignature;	/* 0x0CE4 */
 
-	volatile unsigned long Fill8[454];	/*GAP 0x0CE8 - 0x13FC */
+	volatile u32 Fill8[454];	/*GAP 0x0CE8 - 0x13FC */
 
-	volatile unsigned long DACPrimAddress;	/* 0x1400 */
-	volatile unsigned long DACPrimSize;	/* 0x1404 */
-	volatile unsigned long DACCursorAddr;	/* 0x1408 */
-	volatile unsigned long DACCursorCtrl;	/* 0x140C */
-	volatile unsigned long DACOverlayAddr;	/* 0x1410 */
-	volatile unsigned long DACOverlayUAddr;	/* 0x1414 */
-	volatile unsigned long DACOverlayVAddr;	/* 0x1418 */
-	volatile unsigned long DACOverlaySize;	/* 0x141C */
-	volatile unsigned long DACOverlayVtDec;	/* 0x1420 */
+	volatile u32 DACPrimAddress;	/* 0x1400 */
+	volatile u32 DACPrimSize;	/* 0x1404 */
+	volatile u32 DACCursorAddr;	/* 0x1408 */
+	volatile u32 DACCursorCtrl;	/* 0x140C */
+	volatile u32 DACOverlayAddr;	/* 0x1410 */
+	volatile u32 DACOverlayUAddr;	/* 0x1414 */
+	volatile u32 DACOverlayVAddr;	/* 0x1418 */
+	volatile u32 DACOverlaySize;	/* 0x141C */
+	volatile u32 DACOverlayVtDec;	/* 0x1420 */
 
-	volatile unsigned long Fill9[9];	/* GAP 0x1424 - 0x1444 */
+	volatile u32 Fill9[9];	/* GAP 0x1424 - 0x1444 */
 
-	volatile unsigned long DACVerticalScal;	/* 0x1448 */
-	volatile unsigned long DACPixelFormat;	/* 0x144C */
-	volatile unsigned long DACHorizontalScal;	/* 0x1450 */
-	volatile unsigned long DACVidWinStart;	/* 0x1454 */
-	volatile unsigned long DACVidWinEnd;	/* 0x1458 */
-	volatile unsigned long DACBlendCtrl;	/* 0x145C */
-	volatile unsigned long DACHorTim1;	/* 0x1460 */
-	volatile unsigned long DACHorTim2;	/* 0x1464 */
-	volatile unsigned long DACHorTim3;	/* 0x1468 */
-	volatile unsigned long DACVerTim1;	/* 0x146C */
-	volatile unsigned long DACVerTim2;	/* 0x1470 */
-	volatile unsigned long DACVerTim3;	/* 0x1474 */
-	volatile unsigned long DACBorderColor;	/* 0x1478 */
-	volatile unsigned long DACSyncCtrl;	/* 0x147C */
-	volatile unsigned long DACStreamCtrl;	/* 0x1480 */
-	volatile unsigned long DACLUTAddress;	/* 0x1484 */
-	volatile unsigned long DACLUTData;	/* 0x1488 */
-	volatile unsigned long DACBurstCtrl;	/* 0x148C */
-	volatile unsigned long DACCrcTrigger;	/* 0x1490 */
-	volatile unsigned long DACCrcDone;	/* 0x1494 */
-	volatile unsigned long DACCrcResult1;	/* 0x1498 */
-	volatile unsigned long DACCrcResult2;	/* 0x149C */
-	volatile unsigned long DACLinecount;	/* 0x14A0 */
+	volatile u32 DACVerticalScal;	/* 0x1448 */
+	volatile u32 DACPixelFormat;	/* 0x144C */
+	volatile u32 DACHorizontalScal;	/* 0x1450 */
+	volatile u32 DACVidWinStart;	/* 0x1454 */
+	volatile u32 DACVidWinEnd;	/* 0x1458 */
+	volatile u32 DACBlendCtrl;	/* 0x145C */
+	volatile u32 DACHorTim1;	/* 0x1460 */
+	volatile u32 DACHorTim2;	/* 0x1464 */
+	volatile u32 DACHorTim3;	/* 0x1468 */
+	volatile u32 DACVerTim1;	/* 0x146C */
+	volatile u32 DACVerTim2;	/* 0x1470 */
+	volatile u32 DACVerTim3;	/* 0x1474 */
+	volatile u32 DACBorderColor;	/* 0x1478 */
+	volatile u32 DACSyncCtrl;	/* 0x147C */
+	volatile u32 DACStreamCtrl;	/* 0x1480 */
+	volatile u32 DACLUTAddress;	/* 0x1484 */
+	volatile u32 DACLUTData;	/* 0x1488 */
+	volatile u32 DACBurstCtrl;	/* 0x148C */
+	volatile u32 DACCrcTrigger;	/* 0x1490 */
+	volatile u32 DACCrcDone;	/* 0x1494 */
+	volatile u32 DACCrcResult1;	/* 0x1498 */
+	volatile u32 DACCrcResult2;	/* 0x149C */
+	volatile u32 DACLinecount;	/* 0x14A0 */
 
-	volatile unsigned long Fill10[151];	/*GAP 0x14A4 - 0x16FC */
+	volatile u32 Fill10[151];	/*GAP 0x14A4 - 0x16FC */
 
-	volatile unsigned long DigVidPortCtrl;	/* 0x1700 */
-	volatile unsigned long DigVidPortStat;	/* 0x1704 */
+	volatile u32 DigVidPortCtrl;	/* 0x1700 */
+	volatile u32 DigVidPortStat;	/* 0x1704 */
 
 	/*
-	   volatile unsigned long Fill11[0x1FFC/4 - 0x1708/4]; //GAP 0x1708 - 0x1FFC
-	   volatile unsigned long Fill17[0x3000/4 - 0x2FFC/4]; //GAP 0x2000 - 0x2FFC ALUT
+	   volatile u32 Fill11[0x1FFC/4 - 0x1708/4]; //GAP 0x1708 - 0x1FFC
+	   volatile u32 Fill17[0x3000/4 - 0x2FFC/4]; //GAP 0x2000 - 0x2FFC ALUT
 	 */
 
-	volatile unsigned long Fill11[1598];
+	volatile u32 Fill11[1598];
 
 	/* DWFILL; //GAP 0x3000          ALUT 256MB offset */
-	volatile unsigned long Fill_3;
+	volatile u32 Fill_3;
 
 } STG4000REG;
 
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
index 4527cbf..b061d70 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/msm/mddi.c
@@ -420,7 +420,7 @@
 	mddi_set_auto_hibernate(&mddi->client_data, 1);
 }
 
-static int __init mddi_get_client_caps(struct mddi_info *mddi)
+static int __devinit mddi_get_client_caps(struct mddi_info *mddi)
 {
 	int i, j;
 
@@ -622,9 +622,9 @@
 
 static struct mddi_info mddi_info[2];
 
-static int __init mddi_clk_setup(struct platform_device *pdev,
-				 struct mddi_info *mddi,
-				 unsigned long clk_rate)
+static int __devinit mddi_clk_setup(struct platform_device *pdev,
+				    struct mddi_info *mddi,
+				    unsigned long clk_rate)
 {
 	int ret;
 
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 260cca7..26e83d7 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -815,8 +815,15 @@
 	par->pmi_setpal = pmi_setpal;
 	par->ypan = ypan;
 
-	if (par->pmi_setpal || par->ypan)
-		uvesafb_vbe_getpmi(task, par);
+	if (par->pmi_setpal || par->ypan) {
+		if (__supported_pte_mask & _PAGE_NX) {
+			par->pmi_setpal = par->ypan = 0;
+			printk(KERN_WARNING "uvesafb: NX protection is active, "
+				"so the PMI will not be used.\n");
+		} else {
+			uvesafb_vbe_getpmi(task, par);
+		}
+	}
 #else
 	/* The protected mode interface is not available on non-x86. */
 	par->pmi_setpal = par->ypan = 0;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 05f0a80..c2d05a8 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -28,6 +28,13 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 
+/*
+ * Balloon device works in 4K page units.  So each page is pointed to by
+ * multiple balloon pages.  All memory counters in this driver are in balloon
+ * page units.
+ */
+#define VIRTIO_BALLOON_PAGES_PER_PAGE (PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
+
 struct virtio_balloon
 {
 	struct virtio_device *vdev;
@@ -42,8 +49,13 @@
 	/* Waiting for host to ack the pages we released. */
 	struct completion acked;
 
-	/* The pages we've told the Host we're not using. */
+	/* Number of balloon pages we've told the Host we're not using. */
 	unsigned int num_pages;
+	/*
+	 * The pages we've told the Host we're not using.
+	 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
+	 * to num_pages above.
+	 */
 	struct list_head pages;
 
 	/* The array of pfns we tell the Host about. */
@@ -66,7 +78,13 @@
 
 	BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
 	/* Convert pfn from Linux page size to balloon page size. */
-	return pfn >> (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT);
+	return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
+}
+
+static struct page *balloon_pfn_to_page(u32 pfn)
+{
+	BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
+	return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
 }
 
 static void balloon_ack(struct virtqueue *vq)
@@ -96,12 +114,23 @@
 	wait_for_completion(&vb->acked);
 }
 
+static void set_page_pfns(u32 pfns[], struct page *page)
+{
+	unsigned int i;
+
+	/* Set balloon pfns pointing at this page.
+	 * Note that the first pfn points at the start of the page. */
+	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
+		pfns[i] = page_to_balloon_pfn(page) + i;
+}
+
 static void fill_balloon(struct virtio_balloon *vb, size_t num)
 {
 	/* We can only do one array worth at a time. */
 	num = min(num, ARRAY_SIZE(vb->pfns));
 
-	for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
+	for (vb->num_pfns = 0; vb->num_pfns < num;
+	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
 		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY |
 					__GFP_NOMEMALLOC | __GFP_NOWARN);
 		if (!page) {
@@ -113,9 +142,9 @@
 			msleep(200);
 			break;
 		}
-		vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page);
+		set_page_pfns(vb->pfns + vb->num_pfns, page);
+		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
 		totalram_pages--;
-		vb->num_pages++;
 		list_add(&page->lru, &vb->pages);
 	}
 
@@ -130,8 +159,9 @@
 {
 	unsigned int i;
 
-	for (i = 0; i < num; i++) {
-		__free_page(pfn_to_page(pfns[i]));
+	/* Find the pfns pointing at the start of each page, then get and
+	 * free those pages. */
+	for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+		__free_page(balloon_pfn_to_page(pfns[i]));
 		totalram_pages++;
 	}
 }
@@ -143,11 +173,12 @@
 	/* We can only do one array worth at a time. */
 	num = min(num, ARRAY_SIZE(vb->pfns));
 
-	for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
+	for (vb->num_pfns = 0; vb->num_pfns < num;
+	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
 		page = list_first_entry(&vb->pages, struct page, lru);
 		list_del(&page->lru);
-		vb->pfns[vb->num_pfns] = page_to_balloon_pfn(page);
-		vb->num_pages--;
+		set_page_pfns(vb->pfns + vb->num_pfns, page);
+		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
 	}
 
 	/*
@@ -234,11 +265,14 @@
 
 static inline s64 towards_target(struct virtio_balloon *vb)
 {
-	u32 v;
+	__le32 v;
+	s64 target;
+
 	vb->vdev->config->get(vb->vdev,
 			      offsetof(struct virtio_balloon_config, num_pages),
 			      &v, sizeof(v));
-	return (s64)v - vb->num_pages;
+	target = le32_to_cpu(v);
+	return target - vb->num_pages;
 }
 
 static void update_balloon_size(struct virtio_balloon *vb)
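
The new VIRTIO_BALLOON_PAGES_PER_PAGE constant is what lets the driver work on
kernels whose PAGE_SIZE is larger than the balloon's fixed 4K unit: every Linux
page is reported to the host as several consecutive balloon pfns.  A minimal
standalone sketch of the arithmetic (illustration only, not part of the patch;
the 64K page size is a hypothetical example):

	#include <stdio.h>

	#define EXAMPLE_PAGE_SHIFT		16	/* hypothetical 64K pages */
	#define EXAMPLE_BALLOON_PFN_SHIFT	12	/* balloon always uses 4K units */
	#define PAGES_PER_PAGE \
		(1UL << (EXAMPLE_PAGE_SHIFT - EXAMPLE_BALLOON_PFN_SHIFT))

	int main(void)
	{
		unsigned long linux_pfn = 42;
		unsigned long first = linux_pfn * PAGES_PER_PAGE;
		unsigned long i;

		/* One 64K Linux page becomes 16 consecutive 4K balloon pfns. */
		for (i = 0; i < PAGES_PER_PAGE; i++)
			printf("balloon pfn %lu\n", first + i);
		return 0;
	}

With 4K pages the constant is 1 and the driver behaves exactly as before.
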
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 99d8151..1ffd03b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -722,7 +722,7 @@
 	vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
 
 	if (use_ptemod)
-		vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP;
+		vma->vm_flags |= VM_DONTCOPY;
 
 	vma->vm_private_data = map;
 
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index b4d4eac..f100ce2 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1029,6 +1029,7 @@
 	int i;
 	unsigned int max_nr_glist_frames, nr_glist_frames;
 	unsigned int nr_init_grefs;
+	int ret;
 
 	nr_grant_frames = 1;
 	boot_max_nr_grant_frames = __max_nr_grant_frames();
@@ -1047,12 +1048,16 @@
 	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
 	for (i = 0; i < nr_glist_frames; i++) {
 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
-		if (gnttab_list[i] == NULL)
+		if (gnttab_list[i] == NULL) {
+			ret = -ENOMEM;
 			goto ini_nomem;
+		}
 	}
 
-	if (gnttab_resume() < 0)
-		return -ENODEV;
+	if (gnttab_resume() < 0) {
+		ret = -ENODEV;
+		goto ini_nomem;
+	}
 
 	nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
 
@@ -1070,7 +1075,7 @@
 	for (i--; i >= 0; i--)
 		free_page((unsigned long)gnttab_list[i]);
 	kfree(gnttab_list);
-	return -ENOMEM;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(gnttab_init);
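
Both failure paths in gnttab_init() now unwind through the same ini_nomem
label, so the pages already allocated for gnttab_list are freed and the caller
sees the real cause (-ENOMEM vs -ENODEV) instead of a leaked allocation.  A
small userspace sketch of that single-unwind-label pattern (the names and error
numbers are illustrative, not kernel APIs):

	#include <stdlib.h>

	#define EXAMPLE_ENOMEM	12
	#define EXAMPLE_ENODEV	19

	static int probe_ok = 1;		/* stand-in for gnttab_resume() */

	static int init_frames(void **frames, int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			frames[i] = malloc(4096);
			if (!frames[i]) {
				ret = -EXAMPLE_ENOMEM;
				goto unwind;
			}
		}

		if (!probe_ok) {
			ret = -EXAMPLE_ENODEV;
			goto unwind;		/* still frees what we allocated */
		}
		return 0;

	unwind:
		for (i--; i >= 0; i--)
			free(frames[i]);
		return ret;			/* caller sees the real cause */
	}

	int main(void)
	{
		void *frames[4];

		return init_frames(frames, 4) ? 1 : 0;
	}
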
 
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 9e14ae6..412b96c 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -132,6 +132,7 @@
 	err = dpm_suspend_end(PMSG_FREEZE);
 	if (err) {
 		printk(KERN_ERR "dpm_suspend_end failed: %d\n", err);
+		si.cancelled = 0;
 		goto out_resume;
 	}
 
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 63616d7..97f5d26 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -234,7 +234,7 @@
 	if (dev_data)
 		dev_data->ack_intr = 0;
 
-	return result;
+	return result > 0 ? 0 : result;
 }
 
 static
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index f20c5f1..a31b54d 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -135,7 +135,7 @@
 	return xenbus_read_otherend_details(xendev, "backend-id", "backend");
 }
 
-static int is_device_connecting(struct device *dev, void *data)
+static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential)
 {
 	struct xenbus_device *xendev = to_xenbus_device(dev);
 	struct device_driver *drv = data;
@@ -152,16 +152,41 @@
 	if (drv && (dev->driver != drv))
 		return 0;
 
+	if (ignore_nonessential) {
+		/* With older QEMU, for PVonHVM guests the guest config files
+		/* With older QEMU, the config files of PV-on-HVM guests
+		 * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0']
+		 * which is nonsensical, as there is no PV FB (though there
+		 * can be a PVKB) when running as an HVM guest. */
+		if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0))
+			return 0;
+
+		if ((strncmp(xendev->nodename, "device/vfb", 10) == 0))
+			return 0;
+	}
 	xendrv = to_xenbus_driver(dev->driver);
 	return (xendev->state < XenbusStateConnected ||
 		(xendev->state == XenbusStateConnected &&
 		 xendrv->is_ready && !xendrv->is_ready(xendev)));
 }
+static int essential_device_connecting(struct device *dev, void *data)
+{
+	return is_device_connecting(dev, data, true /* ignore PVKB + PVFB */);
+}
+
+static int non_essential_device_connecting(struct device *dev, void *data)
+{
+	return is_device_connecting(dev, data, false);
+}
 
-static int exists_connecting_device(struct device_driver *drv)
+static int exists_essential_connecting_device(struct device_driver *drv)
 {
 	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-				is_device_connecting);
+				essential_device_connecting);
+}
+
+static int exists_non_essential_connecting_device(struct device_driver *drv)
+{
+	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+				non_essential_device_connecting);
 }
 
 static int print_device_status(struct device *dev, void *data)
@@ -192,6 +217,23 @@
 /* We only wait for device setup after most initcalls have run. */
 static int ready_to_wait_for_devices;
 
+static bool wait_loop(unsigned long start, unsigned int max_delay,
+		     unsigned int *seconds_waited)
+{
+	if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) {
+		if (!*seconds_waited)
+			printk(KERN_WARNING "XENBUS: Waiting for "
+			       "devices to initialise: ");
+		*seconds_waited += 5;
+		printk("%us...", max_delay - *seconds_waited);
+		if (*seconds_waited == max_delay)
+			return true;
+	}
+
+	schedule_timeout_interruptible(HZ/10);
+
+	return false;
+}
+
 /*
  * On a 5-minute timeout, wait for all devices currently configured.  We need
  * to do this to guarantee that the filesystems and / or network devices
@@ -215,19 +257,14 @@
 	if (!ready_to_wait_for_devices || !xen_domain())
 		return;
 
-	while (exists_connecting_device(drv)) {
-		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
-			if (!seconds_waited)
-				printk(KERN_WARNING "XENBUS: Waiting for "
-				       "devices to initialise: ");
-			seconds_waited += 5;
-			printk("%us...", 300 - seconds_waited);
-			if (seconds_waited == 300)
-				break;
-		}
+	while (exists_non_essential_connecting_device(drv))
+		if (wait_loop(start, 30, &seconds_waited))
+			break;
 
-		schedule_timeout_interruptible(HZ/10);
-	}
+	/* Skip the PVKB and PVFB checks. */
+	while (exists_essential_connecting_device(drv))
+		if (wait_loop(start, 270, &seconds_waited))
+			break;
 
 	if (seconds_waited)
 		printk("\n");
diff --git a/fs/aio.c b/fs/aio.c
index da88760..67a6db3 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -93,9 +93,8 @@
 		put_page(info->ring_pages[i]);
 
 	if (info->mmap_size) {
-		down_write(&ctx->mm->mmap_sem);
-		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
-		up_write(&ctx->mm->mmap_sem);
+		BUG_ON(ctx->mm != current->mm);
+		vm_munmap(info->mmap_base, info->mmap_size);
 	}
 
 	if (info->ring_pages && info->ring_pages != info->internal_pages)
@@ -389,6 +388,17 @@
 				"exit_aio:ioctx still alive: %d %d %d\n",
 				atomic_read(&ctx->users), ctx->dead,
 				ctx->reqs_active);
+		/*
+		 * We don't need to bother with munmap() here -
+		 * exit_mmap(mm) is coming and it'll unmap everything.
+		 * Since aio_free_ring() uses non-zero ->mmap_size
+		 * as indicator that it needs to unmap the area,
+		 * just set it to 0; aio_free_ring() is the only
+		 * place that uses ->mmap_size, so it's safe.
+		 * That way we get all munmap done to current->mm -
+		 * all other callers have ctx->mm == current->mm.
+		 */
+		ctx->ring_info.mmap_size = 0;
 		put_ioctx(ctx);
 	}
 }
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 2eb12f1..d146e18 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -50,9 +50,7 @@
 	end = PAGE_ALIGN(end);
 	if (end > start) {
 		unsigned long addr;
-		down_write(&current->mm->mmap_sem);
-		addr = do_brk(start, end - start);
-		up_write(&current->mm->mmap_sem);
+		addr = vm_brk(start, end - start);
 		if (BAD_ADDR(addr))
 			return addr;
 	}
@@ -280,9 +278,7 @@
 		pos = 32;
 		map_size = ex.a_text+ex.a_data;
 #endif
-		down_write(&current->mm->mmap_sem);
-		error = do_brk(text_addr & PAGE_MASK, map_size);
-		up_write(&current->mm->mmap_sem);
+		error = vm_brk(text_addr & PAGE_MASK, map_size);
 		if (error != (text_addr & PAGE_MASK)) {
 			send_sig(SIGKILL, current, 0);
 			return error;
@@ -313,9 +309,7 @@
 
 		if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
 			loff_t pos = fd_offset;
-			down_write(&current->mm->mmap_sem);
-			do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
-			up_write(&current->mm->mmap_sem);
+			vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
 			bprm->file->f_op->read(bprm->file,
 					(char __user *)N_TXTADDR(ex),
 					ex.a_text+ex.a_data, &pos);
@@ -325,24 +319,20 @@
 			goto beyond_if;
 		}
 
-		down_write(&current->mm->mmap_sem);
-		error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
+		error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
 			PROT_READ | PROT_EXEC,
 			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
 			fd_offset);
-		up_write(&current->mm->mmap_sem);
 
 		if (error != N_TXTADDR(ex)) {
 			send_sig(SIGKILL, current, 0);
 			return error;
 		}
 
-		down_write(&current->mm->mmap_sem);
- 		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+		error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
 				PROT_READ | PROT_WRITE | PROT_EXEC,
 				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
 				fd_offset + ex.a_text);
-		up_write(&current->mm->mmap_sem);
 		if (error != N_DATADDR(ex)) {
 			send_sig(SIGKILL, current, 0);
 			return error;
@@ -412,9 +402,7 @@
 			       "N_TXTOFF is not page aligned. Please convert library: %s\n",
 			       file->f_path.dentry->d_name.name);
 		}
-		down_write(&current->mm->mmap_sem);
-		do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
-		up_write(&current->mm->mmap_sem);
+		vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
 		
 		file->f_op->read(file, (char __user *)start_addr,
 			ex.a_text + ex.a_data, &pos);
@@ -425,12 +413,10 @@
 		goto out;
 	}
 	/* Now use mmap to map the library into memory. */
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
+	error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
 			PROT_READ | PROT_WRITE | PROT_EXEC,
 			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
 			N_TXTOFF(ex));
-	up_write(&current->mm->mmap_sem);
 	retval = error;
 	if (error != start_addr)
 		goto out;
@@ -438,9 +424,7 @@
 	len = PAGE_ALIGN(ex.a_text + ex.a_data);
 	bss = ex.a_text + ex.a_data + ex.a_bss;
 	if (bss > len) {
-		down_write(&current->mm->mmap_sem);
-		error = do_brk(start_addr + len, bss - len);
-		up_write(&current->mm->mmap_sem);
+		error = vm_brk(start_addr + len, bss - len);
 		retval = error;
 		if (error != start_addr + len)
 			goto out;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 48ffb3d..16f7354 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -82,9 +82,7 @@
 	end = ELF_PAGEALIGN(end);
 	if (end > start) {
 		unsigned long addr;
-		down_write(&current->mm->mmap_sem);
-		addr = do_brk(start, end - start);
-		up_write(&current->mm->mmap_sem);
+		addr = vm_brk(start, end - start);
 		if (BAD_ADDR(addr))
 			return addr;
 	}
@@ -514,9 +512,7 @@
 		elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
 
 		/* Map the last of the bss segment */
-		down_write(&current->mm->mmap_sem);
-		error = do_brk(elf_bss, last_bss - elf_bss);
-		up_write(&current->mm->mmap_sem);
+		error = vm_brk(elf_bss, last_bss - elf_bss);
 		if (BAD_ADDR(error))
 			goto out_close;
 	}
@@ -962,10 +958,8 @@
 		   and some applications "depend" upon this behavior.
 		   Since we do not have the power to recompile these, we
 		   emulate the SVr4 behavior. Sigh. */
-		down_write(&current->mm->mmap_sem);
-		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
+		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
 				MAP_FIXED | MAP_PRIVATE, 0);
-		up_write(&current->mm->mmap_sem);
 	}
 
 #ifdef ELF_PLAT_INIT
@@ -1050,8 +1044,7 @@
 		eppnt++;
 
 	/* Now use mmap to map the library into memory. */
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap(file,
+	error = vm_mmap(file,
 			ELF_PAGESTART(eppnt->p_vaddr),
 			(eppnt->p_filesz +
 			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
@@ -1059,7 +1052,6 @@
 			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
 			(eppnt->p_offset -
 			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
-	up_write(&current->mm->mmap_sem);
 	if (error != ELF_PAGESTART(eppnt->p_vaddr))
 		goto out_free_ph;
 
@@ -1072,11 +1064,8 @@
 	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
 			    ELF_MIN_ALIGN - 1);
 	bss = eppnt->p_memsz + eppnt->p_vaddr;
-	if (bss > len) {
-		down_write(&current->mm->mmap_sem);
-		do_brk(len, bss - len);
-		up_write(&current->mm->mmap_sem);
-	}
+	if (bss > len)
+		vm_brk(len, bss - len);
 	error = 0;
 
 out_free_ph:
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 9bd5612..d390a0f 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -390,21 +390,17 @@
 	    (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
 		stack_prot |= PROT_EXEC;
 
-	down_write(&current->mm->mmap_sem);
-	current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
+	current->mm->start_brk = vm_mmap(NULL, 0, stack_size, stack_prot,
 					 MAP_PRIVATE | MAP_ANONYMOUS |
 					 MAP_UNINITIALIZED | MAP_GROWSDOWN,
 					 0);
 
 	if (IS_ERR_VALUE(current->mm->start_brk)) {
-		up_write(&current->mm->mmap_sem);
 		retval = current->mm->start_brk;
 		current->mm->start_brk = 0;
 		goto error_kill;
 	}
 
-	up_write(&current->mm->mmap_sem);
-
 	current->mm->brk = current->mm->start_brk;
 	current->mm->context.end_brk = current->mm->start_brk;
 	current->mm->context.end_brk +=
@@ -955,10 +951,8 @@
 	if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
 		mflags |= MAP_EXECUTABLE;
 
-	down_write(&mm->mmap_sem);
-	maddr = do_mmap(NULL, load_addr, top - base,
+	maddr = vm_mmap(NULL, load_addr, top - base,
 			PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0);
-	up_write(&mm->mmap_sem);
 	if (IS_ERR_VALUE(maddr))
 		return (int) maddr;
 
@@ -1096,10 +1090,8 @@
 
 		/* create the mapping */
 		disp = phdr->p_vaddr & ~PAGE_MASK;
-		down_write(&mm->mmap_sem);
-		maddr = do_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
+		maddr = vm_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
 				phdr->p_offset - disp);
-		up_write(&mm->mmap_sem);
 
 		kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx",
 		       loop, phdr->p_memsz + disp, prot, flags,
@@ -1143,10 +1135,8 @@
 			unsigned long xmaddr;
 
 			flags |= MAP_FIXED | MAP_ANONYMOUS;
-			down_write(&mm->mmap_sem);
-			xmaddr = do_mmap(NULL, xaddr, excess - excess1,
+			xmaddr = vm_mmap(NULL, xaddr, excess - excess1,
 					 prot, flags, 0);
-			up_write(&mm->mmap_sem);
 
 			kdebug("mmap[%d] <anon>"
 			       " ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx",
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 024d20e..6b2daf9 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -542,10 +542,8 @@
 		 */
 		DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n");
 
-		down_write(&current->mm->mmap_sem);
-		textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
+		textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
 				  MAP_PRIVATE|MAP_EXECUTABLE, 0);
-		up_write(&current->mm->mmap_sem);
 		if (!textpos || IS_ERR_VALUE(textpos)) {
 			if (!textpos)
 				textpos = (unsigned long) -ENOMEM;
@@ -556,10 +554,8 @@
 
 		len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
 		len = PAGE_ALIGN(len);
-		down_write(&current->mm->mmap_sem);
-		realdatastart = do_mmap(0, 0, len,
+		realdatastart = vm_mmap(0, 0, len,
 			PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
-		up_write(&current->mm->mmap_sem);
 
 		if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) {
 			if (!realdatastart)
@@ -603,10 +599,8 @@
 
 		len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
 		len = PAGE_ALIGN(len);
-		down_write(&current->mm->mmap_sem);
-		textpos = do_mmap(0, 0, len,
+		textpos = vm_mmap(0, 0, len,
 			PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
-		up_write(&current->mm->mmap_sem);
 
 		if (!textpos || IS_ERR_VALUE(textpos)) {
 			if (!textpos)
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
index e4fc746..4517aaf 100644
--- a/fs/binfmt_som.c
+++ b/fs/binfmt_som.c
@@ -147,10 +147,8 @@
 	code_size = SOM_PAGEALIGN(hpuxhdr->exec_tsize);
 	current->mm->start_code = code_start;
 	current->mm->end_code = code_start + code_size;
-	down_write(&current->mm->mmap_sem);
-	retval = do_mmap(file, code_start, code_size, prot,
+	retval = vm_mmap(file, code_start, code_size, prot,
 			flags, SOM_PAGESTART(hpuxhdr->exec_tfile));
-	up_write(&current->mm->mmap_sem);
 	if (retval < 0 && retval > -1024)
 		goto out;
 
@@ -158,20 +156,16 @@
 	data_size = SOM_PAGEALIGN(hpuxhdr->exec_dsize);
 	current->mm->start_data = data_start;
 	current->mm->end_data = bss_start = data_start + data_size;
-	down_write(&current->mm->mmap_sem);
-	retval = do_mmap(file, data_start, data_size,
+	retval = vm_mmap(file, data_start, data_size,
 			prot | PROT_WRITE, flags,
 			SOM_PAGESTART(hpuxhdr->exec_dfile));
-	up_write(&current->mm->mmap_sem);
 	if (retval < 0 && retval > -1024)
 		goto out;
 
 	som_brk = bss_start + SOM_PAGEALIGN(hpuxhdr->exec_bsize);
 	current->mm->start_brk = current->mm->brk = som_brk;
-	down_write(&current->mm->mmap_sem);
-	retval = do_mmap(NULL, bss_start, som_brk - bss_start,
+	retval = vm_mmap(NULL, bss_start, som_brk - bss_start,
 			prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0);
-	up_write(&current->mm->mmap_sem);
 	if (retval > 0 || retval < -1024)
 		retval = 0;
 out:
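
All of the binfmt conversions above (and the fs/aio.c change earlier) are the
same transformation: the open-coded "take mmap_sem, call do_mmap()/do_brk()/
do_munmap(), drop mmap_sem" sequence becomes a single vm_mmap()/vm_brk()/
vm_munmap() call.  Roughly, the helper just folds in the locking that every
call site used to repeat; the following is a sketch of the idea, not the exact
mm/util.c implementation:

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/sched.h>

	/* Sketch only: the real helper also validates the offset and goes
	 * through do_mmap_pgoff(). */
	static unsigned long vm_mmap_sketch(struct file *file, unsigned long addr,
					    unsigned long len, unsigned long prot,
					    unsigned long flag, unsigned long offset)
	{
		unsigned long ret;

		down_write(&current->mm->mmap_sem);
		ret = do_mmap(file, addr, len, prot, flag, offset);
		up_write(&current->mm->mmap_sem);

		return ret;
	}
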
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d286b40..86eff48 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -405,6 +405,7 @@
 			bio_put(bio);
 
 			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
+			BUG_ON(!bio);
 			bio->bi_private = cb;
 			bio->bi_end_io = end_compressed_bio_write;
 			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -687,6 +688,7 @@
 
 			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
 							GFP_NOFS);
+			BUG_ON(!comp_bio);
 			comp_bio->bi_private = cb;
 			comp_bio->bi_end_io = end_compressed_bio_read;
 
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 5b8ef8e..3f65a81 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2166,7 +2166,7 @@
 
 static inline bool btrfs_root_readonly(struct btrfs_root *root)
 {
-	return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY;
+	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
 }
 
 /* struct btrfs_root_backup */
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a844204..2b35f8d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -529,9 +529,7 @@
 	 * allocate blocks for the tree root we can't do the fast caching since
 	 * we likely hold important locks.
 	 */
-	if (trans && (!trans->transaction->in_commit) &&
-	    (root && root != root->fs_info->tree_root) &&
-	    btrfs_test_opt(root, SPACE_CACHE)) {
+	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
 		ret = load_free_space_cache(fs_info, cache);
 
 		spin_lock(&cache->lock);
@@ -3152,15 +3150,14 @@
 /*
  * returns target flags in extended format or 0 if restripe for this
  * chunk_type is not in progress
+ *
+ * should be called with either volume_mutex or balance_lock held
  */
 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
 {
 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
 	u64 target = 0;
 
-	BUG_ON(!mutex_is_locked(&fs_info->volume_mutex) &&
-	       !spin_is_locked(&fs_info->balance_lock));
-
 	if (!bctl)
 		return 0;
 
@@ -4205,7 +4202,7 @@
 	num_bytes += div64_u64(data_used + meta_used, 50);
 
 	if (num_bytes * 3 > meta_used)
-		num_bytes = div64_u64(meta_used, 3) * 2;
+		num_bytes = div64_u64(meta_used, 3);
 
 	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
 }
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8d904dd..cd4b5e4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1937,7 +1937,7 @@
 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
 	u64 start = eb->start;
 	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
-	int ret;
+	int ret = 0;
 
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = extent_buffer_page(eb, i);
@@ -2180,6 +2180,10 @@
 	}
 
 	bio = bio_alloc(GFP_NOFS, 1);
+	if (!bio) {
+		free_io_failure(inode, failrec, 0);
+		return -EIO;
+	}
 	bio->bi_private = state;
 	bio->bi_end_io = failed_bio->bi_end_io;
 	bio->bi_sector = failrec->logical >> 9;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index e88330d3..202008e 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -748,13 +748,6 @@
 	u64 used = btrfs_block_group_used(&block_group->item);
 
 	/*
-	 * If we're unmounting then just return, since this does a search on the
-	 * normal root and not the commit root and we could deadlock.
-	 */
-	if (btrfs_fs_closing(fs_info))
-		return 0;
-
-	/*
 	 * If this block group has been marked to be cleared for one reason or
 	 * another then we can't trust the on disk cache, so just return.
 	 */
@@ -768,6 +761,8 @@
 	path = btrfs_alloc_path();
 	if (!path)
 		return 0;
+	path->search_commit_root = 1;
+	path->skip_locking = 1;
 
 	inode = lookup_free_space_inode(root, block_group, path);
 	if (IS_ERR(inode)) {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 90acc82..bc015f7 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1044,6 +1044,8 @@
 
 		BUG_ON(!page->page);
 		bio = bio_alloc(GFP_NOFS, 1);
+		if (!bio)
+			return -EIO;
 		bio->bi_bdev = page->bdev;
 		bio->bi_sector = page->physical >> 9;
 		bio->bi_end_io = scrub_complete_bio_end_io;
@@ -1171,6 +1173,8 @@
 		DECLARE_COMPLETION_ONSTACK(complete);
 
 		bio = bio_alloc(GFP_NOFS, 1);
+		if (!bio)
+			return -EIO;
 		bio->bi_bdev = page_bad->bdev;
 		bio->bi_sector = page_bad->physical >> 9;
 		bio->bi_end_io = scrub_complete_bio_end_io;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8da29e8..11b77a5 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -480,6 +480,7 @@
 	struct btrfs_transaction *cur_trans = trans->transaction;
 	struct btrfs_fs_info *info = root->fs_info;
 	int count = 0;
+	int err = 0;
 
 	if (--trans->use_count) {
 		trans->block_rsv = trans->orig_rsv;
@@ -532,18 +533,18 @@
 
 	if (current->journal_info == trans)
 		current->journal_info = NULL;
-	memset(trans, 0, sizeof(*trans));
-	kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
 	if (throttle)
 		btrfs_run_delayed_iputs(root);
 
 	if (trans->aborted ||
 	    root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
-		return -EIO;
+		err = -EIO;
 	}
 
-	return 0;
+	memset(trans, 0, sizeof(*trans));
+	kmem_cache_free(btrfs_trans_handle_cachep, trans);
+	return err;
 }
 
 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a872b48..759d0248 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3833,6 +3833,7 @@
 		int sub_stripes = 0;
 		u64 stripes_per_dev = 0;
 		u32 remaining_stripes = 0;
+		u32 last_stripe = 0;
 
 		if (map->type &
 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
@@ -3846,6 +3847,8 @@
 						      stripe_nr_orig,
 						      factor,
 						      &remaining_stripes);
+			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
+			last_stripe *= sub_stripes;
 		}
 
 		for (i = 0; i < num_stripes; i++) {
@@ -3858,16 +3861,29 @@
 					 BTRFS_BLOCK_GROUP_RAID10)) {
 				bbio->stripes[i].length = stripes_per_dev *
 							  map->stripe_len;
+
 				if (i / sub_stripes < remaining_stripes)
 					bbio->stripes[i].length +=
 						map->stripe_len;
+
+				/*
+				 * Special for the first stripe and
+				 * the last stripe:
+				 *
+				 * |-------|...|-------|
+				 *     |----------|
+				 *    off     end_off
+				 */
 				if (i < sub_stripes)
 					bbio->stripes[i].length -=
 						stripe_offset;
-				if ((i / sub_stripes + 1) %
-				    sub_stripes == remaining_stripes)
+
+				if (stripe_index >= last_stripe &&
+				    stripe_index <= (last_stripe +
+						     sub_stripes - 1))
 					bbio->stripes[i].length -=
 						stripe_end_offset;
+
 				if (i == sub_stripes - 1)
 					stripe_offset = 0;
 			} else
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d81e933..f31dc9a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -109,6 +109,8 @@
 
 	/* Options which could be blank */
 	Opt_blank_pass,
+	Opt_blank_user,
+	Opt_blank_ip,
 
 	Opt_err
 };
@@ -183,11 +185,15 @@
 	{ Opt_wsize, "wsize=%s" },
 	{ Opt_actimeo, "actimeo=%s" },
 
+	{ Opt_blank_user, "user=" },
+	{ Opt_blank_user, "username=" },
 	{ Opt_user, "user=%s" },
 	{ Opt_user, "username=%s" },
 	{ Opt_blank_pass, "pass=" },
 	{ Opt_pass, "pass=%s" },
 	{ Opt_pass, "password=%s" },
+	{ Opt_blank_ip, "ip=" },
+	{ Opt_blank_ip, "addr=" },
 	{ Opt_ip, "ip=%s" },
 	{ Opt_ip, "addr=%s" },
 	{ Opt_unc, "unc=%s" },
@@ -1117,7 +1123,7 @@
 	string = match_strdup(args);
 	if (string == NULL)
 		return -ENOMEM;
-	rc = kstrtoul(string, 10, option);
+	rc = kstrtoul(string, 0, option);
 	kfree(string);
 
 	return rc;
@@ -1534,15 +1540,17 @@
 
 		/* String Arguments */
 
+		case Opt_blank_user:
+			/* null user, ie. anonymous authentication */
+			vol->nullauth = 1;
+			vol->username = NULL;
+			break;
 		case Opt_user:
 			string = match_strdup(args);
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				/* null user, ie. anonymous authentication */
-				vol->nullauth = 1;
-			} else if (strnlen(string, MAX_USERNAME_SIZE) >
+			if (strnlen(string, MAX_USERNAME_SIZE) >
 							MAX_USERNAME_SIZE) {
 				printk(KERN_WARNING "CIFS: username too long\n");
 				goto cifs_parse_mount_err;
@@ -1611,14 +1619,15 @@
 			}
 			vol->password[j] = '\0';
 			break;
+		case Opt_blank_ip:
+			vol->UNCip = NULL;
+			break;
 		case Opt_ip:
 			string = match_strdup(args);
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				vol->UNCip = NULL;
-			} else if (strnlen(string, INET6_ADDRSTRLEN) >
+			if (strnlen(string, INET6_ADDRSTRLEN) >
 						INET6_ADDRSTRLEN) {
 				printk(KERN_WARNING "CIFS: ip address "
 						    "too long\n");
@@ -1636,12 +1645,6 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				printk(KERN_WARNING "CIFS: invalid path to "
-						    "network resource\n");
-				goto cifs_parse_mount_err;
-			}
-
 			temp_len = strnlen(string, 300);
 			if (temp_len  == 300) {
 				printk(KERN_WARNING "CIFS: UNC name too long\n");
@@ -1670,11 +1673,7 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				printk(KERN_WARNING "CIFS: invalid domain"
-						    " name\n");
-				goto cifs_parse_mount_err;
-			} else if (strnlen(string, 256) == 256) {
+			if (strnlen(string, 256) == 256) {
 				printk(KERN_WARNING "CIFS: domain name too"
 						    " long\n");
 				goto cifs_parse_mount_err;
@@ -1693,11 +1692,7 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				printk(KERN_WARNING "CIFS: srcaddr value not"
-						    " specified\n");
-				goto cifs_parse_mount_err;
-			} else if (!cifs_convert_address(
+			if (!cifs_convert_address(
 					(struct sockaddr *)&vol->srcaddr,
 					string, strlen(string))) {
 				printk(KERN_WARNING "CIFS:  Could not parse"
@@ -1710,11 +1705,6 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				printk(KERN_WARNING "CIFS: Invalid path"
-						    " prefix\n");
-				goto cifs_parse_mount_err;
-			}
 			temp_len = strnlen(string, 1024);
 			if (string[0] != '/')
 				temp_len++; /* missing leading slash */
@@ -1742,11 +1732,7 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				printk(KERN_WARNING "CIFS: Invalid iocharset"
-						    " specified\n");
-				goto cifs_parse_mount_err;
-			} else if (strnlen(string, 1024) >= 65) {
+			if (strnlen(string, 1024) >= 65) {
 				printk(KERN_WARNING "CIFS: iocharset name "
 						    "too long.\n");
 				goto cifs_parse_mount_err;
@@ -1771,11 +1757,6 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				printk(KERN_WARNING "CIFS: No socket option"
-						    " specified\n");
-				goto cifs_parse_mount_err;
-			}
 			if (strnicmp(string, "TCP_NODELAY", 11) == 0)
 				vol->sockopt_tcp_nodelay = 1;
 			break;
@@ -1784,12 +1765,6 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				printk(KERN_WARNING "CIFS: Invalid (empty)"
-						    " netbiosname\n");
-				break;
-			}
-
 			memset(vol->source_rfc1001_name, 0x20,
 				RFC1001_NAME_LEN);
 			/*
@@ -1817,11 +1792,6 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				printk(KERN_WARNING "CIFS: Empty server"
-					" netbiosname specified\n");
-				break;
-			}
 		/* last byte, type, is 0x20 for server type */
 			memset(vol->target_rfc1001_name, 0x20,
 				RFC1001_NAME_LEN_WITH_NULL);
@@ -1848,12 +1818,6 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				cERROR(1, "no protocol version specified"
-					  " after vers= mount option");
-				goto cifs_parse_mount_err;
-			}
-
 			if (strnicmp(string, "cifs", 4) == 0 ||
 			    strnicmp(string, "1", 1) == 0) {
 				/* This is the default */
@@ -1868,12 +1832,6 @@
 			if (string == NULL)
 				goto out_nomem;
 
-			if (!*string) {
-				printk(KERN_WARNING "CIFS: no security flavor"
-						    " specified\n");
-				break;
-			}
-
 			if (cifs_parse_security_flavors(string, vol) != 0)
 				goto cifs_parse_mount_err;
 			break;
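
The new Opt_blank_user/Opt_blank_ip table entries are needed because a "%s"
specifier in a match_token() pattern does not match an empty value: with only
"user=%s" in the table, a bare "user=" would fall through to Opt_err instead of
selecting anonymous authentication.  A userspace illustration of the idea (this
is not the kernel match_token() API, just the matching rule it implies):

	#include <stdio.h>
	#include <string.h>

	static const char *classify(const char *opt)
	{
		if (strcmp(opt, "user=") == 0)
			return "Opt_blank_user";	/* exact match, empty value */
		if (strncmp(opt, "user=", 5) == 0 && opt[5] != '\0')
			return "Opt_user";		/* "user=%s", non-empty value */
		return "Opt_err";
	}

	int main(void)
	{
		printf("%s\n", classify("user="));	/* Opt_blank_user */
		printf("%s\n", classify("user=joe"));	/* Opt_user */
		return 0;
	}
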
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index fa5c07d..4c58d4a 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1737,6 +1737,18 @@
 		return 1;
 
 	/*
+	 * Even if the convert is compatible with all granted locks,
+	 * QUECVT forces it behind other locks on the convert queue.
+	 */
+
+	if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
+		if (list_empty(&r->res_convertqueue))
+			return 1;
+		else
+			goto out;
+	}
+
+	/*
 	 * The NOORDER flag is set to avoid the standard vms rules on grant
 	 * order.
 	 */
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ab2594a..0e01e90 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1203,9 +1203,6 @@
 	unsigned long s_ext_blocks;
 	unsigned long s_ext_extents;
 #endif
-	/* ext4 extent cache stats */
-	unsigned long extent_cache_hits;
-	unsigned long extent_cache_misses;
 
 	/* for buddy allocator */
 	struct ext4_group_info ***s_group_info;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 1421938..abcdeab 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2066,10 +2066,6 @@
 		ret = 1;
 	}
 errout:
-	if (!ret)
-		sbi->extent_cache_misses++;
-	else
-		sbi->extent_cache_hits++;
 	trace_ext4_ext_in_cache(inode, block, ret);
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 	return ret;
@@ -2882,7 +2878,7 @@
 		if (err)
 			goto fix_extent_len;
 		/* update the extent length and mark as initialized */
-		ex->ee_len = cpu_to_le32(ee_len);
+		ex->ee_len = cpu_to_le16(ee_len);
 		ext4_ext_try_to_merge(inode, path, ex);
 		err = ext4_ext_dirty(handle, inode, path + depth);
 		goto out;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ceebaf8..6da1935 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1305,20 +1305,20 @@
 		ext4_msg(sb, KERN_ERR,
 			"Cannot change journaled "
 			"quota options when quota turned on");
-		return 0;
+		return -1;
 	}
 	qname = match_strdup(args);
 	if (!qname) {
 		ext4_msg(sb, KERN_ERR,
 			"Not enough memory for storing quotafile name");
-		return 0;
+		return -1;
 	}
 	if (sbi->s_qf_names[qtype] &&
 		strcmp(sbi->s_qf_names[qtype], qname)) {
 		ext4_msg(sb, KERN_ERR,
 			"%s quota file already specified", QTYPE2NAME(qtype));
 		kfree(qname);
-		return 0;
+		return -1;
 	}
 	sbi->s_qf_names[qtype] = qname;
 	if (strchr(sbi->s_qf_names[qtype], '/')) {
@@ -1326,7 +1326,7 @@
 			"quotafile must be on filesystem root");
 		kfree(sbi->s_qf_names[qtype]);
 		sbi->s_qf_names[qtype] = NULL;
-		return 0;
+		return -1;
 	}
 	set_opt(sb, QUOTA);
 	return 1;
@@ -1341,7 +1341,7 @@
 		sbi->s_qf_names[qtype]) {
 		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
 			" when quota turned on");
-		return 0;
+		return -1;
 	}
 	/*
 	 * The space will be released later when all options are confirmed
@@ -1450,6 +1450,16 @@
 	const struct mount_opts *m;
 	int arg = 0;
 
+#ifdef CONFIG_QUOTA
+	if (token == Opt_usrjquota)
+		return set_qf_name(sb, USRQUOTA, &args[0]);
+	else if (token == Opt_grpjquota)
+		return set_qf_name(sb, GRPQUOTA, &args[0]);
+	else if (token == Opt_offusrjquota)
+		return clear_qf_name(sb, USRQUOTA);
+	else if (token == Opt_offgrpjquota)
+		return clear_qf_name(sb, GRPQUOTA);
+#endif
 	if (args->from && match_int(args, &arg))
 		return -1;
 	switch (token) {
@@ -1549,18 +1559,6 @@
 				sbi->s_mount_opt |= m->mount_opt;
 			}
 #ifdef CONFIG_QUOTA
-		} else if (token == Opt_usrjquota) {
-			if (!set_qf_name(sb, USRQUOTA, &args[0]))
-				return -1;
-		} else if (token == Opt_grpjquota) {
-			if (!set_qf_name(sb, GRPQUOTA, &args[0]))
-				return -1;
-		} else if (token == Opt_offusrjquota) {
-			if (!clear_qf_name(sb, USRQUOTA))
-				return -1;
-		} else if (token == Opt_offgrpjquota) {
-			if (!clear_qf_name(sb, GRPQUOTA))
-				return -1;
 		} else if (m->flags & MOPT_QFMT) {
 			if (sb_any_quota_loaded(sb) &&
 			    sbi->s_jquota_fmt != m->mount_opt) {
@@ -2366,18 +2364,6 @@
 			  EXT4_SB(sb)->s_sectors_written_start) >> 1)));
 }
 
-static ssize_t extent_cache_hits_show(struct ext4_attr *a,
-				      struct ext4_sb_info *sbi, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_hits);
-}
-
-static ssize_t extent_cache_misses_show(struct ext4_attr *a,
-					struct ext4_sb_info *sbi, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_misses);
-}
-
 static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
 					  struct ext4_sb_info *sbi,
 					  const char *buf, size_t count)
@@ -2435,8 +2421,6 @@
 EXT4_RO_ATTR(delayed_allocation_blocks);
 EXT4_RO_ATTR(session_write_kbytes);
 EXT4_RO_ATTR(lifetime_write_kbytes);
-EXT4_RO_ATTR(extent_cache_hits);
-EXT4_RO_ATTR(extent_cache_misses);
 EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
 		 inode_readahead_blks_store, s_inode_readahead_blks);
 EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
@@ -2452,8 +2436,6 @@
 	ATTR_LIST(delayed_allocation_blocks),
 	ATTR_LIST(session_write_kbytes),
 	ATTR_LIST(lifetime_write_kbytes),
-	ATTR_LIST(extent_cache_hits),
-	ATTR_LIST(extent_cache_misses),
 	ATTR_LIST(inode_readahead_blks),
 	ATTR_LIST(inode_goal),
 	ATTR_LIST(mb_stats),
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 2066328..df5ac04 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -387,9 +387,6 @@
 	if (fc->no_create)
 		return -ENOSYS;
 
-	if (flags & O_DIRECT)
-		return -EINVAL;
-
 	forget = fuse_alloc_forget();
 	if (!forget)
 		return -ENOMEM;
@@ -644,13 +641,12 @@
 	fuse_put_request(fc, req);
 	if (!err) {
 		struct inode *inode = entry->d_inode;
+		struct fuse_inode *fi = get_fuse_inode(inode);
 
-		/*
-		 * Set nlink to zero so the inode can be cleared, if the inode
-		 * does have more links this will be discovered at the next
-		 * lookup/getattr.
-		 */
-		clear_nlink(inode);
+		spin_lock(&fc->lock);
+		fi->attr_version = ++fc->attr_version;
+		drop_nlink(inode);
+		spin_unlock(&fc->lock);
 		fuse_invalidate_attr(inode);
 		fuse_invalidate_attr(dir);
 		fuse_invalidate_entry_cache(entry);
@@ -762,8 +758,17 @@
 	   will reflect changes in the backing inode (link count,
 	   etc.)
 	*/
-	if (!err || err == -EINTR)
+	if (!err) {
+		struct fuse_inode *fi = get_fuse_inode(inode);
+
+		spin_lock(&fc->lock);
+		fi->attr_version = ++fc->attr_version;
+		inc_nlink(inode);
+		spin_unlock(&fc->lock);
 		fuse_invalidate_attr(inode);
+	} else if (err == -EINTR) {
+		fuse_invalidate_attr(inode);
+	}
 	return err;
 }
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a841868..504e61b 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -194,10 +194,6 @@
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	int err;
 
-	/* VFS checks this, but only _after_ ->open() */
-	if (file->f_flags & O_DIRECT)
-		return -EINVAL;
-
 	err = generic_file_open(inode, file);
 	if (err)
 		return err;
@@ -932,17 +928,23 @@
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	size_t count = 0;
+	size_t ocount = 0;
 	ssize_t written = 0;
+	ssize_t written_buffered = 0;
 	struct inode *inode = mapping->host;
 	ssize_t err;
 	struct iov_iter i;
+	loff_t endbyte = 0;
 
 	WARN_ON(iocb->ki_pos != pos);
 
-	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+	ocount = 0;
+	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
 	if (err)
 		return err;
 
+	count = ocount;
+
 	mutex_lock(&inode->i_mutex);
 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
 
@@ -962,11 +964,41 @@
 
 	file_update_time(file);
 
-	iov_iter_init(&i, iov, nr_segs, count, 0);
-	written = fuse_perform_write(file, mapping, &i, pos);
-	if (written >= 0)
-		iocb->ki_pos = pos + written;
+	if (file->f_flags & O_DIRECT) {
+		written = generic_file_direct_write(iocb, iov, &nr_segs,
+						    pos, &iocb->ki_pos,
+						    count, ocount);
+		if (written < 0 || written == count)
+			goto out;
 
+		pos += written;
+		count -= written;
+
+		iov_iter_init(&i, iov, nr_segs, count, written);
+		written_buffered = fuse_perform_write(file, mapping, &i, pos);
+		if (written_buffered < 0) {
+			err = written_buffered;
+			goto out;
+		}
+		endbyte = pos + written_buffered - 1;
+
+		err = filemap_write_and_wait_range(file->f_mapping, pos,
+						   endbyte);
+		if (err)
+			goto out;
+
+		invalidate_mapping_pages(file->f_mapping,
+					 pos >> PAGE_CACHE_SHIFT,
+					 endbyte >> PAGE_CACHE_SHIFT);
+
+		written += written_buffered;
+		iocb->ki_pos = pos + written_buffered;
+	} else {
+		iov_iter_init(&i, iov, nr_segs, count, 0);
+		written = fuse_perform_write(file, mapping, &i, pos);
+		if (written >= 0)
+			iocb->ki_pos = pos + written;
+	}
 out:
 	current->backing_dev_info = NULL;
 	mutex_unlock(&inode->i_mutex);
@@ -1101,6 +1133,24 @@
 	return res;
 }
 
+static ssize_t __fuse_direct_write(struct file *file, const char __user *buf,
+				   size_t count, loff_t *ppos)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	ssize_t res;
+
+	res = generic_write_checks(file, ppos, &count, 0);
+	if (!res) {
+		res = fuse_direct_io(file, buf, count, ppos, 1);
+		if (res > 0)
+			fuse_write_update_size(inode, *ppos);
+	}
+
+	fuse_invalidate_attr(inode);
+
+	return res;
+}
+
 static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
 				 size_t count, loff_t *ppos)
 {
@@ -1112,16 +1162,9 @@
 
 	/* Don't allow parallel writes to the same file */
 	mutex_lock(&inode->i_mutex);
-	res = generic_write_checks(file, ppos, &count, 0);
-	if (!res) {
-		res = fuse_direct_io(file, buf, count, ppos, 1);
-		if (res > 0)
-			fuse_write_update_size(inode, *ppos);
-	}
+	res = __fuse_direct_write(file, buf, count, ppos);
 	mutex_unlock(&inode->i_mutex);
 
-	fuse_invalidate_attr(inode);
-
 	return res;
 }
 
@@ -2077,6 +2120,57 @@
 	return 0;
 }
 
+static ssize_t fuse_loop_dio(struct file *filp, const struct iovec *iov,
+			     unsigned long nr_segs, loff_t *ppos, int rw)
+{
+	const struct iovec *vector = iov;
+	ssize_t ret = 0;
+
+	while (nr_segs > 0) {
+		void __user *base;
+		size_t len;
+		ssize_t nr;
+
+		base = vector->iov_base;
+		len = vector->iov_len;
+		vector++;
+		nr_segs--;
+
+		if (rw == WRITE)
+			nr = __fuse_direct_write(filp, base, len, ppos);
+		else
+			nr = fuse_direct_read(filp, base, len, ppos);
+
+		if (nr < 0) {
+			if (!ret)
+				ret = nr;
+			break;
+		}
+		ret += nr;
+		if (nr != len)
+			break;
+	}
+
+	return ret;
+}
+
+
+static ssize_t
+fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+			loff_t offset, unsigned long nr_segs)
+{
+	ssize_t ret = 0;
+	struct file *file = NULL;
+	loff_t pos = 0;
+
+	file = iocb->ki_filp;
+	pos = offset;
+
+	ret = fuse_loop_dio(file, iov, nr_segs, &pos, rw);
+
+	return ret;
+}
+
 static const struct file_operations fuse_file_operations = {
 	.llseek		= fuse_file_llseek,
 	.read		= do_sync_read,
@@ -2120,6 +2214,7 @@
 	.readpages	= fuse_readpages,
 	.set_page_dirty	= __set_page_dirty_nobuffers,
 	.bmap		= fuse_bmap,
+	.direct_IO	= fuse_direct_IO,
 };
 
 void fuse_init_file_inode(struct inode *inode)
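
With the O_DIRECT rejections removed from fuse_open()/fuse_create_open() and a
->direct_IO method wired up, opening a FUSE file with O_DIRECT no longer fails
with -EINVAL; reads and writes are forwarded through fuse_loop_dio() above.  A
usage illustration (not part of the patch; the mount point and the 512-byte
alignment are hypothetical):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		void *buf;
		ssize_t n;
		int fd = open("/mnt/fuse/file", O_WRONLY | O_CREAT | O_DIRECT, 0644);

		if (fd < 0)
			return 1;
		if (posix_memalign(&buf, 512, 4096))
			return 1;
		memset(buf, 0, 4096);

		/* Previously this open() itself failed with -EINVAL; now the
		 * aligned write takes the direct I/O path added above. */
		n = write(fd, buf, 4096);

		free(buf);
		close(fd);
		return n == 4096 ? 0 : 1;
	}
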
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4aec599..26783eb 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -947,6 +947,7 @@
 	sb->s_magic = FUSE_SUPER_MAGIC;
 	sb->s_op = &fuse_super_operations;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
+	sb->s_time_gran = 1;
 	sb->s_export_op = &fuse_export_operations;
 
 	file = fget(d.fd);
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index c465ae0..eb08c9e 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,10 +1,6 @@
 config GFS2_FS
 	tristate "GFS2 file system support"
 	depends on (64BIT || LBDAF)
-	select DLM if GFS2_FS_LOCKING_DLM
-	select CONFIGFS_FS if GFS2_FS_LOCKING_DLM
-	select SYSFS if GFS2_FS_LOCKING_DLM
-	select IP_SCTP if DLM_SCTP
 	select FS_POSIX_ACL
 	select CRC32
 	select QUOTACTL
@@ -29,7 +25,8 @@
 
 config GFS2_FS_LOCKING_DLM
 	bool "GFS2 DLM locking"
-	depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && HOTPLUG
+	depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && \
+		HOTPLUG && DLM && CONFIGFS_FS && SYSFS
 	help
 	  Multiple node locking module for GFS2
 
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 38b7a74..9b2ff0e 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -807,7 +807,7 @@
 
 	if (inode == sdp->sd_rindex) {
 		adjust_fs_space(inode);
-		ip->i_gh.gh_flags |= GL_NOCACHE;
+		sdp->sd_rindex_uptodate = 0;
 	}
 
 	brelse(dibh);
@@ -873,7 +873,7 @@
 
 	if (inode == sdp->sd_rindex) {
 		adjust_fs_space(inode);
-		ip->i_gh.gh_flags |= GL_NOCACHE;
+		sdp->sd_rindex_uptodate = 0;
 	}
 
 	brelse(dibh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 197c5c4..03c04fe 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -724,7 +724,11 @@
 	int metadata;
 	unsigned int revokes = 0;
 	int x;
-	int error = 0;
+	int error;
+
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
 
 	if (!*top)
 		sm->sm_first = 0;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index c35573a..a836056 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1844,6 +1844,10 @@
 	unsigned int x, size = len * sizeof(u64);
 	int error;
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 
 	ht = kzalloc(size, GFP_NOFS);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index c98a60e..a9ba244 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1031,7 +1031,13 @@
 	struct buffer_head *bh;
 	struct gfs2_holder ghs[3];
 	struct gfs2_rgrpd *rgd;
-	int error = -EROFS;
+	int error;
+
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
+	error = -EROFS;
 
 	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
 	gfs2_holder_init(ip->i_gl,  LM_ST_EXCLUSIVE, 0, ghs + 1);
@@ -1224,6 +1230,10 @@
 			return 0;
 	}
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	if (odip != ndip) {
 		error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
 					   0, &r_gh);
@@ -1345,7 +1355,6 @@
 	error = alloc_required;
 	if (error < 0)
 		goto out_gunlock;
-	error = 0;
 
 	if (alloc_required) {
 		struct gfs2_qadata *qa = gfs2_qadata_get(ndip);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 19bde40..3df65c9 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -332,9 +332,6 @@
 	struct rb_node *n, *next;
 	struct gfs2_rgrpd *cur;
 
-	if (gfs2_rindex_update(sdp))
-		return NULL;
-
 	spin_lock(&sdp->sd_rindex_spin);
 	n = sdp->sd_rindex_tree.rb_node;
 	while (n) {
@@ -640,6 +637,7 @@
 		return 0;
 
 	error = 0; /* someone else read in the rgrp; free it and ignore it */
+	gfs2_glock_put(rgd->rd_gl);
 
 fail:
 	kfree(rgd->rd_bits);
@@ -927,6 +925,10 @@
 	} else if (copy_from_user(&r, argp, sizeof(r)))
 		return -EFAULT;
 
+	ret = gfs2_rindex_update(sdp);
+	if (ret)
+		return ret;
+
 	rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
 	rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);
 
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 2e5ba42..927f4df 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -238,6 +238,10 @@
 	unsigned int x;
 	int error;
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	if (GFS2_EA_IS_STUFFED(ea))
 		return 0;
 
@@ -1330,6 +1334,10 @@
 	unsigned int x;
 	int error;
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 
 	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
@@ -1439,6 +1447,10 @@
 	struct gfs2_holder gh;
 	int error;
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
 	if (!rgd) {
 		gfs2_consist_inode(ip);
diff --git a/fs/libfs.c b/fs/libfs.c
index 358094f..18d08f5 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -529,6 +529,7 @@
 	return 0;
 out:
 	d_genocide(root);
+	shrink_dcache_parent(root);
 	dput(root);
 	return -ENOMEM;
 }
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
index 3ddcbb1..13ad153 100644
--- a/fs/lockd/clnt4xdr.c
+++ b/fs/lockd/clnt4xdr.c
@@ -241,7 +241,7 @@
 	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		goto out_overflow;
-	if (unlikely(*p > nlm4_failed))
+	if (unlikely(ntohl(*p) > ntohl(nlm4_failed)))
 		goto out_bad_xdr;
 	*stat = *p;
 	return 0;
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
index 3d35e3e..d269ada 100644
--- a/fs/lockd/clntxdr.c
+++ b/fs/lockd/clntxdr.c
@@ -236,7 +236,7 @@
 	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		goto out_overflow;
-	if (unlikely(*p > nlm_lck_denied_grace_period))
+	if (unlikely(ntohl(*p) > ntohl(nlm_lck_denied_grace_period)))
 		goto out_enum;
 	*stat = *p;
 	return 0;
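
Both lockd hunks fix the same bug: the decoded status word and the nlm*_
constants are on-the-wire __be32 values, so comparing them raw only happens to
give the right answer on big-endian hosts; the range check has to be done in
host byte order, which is what the added ntohl() calls do.  A small endianness
illustration (the values are examples, not the real NLM status codes):

	#include <stdio.h>
	#include <arpa/inet.h>

	int main(void)
	{
		unsigned int max_valid = htonl(5);	/* largest valid status, wire format */
		unsigned int wire = htonl(256);		/* bogus status from the peer */

		/* Raw comparison of the big-endian words: on a little-endian
		 * host this wrongly accepts the bogus value. */
		printf("raw:   %s\n", wire > max_valid ? "rejected" : "accepted");

		/* Comparing in host order, as the fix does, rejects it. */
		printf("ntohl: %s\n",
		       ntohl(wire) > ntohl(max_valid) ? "rejected" : "accepted");
		return 0;
	}
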
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 08c6e36..43f46cd 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -803,13 +803,13 @@
 	return p;
 }
 
-static int
+static __be32
 compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
 		const char *name, int namlen)
 {
 	struct svc_export	*exp;
 	struct dentry		*dparent, *dchild;
-	int rv = 0;
+	__be32 rv = nfserr_noent;
 
 	dparent = cd->fh.fh_dentry;
 	exp  = cd->fh.fh_export;
@@ -817,26 +817,20 @@
 	if (isdotent(name, namlen)) {
 		if (namlen == 2) {
 			dchild = dget_parent(dparent);
-			if (dchild == dparent) {
-				/* filesystem root - cannot return filehandle for ".." */
-				dput(dchild);
-				return -ENOENT;
-			}
+			/* filesystem root - cannot return filehandle for ".." */
+			if (dchild == dparent)
+				goto out;
 		} else
 			dchild = dget(dparent);
 	} else
 		dchild = lookup_one_len(name, dparent, namlen);
 	if (IS_ERR(dchild))
-		return -ENOENT;
-	rv = -ENOENT;
+		return rv;
 	if (d_mountpoint(dchild))
 		goto out;
-	rv = fh_compose(fhp, exp, dchild, &cd->fh);
-	if (rv)
-		goto out;
 	if (!dchild->d_inode)
 		goto out;
-	rv = 0;
+	rv = fh_compose(fhp, exp, dchild, &cd->fh);
 out:
 	dput(dchild);
 	return rv;
@@ -845,7 +839,7 @@
 static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
 {
 	struct svc_fh	fh;
-	int err;
+	__be32 err;
 
 	fh_init(&fh, NFS3_FHSIZE);
 	err = compose_entry_fh(cd, &fh, name, namlen);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 2ed14df..987e719 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -235,17 +235,17 @@
 		 */
 		if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
 			open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS |
-						FATTR4_WORD1_TIME_MODIFY);
+							FATTR4_WORD1_TIME_MODIFY);
 	} else {
 		status = nfsd_lookup(rqstp, current_fh,
 				     open->op_fname.data, open->op_fname.len, resfh);
 		fh_unlock(current_fh);
-		if (status)
-			goto out;
-		status = nfsd_check_obj_isreg(resfh);
 	}
 	if (status)
 		goto out;
+	status = nfsd_check_obj_isreg(resfh);
+	if (status)
+		goto out;
 
 	if (is_create_with_attrs(open) && open->op_acl != NULL)
 		do_set_nfs4_acl(rqstp, resfh, open->op_acl, open->op_bmval);
@@ -841,6 +841,7 @@
 	      struct nfsd4_setattr *setattr)
 {
 	__be32 status = nfs_ok;
+	int err;
 
 	if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
 		nfs4_lock_state();
@@ -852,9 +853,9 @@
 			return status;
 		}
 	}
-	status = fh_want_write(&cstate->current_fh);
-	if (status)
-		return status;
+	err = fh_want_write(&cstate->current_fh);
+	if (err)
+		return nfserrno(err);
 	status = nfs_ok;
 
 	status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 1841f8b..7f71c69 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4211,16 +4211,14 @@
  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
  * inode operation.)
  */
-static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
+static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
 {
 	struct file *file;
-	int err;
-
-	err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
-	if (err)
-		return err;
-	err = vfs_test_lock(file, lock);
-	nfsd_close(file);
+	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
+	if (!err) {
+		err = nfserrno(vfs_test_lock(file, lock));
+		nfsd_close(file);
+	}
 	return err;
 }
 
@@ -4234,7 +4232,6 @@
 	struct inode *inode;
 	struct file_lock file_lock;
 	struct nfs4_lockowner *lo;
-	int error;
 	__be32 status;
 
 	if (locks_in_grace())
@@ -4280,12 +4277,10 @@
 
 	nfs4_transform_lock_offset(&file_lock);
 
-	status = nfs_ok;
-	error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
-	if (error) {
-		status = nfserrno(error);
+	status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
+	if (status)
 		goto out;
-	}
+
 	if (file_lock.fl_type != F_UNLCK) {
 		status = nfserr_denied;
 		nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index bcd8904..74c00bc 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1392,7 +1392,7 @@
 	for (i = 0; i < test_stateid->ts_num_ids; i++) {
 		stateid = kmalloc(sizeof(struct nfsd4_test_stateid_id), GFP_KERNEL);
 		if (!stateid) {
-			status = PTR_ERR(stateid);
+			status = nfserrno(-ENOMEM);
 			goto out;
 		}
 
@@ -3410,7 +3410,7 @@
 	*p++ = htonl(test_stateid->ts_num_ids);
 
 	list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
-		*p++ = htonl(stateid->ts_id_status);
+		*p++ = stateid->ts_id_status;
 	}
 
 	ADJUST_ARGS();
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 296d671..5686661 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1458,7 +1458,7 @@
 		switch (createmode) {
 		case NFS3_CREATE_UNCHECKED:
 			if (! S_ISREG(dchild->d_inode->i_mode))
-				err = nfserr_exist;
+				goto out;
 			else if (truncp) {
 				/* in nfsv4, we need to treat this case a little
 				 * differently.  we don't want to truncate the
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 3165aeb..31b9463 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1134,7 +1134,7 @@
 	}
 
 	el = path_leaf_el(path);
-	rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1];
+	rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec) - 1];
 
 	ocfs2_adjust_rightmost_records(handle, et, path, rec);
 
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 044e7b5..1bfe880 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -2005,7 +2005,7 @@
 	o2net_listen_sock = sock;
 	INIT_WORK(&o2net_listen_work, o2net_accept_many);
 
-	sock->sk->sk_reuse = 1;
+	sock->sk->sk_reuse = SK_CAN_REUSE;
 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
 	if (ret < 0) {
 		printk(KERN_ERR "o2net: Error %d while binding socket at "
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index cf78233..9f32d7c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -1036,14 +1036,14 @@
 
 	tmp_el = left_path->p_node[subtree_root].el;
 	blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
-	for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) {
+	for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
 		if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
 			*cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
 			break;
 		}
 	}
 
-	BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec));
+	BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
 
 out:
 	ocfs2_free_path(left_path);
@@ -1468,7 +1468,7 @@
 
 	trace_ocfs2_divide_leaf_refcount_block(
 		(unsigned long long)ref_leaf_bh->b_blocknr,
-		le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
+		le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
 
 	/*
 	 * XXX: Improvement later.
@@ -2411,7 +2411,7 @@
 				rb = (struct ocfs2_refcount_block *)
 							prev_bh->b_data;
 
-				if (le64_to_cpu(rb->rf_records.rl_used) +
+				if (le16_to_cpu(rb->rf_records.rl_used) +
 				    recs_add >
 				    le16_to_cpu(rb->rf_records.rl_count))
 					ref_blocks++;
@@ -2476,7 +2476,7 @@
 	if (prev_bh) {
 		rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
 
-		if (le64_to_cpu(rb->rf_records.rl_used) + recs_add >
+		if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
 		    le16_to_cpu(rb->rf_records.rl_count))
 			ref_blocks++;
 
@@ -3629,7 +3629,7 @@
 			 * one will split a refcount rec, so totally we need
 			 * clusters * 2 new refcount rec.
 			 */
-			if (le64_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
+			if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
 			    le16_to_cpu(rb->rf_records.rl_count))
 				ref_blocks++;
 
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index ba5d97e..f169da4 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -600,7 +600,7 @@
 		ret = ocfs2_free_clusters(handle, cluster_ac->ac_inode,
 					  cluster_ac->ac_bh,
 					  le64_to_cpu(rec->e_blkno),
-					  le32_to_cpu(rec->e_leaf_clusters));
+					  le16_to_cpu(rec->e_leaf_clusters));
 		if (ret)
 			mlog_errno(ret);
 		/* Try all the clusters to free */
@@ -1628,7 +1628,7 @@
 {
 	unsigned int bpc = le16_to_cpu(cl->cl_bpc);
 	unsigned int bitoff = le32_to_cpu(rec->e_cpos) * bpc;
-	unsigned int bitcount = le32_to_cpu(rec->e_leaf_clusters) * bpc;
+	unsigned int bitcount = le16_to_cpu(rec->e_leaf_clusters) * bpc;
 
 	if (res->sr_bit_offset < bitoff)
 		return 0;
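
The ocfs2 hunks above are all one class of fix: rl_used, rl_count,
l_next_free_rec and e_leaf_clusters are 16-bit little-endian on-disk fields, so
they must be read with le16_to_cpu().  le32_to_cpu() is a 32-bit byte swap, and
on a big-endian host it returns the value shifted into the wrong half (on
little-endian hosts both conversions are no-ops, which is why the bug went
unnoticed).  A standalone illustration, with the byte swaps written out so it
runs anywhere:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t swab32(uint32_t x)	/* what le32_to_cpu() does on big-endian */
	{
		return (x >> 24) | ((x >> 8) & 0xff00) |
		       ((x << 8) & 0xff0000) | (x << 24);
	}

	static uint16_t swab16(uint16_t x)	/* what le16_to_cpu() does on big-endian */
	{
		return (x >> 8) | (x << 8);
	}

	int main(void)
	{
		/* rl_used == 3 on disk; a big-endian CPU reads the two
		 * little-endian bytes as 0x0300. */
		uint16_t raw = 0x0300;

		printf("le32_to_cpu-style read: %u\n", swab32(raw));	/* 196608, wrong */
		printf("le16_to_cpu-style read: %u\n", swab16(raw));	/* 3, correct */
		return 0;
	}
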
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 6a0c62d..64c3b31 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -18,19 +18,39 @@
 #ifndef arch_irq_stat
 #define arch_irq_stat() 0
 #endif
-#ifndef arch_idle_time
-#define arch_idle_time(cpu) 0
-#endif
+
+#ifdef arch_idle_time
+
+static cputime64_t get_idle_time(int cpu)
+{
+	cputime64_t idle;
+
+	idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+	if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
+		idle += arch_idle_time(cpu);
+	return idle;
+}
+
+static cputime64_t get_iowait_time(int cpu)
+{
+	cputime64_t iowait;
+
+	iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
+	if (cpu_online(cpu) && nr_iowait_cpu(cpu))
+		iowait += arch_idle_time(cpu);
+	return iowait;
+}
+
+#else
 
 static u64 get_idle_time(int cpu)
 {
 	u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
 
-	if (idle_time == -1ULL) {
+	if (idle_time == -1ULL)
 		/* !NO_HZ so we can rely on cpustat.idle */
 		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
-		idle += arch_idle_time(cpu);
-	} else
+	else
 		idle = usecs_to_cputime64(idle_time);
 
 	return idle;
@@ -49,6 +69,8 @@
 	return iowait;
 }
 
+#endif
+
 static int show_stat(struct seq_file *p, void *v)
 {
 	int i, j;
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 2a7a3f5..35a36d3 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -729,6 +729,9 @@
 	else
 		parent_sd = &sysfs_root;
 
+	if (!parent_sd)
+		return -ENOENT;
+
 	if (sysfs_ns_type(parent_sd))
 		ns = kobj->ktype->namespace(kobj);
 	type = sysfs_read_ns_type(kobj);
@@ -878,7 +881,6 @@
 
 		dup_name = sd->s_name;
 		sd->s_name = new_name;
-		sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name);
 	}
 
 	/* Move to the appropriate place in the appropriate directories rbtree. */
@@ -886,6 +888,7 @@
 	sysfs_get(new_parent_sd);
 	sysfs_put(sd->s_parent);
 	sd->s_ns = new_ns;
+	sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name);
 	sd->s_parent = new_parent_sd;
 	sysfs_link_sibling(sd);
 
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index dd1701c..2df555c 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -67,7 +67,11 @@
 	/* Updates may happen before the object has been instantiated */
 	if (unlikely(update && !kobj->sd))
 		return -EINVAL;
-
+	if (!grp->attrs) {
+		WARN(1, "sysfs: attrs not set by subsystem for group: %s/%s\n",
+			kobj->name, grp->name ? grp->name : "");
+		return -EINVAL;
+	}
 	if (grp->name) {
 		error = sysfs_create_subdir(kobj, grp->name, &sd);
 		if (error)
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 3963116..e478de4 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -85,7 +85,7 @@
 struct drm_exynos_vidi_connection {
 	unsigned int connection;
 	unsigned int extensions;
-	uint64_t *edid;
+	uint64_t edid;
 };
 
 struct drm_exynos_plane_set_zpos {
@@ -96,7 +96,8 @@
 /* memory type definitions. */
 enum e_drm_exynos_gem_mem_type {
 	/* Physically Non-Continuous memory. */
-	EXYNOS_BO_NONCONTIG	= 1 << 0
+	EXYNOS_BO_NONCONTIG	= 1 << 0,
+	EXYNOS_BO_MASK		= EXYNOS_BO_NONCONTIG
 };
 
 #define DRM_EXYNOS_GEM_CREATE		0x00
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 7847e19..8d54f79 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -30,7 +30,6 @@
 	struct device		dev;
 	struct resource		res;
 	struct clk		*pclk;
-	struct regulator	*vcore;
 	u64			dma_mask;
 	unsigned int		periphid;
 	unsigned int		irq[AMBA_NR_IRQS];
@@ -75,12 +74,6 @@
 #define amba_pclk_disable(d)	\
 	do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
 
-#define amba_vcore_enable(d)	\
-	(IS_ERR((d)->vcore) ? 0 : regulator_enable((d)->vcore))
-
-#define amba_vcore_disable(d)	\
-	do { if (!IS_ERR((d)->vcore)) regulator_disable((d)->vcore); } while (0)
-
 /* Some drivers don't use the struct amba_device */
 #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
 #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index b8c5112..76dd1b1 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -25,6 +25,8 @@
 #ifndef _SSP_PL022_H
 #define _SSP_PL022_H
 
+#include <linux/types.h>
+
 /**
  * whether SSP is in loopback mode or not
  */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 606cf33..2aa2466 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -426,14 +426,10 @@
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
-static inline int queue_is_locked(struct request_queue *q)
+static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
-#ifdef CONFIG_SMP
-	spinlock_t *lock = q->queue_lock;
-	return lock && spin_is_locked(lock);
-#else
-	return 1;
-#endif
+	if (q->queue_lock)
+		lockdep_assert_held(q->queue_lock);
 }
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
@@ -445,7 +441,7 @@
 static inline int queue_flag_test_and_clear(unsigned int flag,
 					    struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (test_bit(flag, &q->queue_flags)) {
 		__clear_bit(flag, &q->queue_flags);
@@ -458,7 +454,7 @@
 static inline int queue_flag_test_and_set(unsigned int flag,
 					  struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (!test_bit(flag, &q->queue_flags)) {
 		__set_bit(flag, &q->queue_flags);
@@ -470,7 +466,7 @@
 
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__set_bit(flag, &q->queue_flags);
 }
 
@@ -487,7 +483,7 @@
 
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__clear_bit(flag, &q->queue_flags);
 }
 
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index 65a2562..6bb4338 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -67,6 +67,17 @@
 	__u8	reco_prio_tc[IEEE_8021QAZ_MAX_TCS];
 };
 
+/* This structure contains rate limit extension to the IEEE 802.1Qaz ETS
+ * managed object.
+ * Values are 64 bits long and specified in Kbps to enable usage over both
+ * slow and very fast networks.
+ *
+ * @tc_maxrate: maximal tc tx bandwidth indexed by traffic class
+ */
+struct ieee_maxrate {
+	__u64	tc_maxrate[IEEE_8021QAZ_MAX_TCS];
+};
+
 /* This structure contains the IEEE 802.1Qaz PFC managed object
  *
  * @pfc_cap: Indicates the number of traffic classes on the local device
@@ -321,6 +332,7 @@
 	DCB_ATTR_IEEE_PEER_ETS,
 	DCB_ATTR_IEEE_PEER_PFC,
 	DCB_ATTR_IEEE_PEER_APP,
+	DCB_ATTR_IEEE_MAXRATE,
 	__DCB_ATTR_IEEE_MAX
 };
 #define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index eaf95a0..d16294e 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -549,6 +549,8 @@
 	return NULL;
 }
 
+extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_DCCP_H */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 676f967..f9a2e5e 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -974,6 +974,7 @@
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 
 /* --- Helper iov-locking functions --- */
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index f5647b5..89d68d8 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -726,6 +726,29 @@
 	struct ethtool_set_features_block features[0];
 };
 
+/**
+ * struct ethtool_ts_info - holds a device's timestamping and PHC association
+ * @cmd: command number = %ETHTOOL_GET_TS_INFO
+ * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
+ * @phc_index: device index of the associated PHC, or -1 if there is none
+ * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ *
+ * The bits in the 'tx_types' and 'rx_filters' fields correspond to
+ * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
+ * respectively.  For example, if the device supports HWTSTAMP_TX_ON,
+ * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ */
+struct ethtool_ts_info {
+	__u32	cmd;
+	__u32	so_timestamping;
+	__s32	phc_index;
+	__u32	tx_types;
+	__u32	tx_reserved[3];
+	__u32	rx_filters;
+	__u32	rx_reserved[3];
+};
+
 /*
  * %ETHTOOL_SFEATURES changes features present in features[].valid to the
  * values of corresponding bits in features[].requested. Bits in .requested
@@ -788,6 +811,7 @@
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
 
 /**
  * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
@@ -893,6 +917,9 @@
  * 		   and flag of the device.
  * @get_dump_data: Get dump data.
  * @set_dump: Set dump specific flags to the device.
+ * @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
+ *	Drivers supporting transmit time stamps in software should set this to
+ *	ethtool_op_get_ts_info().
  *
  * All operations are optional (i.e. the function pointer may be set
  * to %NULL) and callers must take this into account.  Callers must
@@ -954,6 +981,7 @@
 	int	(*get_dump_data)(struct net_device *,
 				 struct ethtool_dump *, void *);
 	int	(*set_dump)(struct net_device *, struct ethtool_dump *);
+	int	(*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
 
 };
 #endif /* __KERNEL__ */
@@ -1028,6 +1056,7 @@
 #define ETHTOOL_SET_DUMP	0x0000003e /* Set dump settings */
 #define ETHTOOL_GET_DUMP_FLAG	0x0000003f /* Get dump settings */
 #define ETHTOOL_GET_DUMP_DATA	0x00000040 /* Get dump data */
+#define ETHTOOL_GET_TS_INFO	0x00000041 /* Get time stamping and PHC info */
 
 /* compatibility with older code */
 #define SPARC_ETH_GSET		ETHTOOL_GSET
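
A hedged sketch of a driver adopting the new callback; example_ethtool_ops is illustrative, while ethtool_op_get_link() and ethtool_op_get_ts_info() are the generic helpers declared above (the latter is meant for drivers that only do software transmit timestamping, with no associated PHC).

#include <linux/ethtool.h>

static const struct ethtool_ops example_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};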
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 8eeb205..7209099 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -126,7 +126,8 @@
 #define SKF_AD_HATYPE	28
 #define SKF_AD_RXHASH	32
 #define SKF_AD_CPU	36
-#define SKF_AD_MAX	40
+#define SKF_AD_ALU_XOR_X	40
+#define SKF_AD_MAX	44
 #define SKF_NET_OFF   (-0x100000)
 #define SKF_LL_OFF    (-0x200000)
 
@@ -153,6 +154,9 @@
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
 extern unsigned int sk_run_filter(const struct sk_buff *skb,
 				  const struct sock_filter *filter);
+extern int sk_unattached_filter_create(struct sk_filter **pfp,
+				       struct sock_fprog *fprog);
+extern void sk_unattached_filter_destroy(struct sk_filter *fp);
 extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_detach_filter(struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
@@ -228,6 +232,7 @@
 	BPF_S_ANC_HATYPE,
 	BPF_S_ANC_RXHASH,
 	BPF_S_ANC_CPU,
+	BPF_S_ANC_ALU_XOR_X,
 };
 
 #endif /* __KERNEL__ */
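
A hedged in-kernel usage sketch for the new unattached-filter API; the wrapper function and its arguments are illustrative.

#include <linux/filter.h>

static int example_build_filter(struct sock_filter *insns, unsigned short len,
				struct sk_filter **pfp)
{
	struct sock_fprog fprog = { .len = len, .filter = insns };

	/* Builds a classic BPF filter that was never attached to a socket;
	 * release it later with sk_unattached_filter_destroy(). */
	return sk_unattached_filter_create(pfp, &fprog);
}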
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 8ba2c94..8f2ab8f 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -593,7 +593,7 @@
 	__u64	off;
 	__u32	namelen;
 	__u32	type;
-	char name[0];
+	char name[];
 };
 
 #define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5852545..6af8738 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -274,6 +274,33 @@
 	u32 bytes_avail_towrite;
 };
 
+
+/*
+ *
+ * hv_get_ringbuffer_availbytes()
+ *
+ * Get the number of bytes available to read from and to write to
+ * the specified ring buffer.
+ */
+static inline void
+hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
+			  u32 *read, u32 *write)
+{
+	u32 read_loc, write_loc, dsize;
+
+	smp_read_barrier_depends();
+
+	/* Capture the read/write indices before they change */
+	read_loc = rbi->ring_buffer->read_index;
+	write_loc = rbi->ring_buffer->write_index;
+	dsize = rbi->ring_datasize;
+
+	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+		read_loc - write_loc;
+	*read = dsize - *write;
+}
+
+
 /*
  * We use the same version numbering for all Hyper-V modules.
  *
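
A hedged usage sketch; the helper name is illustrative and the struct hv_ring_buffer_info pointer is assumed to describe a channel's inbound ring.

#include <linux/hyperv.h>

static bool example_ring_has_data(struct hv_ring_buffer_info *inbound)
{
	u32 bytes_to_read, bytes_to_write;

	hv_get_ringbuffer_availbytes(inbound, &bytes_to_read, &bytes_to_write);
	return bytes_to_read != 0;
}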
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 2463b61..1f90de0 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -666,23 +666,11 @@
 	unsigned int check_defaults:1;
 	unsigned int reset_registers:1;
 	unsigned int hs_extmute:1;
-	u16 hs_left_step;
-	u16 hs_right_step;
-	u16 hf_left_step;
-	u16 hf_right_step;
 	void (*set_hs_extmute)(int mute);
 };
 
 struct twl4030_vibra_data {
 	unsigned int	coexist;
-
-	/* twl6040 */
-	unsigned int vibldrv_res;	/* left driver resistance */
-	unsigned int vibrdrv_res;	/* right driver resistance */
-	unsigned int viblmotor_res;	/* left motor resistance */
-	unsigned int vibrmotor_res;	/* right motor resistance */
-	int vddvibl_uV;			/* VDDVIBL volt, set 0 for fixed reg */
-	int vddvibr_uV;			/* VDDVIBR volt, set 0 for fixed reg */
 };
 
 struct twl4030_audio_data {
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 4b24ff4..f715750 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -138,6 +138,8 @@
 	IFLA_GROUP,		/* Group the device belongs to */
 	IFLA_NET_NS_FD,
 	IFLA_EXT_MASK,		/* Extended info mask, VFs, etc */
+	IFLA_PROMISCUITY,	/* Promiscuity count: > 0 means acts PROMISC */
+#define IFLA_PROMISCUITY IFLA_PROMISCUITY
 	__IFLA_MAX
 };
 
@@ -253,6 +255,7 @@
 enum {
 	IFLA_MACVLAN_UNSPEC,
 	IFLA_MACVLAN_MODE,
+	IFLA_MACVLAN_FLAGS,
 	__IFLA_MACVLAN_MAX,
 };
 
@@ -265,6 +268,8 @@
 	MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */
 };
 
+#define MACVLAN_FLAG_NOPROMISC	1
+
 /* SR-IOV virtual function management section */
 
 enum {
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index d103dca..f65e8d2 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -60,6 +60,7 @@
 	struct net_device	*lowerdev;
 	struct macvlan_pcpu_stats __percpu *pcpu_stats;
 	enum macvlan_mode	mode;
+	u16			flags;
 	int (*receive)(struct sk_buff *skb);
 	int (*forward)(struct net_device *dev, struct sk_buff *skb);
 	struct macvtap_queue	*taps[MAX_MACVTAP_QUEUES];
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 58404b0c..8185f57a 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -28,10 +28,28 @@
 
 struct team_port {
 	struct net_device *dev;
-	struct hlist_node hlist; /* node in hash list */
+	struct hlist_node hlist; /* node in enabled ports hash list */
 	struct list_head list; /* node in ordinary list */
 	struct team *team;
-	int index;
+	int index; /* index of enabled port. If disabled, it's set to -1 */
+
+	bool linkup; /* either state.linkup or user.linkup */
+
+	struct {
+		bool linkup;
+		u32 speed;
+		u8 duplex;
+	} state;
+
+	/* Values set by userspace */
+	struct {
+		bool linkup;
+		bool linkup_enabled;
+	} user;
+
+	/* Custom gennetlink interface related flags */
+	bool changed;
+	bool removed;
 
 	/*
 	 * A place for storing original values of the device before it
@@ -42,14 +60,6 @@
 		unsigned int mtu;
 	} orig;
 
-	bool linkup;
-	u32 speed;
-	u8 duplex;
-
-	/* Custom gennetlink interface related flags */
-	bool changed;
-	bool removed;
-
 	struct rcu_head rcu;
 };
 
@@ -68,18 +78,30 @@
 enum team_option_type {
 	TEAM_OPTION_TYPE_U32,
 	TEAM_OPTION_TYPE_STRING,
+	TEAM_OPTION_TYPE_BINARY,
+	TEAM_OPTION_TYPE_BOOL,
+};
+
+struct team_gsetter_ctx {
+	union {
+		u32 u32_val;
+		const char *str_val;
+		struct {
+			const void *ptr;
+			u32 len;
+		} bin_val;
+		bool bool_val;
+	} data;
+	struct team_port *port;
 };
 
 struct team_option {
 	struct list_head list;
 	const char *name;
+	bool per_port;
 	enum team_option_type type;
-	int (*getter)(struct team *team, void *arg);
-	int (*setter)(struct team *team, void *arg);
-
-	/* Custom gennetlink interface related flags */
-	bool changed;
-	bool removed;
+	int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
+	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
 };
 
 struct team_mode {
@@ -103,13 +125,15 @@
 	struct mutex lock; /* used for overall locking, e.g. port lists write */
 
 	/*
-	 * port lists with port count
+	 * List of enabled ports and their count
 	 */
-	int port_count;
-	struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES];
-	struct list_head port_list;
+	int en_port_count;
+	struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];
+
+	struct list_head port_list; /* list of all ports */
 
 	struct list_head option_list;
+	struct list_head option_inst_list; /* list of option instances */
 
 	const struct team_mode *mode;
 	struct team_mode_ops ops;
@@ -119,7 +143,7 @@
 static inline struct hlist_head *team_port_index_hash(struct team *team,
 						      int port_index)
 {
-	return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
+	return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
 }
 
 static inline struct team_port *team_get_port_by_index(struct team *team,
@@ -216,6 +240,7 @@
 	TEAM_ATTR_OPTION_TYPE,		/* u8 */
 	TEAM_ATTR_OPTION_DATA,		/* dynamic */
 	TEAM_ATTR_OPTION_REMOVED,	/* flag */
+	TEAM_ATTR_OPTION_PORT_IFINDEX,	/* u32 */ /* for per-port options */
 
 	__TEAM_ATTR_OPTION_MAX,
 	TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index 4deb383..be0ef3d 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -125,8 +125,8 @@
 
 	/* virtual service options */
 	char			sched_name[IP_VS_SCHEDNAME_MAXLEN];
-	unsigned		flags;		/* virtual service flags */
-	unsigned		timeout;	/* persistent timeout in sec */
+	unsigned int		flags;		/* virtual service flags */
+	unsigned int		timeout;	/* persistent timeout in sec */
 	__be32			netmask;	/* persistent netmask */
 };
 
@@ -137,7 +137,7 @@
 	__be16			port;
 
 	/* real server options */
-	unsigned		conn_flags;	/* connection flags */
+	unsigned int		conn_flags;	/* connection flags */
 	int			weight;		/* destination weight */
 
 	/* thresholds for active connections */
@@ -187,8 +187,8 @@
 
 	/* service options */
 	char			sched_name[IP_VS_SCHEDNAME_MAXLEN];
-	unsigned		flags;          /* virtual service flags */
-	unsigned		timeout;	/* persistent timeout */
+	unsigned int		flags;          /* virtual service flags */
+	unsigned int		timeout;	/* persistent timeout */
 	__be32			netmask;	/* persistent netmask */
 
 	/* number of real servers */
@@ -202,7 +202,7 @@
 struct ip_vs_dest_entry {
 	__be32			addr;		/* destination address */
 	__be16			port;
-	unsigned		conn_flags;	/* connection flags */
+	unsigned int		conn_flags;	/* connection flags */
 	int			weight;		/* destination weight */
 
 	__u32		u_threshold;	/* upper threshold */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index bff29c5..b27cfcf 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -49,6 +49,12 @@
  * IRQ_TYPE_LEVEL_LOW		- low level triggered
  * IRQ_TYPE_LEVEL_MASK		- Mask to filter out the level bits
  * IRQ_TYPE_SENSE_MASK		- Mask for all the above bits
+ * IRQ_TYPE_DEFAULT		- For use by some PICs to ask irq_set_type
+ *				  to set up the HW to a sane default (used
+ *				  by irqdomain map() callbacks to synchronize
+ *				  the HW state and SW flags for a newly
+ *				  allocated descriptor).
+ *
  * IRQ_TYPE_PROBE		- Special flag for probing in progress
  *
  * Bits which can be modified via irq_set/clear/modify_status_flags()
@@ -77,6 +83,7 @@
 	IRQ_TYPE_LEVEL_LOW	= 0x00000008,
 	IRQ_TYPE_LEVEL_MASK	= (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
 	IRQ_TYPE_SENSE_MASK	= 0x0000000f,
+	IRQ_TYPE_DEFAULT	= IRQ_TYPE_SENSE_MASK,
 
 	IRQ_TYPE_PROBE		= 0x00000010,
 
@@ -263,6 +270,11 @@
 	d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
 }
 
+static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+{
+	return d->hwirq;
+}
+
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ead4a42..c65740d 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -42,12 +42,6 @@
 /* Number of irqs reserved for a legacy isa controller */
 #define NUM_ISA_INTERRUPTS	16
 
-/* This type is the placeholder for a hardware interrupt number. It has to
- * be big enough to enclose whatever representation is used by a given
- * platform.
- */
-typedef unsigned long irq_hw_number_t;
-
 /**
  * struct irq_domain_ops - Methods for irq_domain objects
  * @match: Match an interrupt controller device node to a host, returns
@@ -104,6 +98,9 @@
 			unsigned int size;
 			unsigned int *revmap;
 		} linear;
+		struct {
+			unsigned int max_irq;
+		} nomap;
 		struct radix_tree_root tree;
 	} revmap_data;
 	const struct irq_domain_ops *ops;
@@ -126,6 +123,7 @@
 					 const struct irq_domain_ops *ops,
 					 void *host_data);
 struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
 					 const struct irq_domain_ops *ops,
 					 void *host_data);
 struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
@@ -134,7 +132,6 @@
 
 extern struct irq_domain *irq_find_host(struct device_node *node);
 extern void irq_set_default_host(struct irq_domain *host);
-extern void irq_set_virq_count(unsigned int count);
 
 static inline struct irq_domain *irq_domain_add_legacy_isa(
 				struct device_node *of_node,
@@ -146,7 +143,6 @@
 }
 extern struct irq_domain *irq_find_host(struct device_node *node);
 extern void irq_set_default_host(struct irq_domain *host);
-extern void irq_set_virq_count(unsigned int count);
 
 
 extern unsigned int irq_create_mapping(struct irq_domain *host,
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 067eda0..be342b9 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -4,29 +4,43 @@
 #include <generated/autoconf.h>
 
 /*
- * Helper macros to use CONFIG_ options in C expressions. Note that
+ * Helper macros to use CONFIG_ options in C/CPP expressions. Note that
  * these only work with boolean and tristate options.
  */
 
 /*
+ * Getting something that works in C and CPP for an arg that may or may
+ * not be defined is tricky.  Here, if we have "#define CONFIG_BOOGER 1"
+ * we match on the placeholder define, insert the "0," for arg1 and generate
+ * the triplet (0, 1, 0).  Then the last step cherry picks the 2nd arg (a one).
+ * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
+ * the last step cherry picks the 2nd arg, we get a zero.
+ */
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) val
+
+/*
  * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
  * 0 otherwise.
  *
  */
 #define IS_ENABLED(option) \
-	(__enabled_ ## option || __enabled_ ## option ## _MODULE)
+	(config_enabled(option) || config_enabled(option##_MODULE))
 
 /*
  * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
  * otherwise. For boolean options, this is equivalent to
  * IS_ENABLED(CONFIG_FOO).
  */
-#define IS_BUILTIN(option) __enabled_ ## option
+#define IS_BUILTIN(option) config_enabled(option)
 
 /*
  * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
  * otherwise.
  */
-#define IS_MODULE(option) __enabled_ ## option ## _MODULE
+#define IS_MODULE(option) config_enabled(option##_MODULE)
 
 #endif /* __LINUX_KCONFIG_H */
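
An illustrative expansion, assuming a hypothetical CONFIG_FOO set to 'y' and CONFIG_BAR left unset; unlike the old __enabled_ helpers, the rewritten macros also work inside preprocessor conditionals.

#include <linux/kconfig.h>

/* CONFIG_FOO expands to 1, __ARG_PLACEHOLDER_1 inserts "0,", and the vararg
 * pick selects the 1.  CONFIG_BAR matches no placeholder, so the pick falls
 * through to the trailing 0. */
#if IS_ENABLED(CONFIG_FOO)	/* 1 */
#endif
#if IS_MODULE(CONFIG_BAR)	/* 0 */
#endif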
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 665a260..72cbf08 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -596,6 +596,7 @@
 
 #ifdef CONFIG_IOMMU_API
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
@@ -609,6 +610,11 @@
 	return 0;
 }
 
+static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
+					 struct kvm_memory_slot *slot)
+{
+}
+
 static inline int kvm_iommu_map_guest(struct kvm *kvm)
 {
 	return -ENODEV;
diff --git a/include/linux/mfd/db5500-prcmu.h b/include/linux/mfd/db5500-prcmu.h
index 9890687..5a049df 100644
--- a/include/linux/mfd/db5500-prcmu.h
+++ b/include/linux/mfd/db5500-prcmu.h
@@ -8,70 +8,6 @@
 #ifndef __MFD_DB5500_PRCMU_H
 #define __MFD_DB5500_PRCMU_H
 
-#ifdef CONFIG_MFD_DB5500_PRCMU
-
-void db5500_prcmu_early_init(void);
-int db5500_prcmu_set_epod(u16 epod_id, u8 epod_state);
-int db5500_prcmu_set_display_clocks(void);
-int db5500_prcmu_disable_dsipll(void);
-int db5500_prcmu_enable_dsipll(void);
-int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
-int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
-void db5500_prcmu_enable_wakeups(u32 wakeups);
-int db5500_prcmu_request_clock(u8 clock, bool enable);
-void db5500_prcmu_config_abb_event_readout(u32 abb_events);
-void db5500_prcmu_get_abb_event_buffer(void __iomem **buf);
-int prcmu_resetout(u8 resoutn, u8 state);
-int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk,
-	bool keep_ap_pll);
-int db5500_prcmu_config_esram0_deep_sleep(u8 state);
-void db5500_prcmu_system_reset(u16 reset_code);
-u16 db5500_prcmu_get_reset_code(void);
-bool db5500_prcmu_is_ac_wake_requested(void);
-int db5500_prcmu_set_arm_opp(u8 opp);
-int db5500_prcmu_get_arm_opp(void);
-
-#else /* !CONFIG_UX500_SOC_DB5500 */
-
-static inline void db5500_prcmu_early_init(void) {}
-
-static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
-{
-	return -ENOSYS;
-}
-
-static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
-{
-	return -ENOSYS;
-}
-
-static inline int db5500_prcmu_request_clock(u8 clock, bool enable)
-{
-	return 0;
-}
-
-static inline int db5500_prcmu_set_display_clocks(void)
-{
-	return 0;
-}
-
-static inline int db5500_prcmu_disable_dsipll(void)
-{
-	return 0;
-}
-
-static inline int db5500_prcmu_enable_dsipll(void)
-{
-	return 0;
-}
-
-static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state)
-{
-	return 0;
-}
-
-static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {}
-
 static inline int prcmu_resetout(u8 resoutn, u8 state)
 {
 	return 0;
@@ -82,8 +18,10 @@
 	return 0;
 }
 
-static inline void db5500_prcmu_get_abb_event_buffer(void __iomem **buf) {}
-static inline void db5500_prcmu_config_abb_event_readout(u32 abb_events) {}
+static inline int db5500_prcmu_request_clock(u8 clock, bool enable)
+{
+	return 0;
+}
 
 static inline int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk,
 	bool keep_ap_pll)
@@ -91,7 +29,10 @@
 	return 0;
 }
 
-static inline void db5500_prcmu_system_reset(u16 reset_code) {}
+static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state)
+{
+	return 0;
+}
 
 static inline u16 db5500_prcmu_get_reset_code(void)
 {
@@ -113,6 +54,51 @@
 	return 0;
 }
 
+static inline void db5500_prcmu_config_abb_event_readout(u32 abb_events) {}
+
+static inline void db5500_prcmu_get_abb_event_buffer(void __iomem **buf) {}
+
+static inline void db5500_prcmu_system_reset(u16 reset_code) {}
+
+static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {}
+
+#ifdef CONFIG_MFD_DB5500_PRCMU
+
+void db5500_prcmu_early_init(void);
+int db5500_prcmu_set_display_clocks(void);
+int db5500_prcmu_disable_dsipll(void);
+int db5500_prcmu_enable_dsipll(void);
+int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
+int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
+
+#else /* !CONFIG_UX500_SOC_DB5500 */
+
+static inline void db5500_prcmu_early_init(void) {}
+
+static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
+{
+	return -ENOSYS;
+}
+
+static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
+{
+	return -ENOSYS;
+}
+
+static inline int db5500_prcmu_set_display_clocks(void)
+{
+	return 0;
+}
+
+static inline int db5500_prcmu_disable_dsipll(void)
+{
+	return 0;
+}
+
+static inline int db5500_prcmu_enable_dsipll(void)
+{
+	return 0;
+}
 
 #endif /* CONFIG_MFD_DB5500_PRCMU */
 
diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h
index a2c6160..0b64b19 100644
--- a/include/linux/mfd/rc5t583.h
+++ b/include/linux/mfd/rc5t583.h
@@ -26,6 +26,7 @@
 
 #include <linux/mutex.h>
 #include <linux/types.h>
+#include <linux/regmap.h>
 
 #define RC5T583_MAX_REGS		0xF8
 
@@ -279,14 +280,44 @@
 	bool		enable_shutdown;
 };
 
-int rc5t583_write(struct device *dev, u8 reg, uint8_t val);
-int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val);
-int rc5t583_set_bits(struct device *dev, unsigned int reg,
-		unsigned int bit_mask);
-int rc5t583_clear_bits(struct device *dev, unsigned int reg,
-		unsigned int bit_mask);
-int rc5t583_update(struct device *dev, unsigned int reg,
-		unsigned int val, unsigned int mask);
+static inline int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val)
+{
+	struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
+	return regmap_write(rc5t583->regmap, reg, val);
+}
+
+static inline int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val)
+{
+	struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
+	unsigned int ival;
+	int ret;
+	ret = regmap_read(rc5t583->regmap, reg, &ival);
+	if (!ret)
+		*val = (uint8_t)ival;
+	return ret;
+}
+
+static inline int rc5t583_set_bits(struct device *dev, unsigned int reg,
+			unsigned int bit_mask)
+{
+	struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
+	return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask);
+}
+
+static inline int rc5t583_clear_bits(struct device *dev, unsigned int reg,
+			unsigned int bit_mask)
+{
+	struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
+	return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0);
+}
+
+static inline int rc5t583_update(struct device *dev, unsigned int reg,
+		unsigned int val, unsigned int mask)
+{
+	struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
+	return regmap_update_bits(rc5t583->regmap, reg, mask, val);
+}
+
 int rc5t583_ext_power_req_config(struct device *dev, int deepsleep_id,
 	int ext_pwr_req, int deepsleep_slot_nr);
 int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base);
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 9bc9ac6..b15b5f0 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -174,8 +174,35 @@
 #define TWL6040_SYSCLK_SEL_LPPLL	0
 #define TWL6040_SYSCLK_SEL_HPPLL	1
 
+struct twl6040_codec_data {
+	u16 hs_left_step;
+	u16 hs_right_step;
+	u16 hf_left_step;
+	u16 hf_right_step;
+};
+
+struct twl6040_vibra_data {
+	unsigned int vibldrv_res;	/* left driver resistance */
+	unsigned int vibrdrv_res;	/* right driver resistance */
+	unsigned int viblmotor_res;	/* left motor resistance */
+	unsigned int vibrmotor_res;	/* right motor resistance */
+	int vddvibl_uV;			/* VDDVIBL volt, set 0 for fixed reg */
+	int vddvibr_uV;			/* VDDVIBR volt, set 0 for fixed reg */
+};
+
+struct twl6040_platform_data {
+	int audpwron_gpio;	/* audio power-on gpio */
+	unsigned int irq_base;
+
+	struct twl6040_codec_data *codec;
+	struct twl6040_vibra_data *vibra;
+};
+
+struct regmap;
+
 struct twl6040 {
 	struct device *dev;
+	struct regmap *regmap;
 	struct mutex mutex;
 	struct mutex io_mutex;
 	struct mutex irq_mutex;
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 9958ff2..1f3860a 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -150,6 +150,10 @@
 	/* statistics commands */
 	MLX4_CMD_QUERY_IF_STAT	 = 0X54,
 	MLX4_CMD_SET_IF_STAT	 = 0X55,
+
+	/* set port opcode modifiers */
+	MLX4_SET_PORT_PRIO2TC = 0x8,
+	MLX4_SET_PORT_SCHEDULER  = 0x9,
 };
 
 enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 834c96c..6d02824 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -628,6 +628,9 @@
 			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 			   u8 promisc);
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+		u8 *pg, u16 *ratelimit);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 091f9e7..96005d7 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -139,7 +139,8 @@
 	u8			rgid[16];
 	u8			sched_queue;
 	u8			vlan_index;
-	u8			reserved3[2];
+	u8			feup;
+	u8			reserved3;
 	u8			reserved4[2];
 	u8			dmac[6];
 };
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d8738a4..74aa71b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1393,29 +1393,20 @@
 
 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long pgoff);
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long flags,
 	vm_flags_t vm_flags, unsigned long pgoff);
-
-static inline unsigned long do_mmap(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long offset)
-{
-	unsigned long ret = -EINVAL;
-	if ((offset + PAGE_ALIGN(len)) < offset)
-		goto out;
-	if (!(offset & ~PAGE_MASK))
-		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-out:
-	return ret;
-}
-
+extern unsigned long do_mmap(struct file *, unsigned long,
+        unsigned long, unsigned long,
+        unsigned long, unsigned long);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
 
-extern unsigned long do_brk(unsigned long, unsigned long);
+/* These take the mm semaphore themselves */
+extern unsigned long vm_brk(unsigned long, unsigned long);
+extern int vm_munmap(unsigned long, size_t);
+extern unsigned long vm_mmap(struct file *, unsigned long,
+        unsigned long, unsigned long,
+        unsigned long, unsigned long);
 
 /* truncate.c */
 extern void truncate_inode_pages(struct address_space *, loff_t);
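
A hedged sketch of the new convention (the wrapper and its arguments are illustrative): vm_mmap(), vm_munmap() and vm_brk() take mmap_sem themselves, so callers drop the explicit locking they previously needed around do_mmap()/do_munmap().

#include <linux/mm.h>

static unsigned long example_map(struct file *file, unsigned long size,
				 unsigned long prot, unsigned long flags)
{
	/* Previously: down_write(&current->mm->mmap_sem); do_mmap(...);
	 * up_write(&current->mm->mmap_sem).  vm_mmap() now handles the
	 * mmap_sem locking internally. */
	return vm_mmap(file, 0, size, prot, flags, 0);
}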
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 01beae7..629b823 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -481,7 +481,7 @@
 	struct device_driver drv;
 	int (*probe)(struct mmc_card *);
 	void (*remove)(struct mmc_card *);
-	int (*suspend)(struct mmc_card *, pm_message_t);
+	int (*suspend)(struct mmc_card *);
 	int (*resume)(struct mmc_card *);
 };
 
diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h
index b188f68..275e5d6 100644
--- a/include/linux/neighbour.h
+++ b/include/linux/neighbour.h
@@ -33,6 +33,9 @@
 #define NTF_PROXY	0x08	/* == ATF_PUBL */
 #define NTF_ROUTER	0x80
 
+#define NTF_SELF	0x02
+#define NTF_MASTER	0x04
+
 /*
  *	Neighbor Cache Entry States.
  */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5cbaa20..e0b70e9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -54,6 +54,7 @@
 #include <net/netprio_cgroup.h>
 
 #include <linux/netdev_features.h>
+#include <linux/neighbour.h>
 
 struct netpoll_info;
 struct device;
@@ -288,7 +289,7 @@
 struct header_ops {
 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
 			   unsigned short type, const void *daddr,
-			   const void *saddr, unsigned len);
+			   const void *saddr, unsigned int len);
 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
 	int	(*rebuild)(struct sk_buff *skb);
 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
@@ -905,6 +906,16 @@
  *	feature set might be less than what was returned by ndo_fix_features()).
  *	Must return >0 or -errno if it changed dev->features itself.
  *
+ * int (*ndo_fdb_add)(struct ndmsg *ndm, struct net_device *dev,
+ *		      unsigned char *addr, u16 flags)
+ *	Adds an FDB entry to dev for addr.
+ * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
+ *		      unsigned char *addr)
+ *	Deletes the FDB entry from dev corresponding to addr.
+ * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
+ *		       struct net_device *dev, int idx)
+ *	Used to add FDB entries to dump requests. Implementers should add
+ *	entries to skb and update idx with the number of entries.
  */
 struct net_device_ops {
 	int			(*ndo_init)(struct net_device *dev);
@@ -1002,6 +1013,18 @@
 						    netdev_features_t features);
 	int			(*ndo_neigh_construct)(struct neighbour *n);
 	void			(*ndo_neigh_destroy)(struct neighbour *n);
+
+	int			(*ndo_fdb_add)(struct ndmsg *ndm,
+					       struct net_device *dev,
+					       unsigned char *addr,
+					       u16 flags);
+	int			(*ndo_fdb_del)(struct ndmsg *ndm,
+					       struct net_device *dev,
+					       unsigned char *addr);
+	int			(*ndo_fdb_dump)(struct sk_buff *skb,
+						struct netlink_callback *cb,
+						struct net_device *dev,
+						int idx);
 };
 
 /*
@@ -1689,7 +1712,7 @@
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 				  unsigned short type,
 				  const void *daddr, const void *saddr,
-				  unsigned len)
+				  unsigned int len)
 {
 	if (!dev->header_ops || !dev->header_ops->create)
 		return 0;
@@ -1740,7 +1763,7 @@
 	unsigned int		input_queue_head;
 	unsigned int		input_queue_tail;
 #endif
-	unsigned		dropped;
+	unsigned int		dropped;
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
 };
@@ -1925,7 +1948,7 @@
 }
 
 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
-					     unsigned pkts, unsigned bytes)
+					     unsigned int pkts, unsigned int bytes)
 {
 #ifdef CONFIG_BQL
 	if (unlikely(!bytes))
@@ -1949,7 +1972,7 @@
 }
 
 static inline void netdev_completed_queue(struct net_device *dev,
-					  unsigned pkts, unsigned bytes)
+					  unsigned int pkts, unsigned int bytes)
 {
 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
 }
@@ -2144,9 +2167,9 @@
 extern bool		dev_valid_name(const char *name);
 extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
 extern int		dev_ethtool(struct net *net, struct ifreq *);
-extern unsigned		dev_get_flags(const struct net_device *);
+extern unsigned int	dev_get_flags(const struct net_device *);
 extern int		__dev_change_flags(struct net_device *, unsigned int flags);
-extern int		dev_change_flags(struct net_device *, unsigned);
+extern int		dev_change_flags(struct net_device *, unsigned int);
 extern void		__dev_notify_flags(struct net_device *, unsigned int old_flags);
 extern int		dev_change_name(struct net_device *, const char *);
 extern int		dev_set_alias(struct net_device *, const char *, size_t);
@@ -2546,6 +2569,7 @@
 
 /* Functions used for unicast addresses handling */
 extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr);
 extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
 extern int dev_uc_sync(struct net_device *to, struct net_device *from);
 extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
@@ -2555,6 +2579,7 @@
 /* Functions used for multicast addresses handling */
 extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
 extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr);
 extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
 extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
 extern int dev_mc_sync(struct net_device *to, struct net_device *from);
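
A hedged sketch of a driver wiring up the new FDB hooks; example_fdb_add() is hypothetical, while ndo_dflt_fdb_dump() is the default dump helper declared in rtnetlink.h further down in this patch.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_fdb_add(struct ndmsg *ndm, struct net_device *dev,
			   unsigned char *addr, u16 flags)
{
	/* Hypothetical: program addr into an embedded switch, honouring
	 * NTF_SELF/NTF_MASTER from ndm->ndm_flags. */
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_fdb_add	= example_fdb_add,
	.ndo_fdb_dump	= ndo_dflt_fdb_dump,
};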
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 29734be..ff9c84c 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -154,12 +154,6 @@
 int nf_register_sockopt(struct nf_sockopt_ops *reg);
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
 
-#ifdef CONFIG_SYSCTL
-/* Sysctl registration */
-extern struct ctl_path nf_net_netfilter_sysctl_path[];
-extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
-#endif /* CONFIG_SYSCTL */
-
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
 #if defined(CONFIG_JUMP_LABEL)
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 2f8e18a..2edc64c 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -411,26 +411,32 @@
 #define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
 #define ipset_nest_end(skb, start)  nla_nest_end(skb, start)
 
-#define NLA_PUT_IPADDR4(skb, type, ipaddr)			\
-do {								\
-	struct nlattr *__nested = ipset_nest_start(skb, type);	\
-								\
-	if (!__nested)						\
-		goto nla_put_failure;				\
-	NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);	\
-	ipset_nest_end(skb, __nested);				\
-} while (0)
+static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
+{
+	struct nlattr *__nested = ipset_nest_start(skb, type);
+	int ret;
 
-#define NLA_PUT_IPADDR6(skb, type, ipaddrptr)			\
-do {								\
-	struct nlattr *__nested = ipset_nest_start(skb, type);	\
-								\
-	if (!__nested)						\
-		goto nla_put_failure;				\
-	NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6,			\
-		sizeof(struct in6_addr), ipaddrptr);		\
-	ipset_nest_end(skb, __nested);				\
-} while (0)
+	if (!__nested)
+		return -EMSGSIZE;
+	ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
+	if (!ret)
+		ipset_nest_end(skb, __nested);
+	return ret;
+}
+
+static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, const struct in6_addr *ipaddrptr)
+{
+	struct nlattr *__nested = ipset_nest_start(skb, type);
+	int ret;
+
+	if (!__nested)
+		return -EMSGSIZE;
+	ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6,
+		      sizeof(struct in6_addr), ipaddrptr);
+	if (!ret)
+		ipset_nest_end(skb, __nested);
+	return ret;
+}
 
 /* Get address from skbuff */
 static inline __be32
@@ -472,8 +478,8 @@
 
 #define IP_SET_OP_GET_BYNAME	0x00000006	/* Get set index by name */
 struct ip_set_req_get_set {
-	unsigned op;
-	unsigned version;
+	unsigned int op;
+	unsigned int version;
 	union ip_set_name_index set;
 };
 
@@ -482,8 +488,8 @@
 
 #define IP_SET_OP_VERSION	0x00000100	/* Ask kernel version */
 struct ip_set_req_version {
-	unsigned op;
-	unsigned version;
+	unsigned int op;
+	unsigned int version;
 };
 
 #endif /*_IP_SET_H */
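
A hedged caller sketch: the new helpers return an error instead of hiding a goto inside a macro, so dump code jumps to its nla_put_failure label explicitly (IPSET_ATTR_IP and the surrounding label are assumed from typical ipset list/save routines).

	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, ip))
		goto nla_put_failure;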
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index 05a5d72..289b62d 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -594,17 +594,20 @@
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
-	NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
-		      htonl(jhash_size(h->table->htable_bits)));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
+	if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
+			  htonl(jhash_size(h->table->htable_bits))) ||
+	    nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
+		goto nla_put_failure;
 #ifdef IP_SET_HASH_WITH_NETMASK
-	if (h->netmask != HOST_MASK)
-		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
+	if (h->netmask != HOST_MASK &&
+	    nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
+		goto nla_put_failure;
 #endif
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
-	if (with_timeout(h->timeout))
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
+	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
+	    (with_timeout(h->timeout) &&
+	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout))))
+		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
 	return 0;
diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
index f35b6b4..b0821f4 100644
--- a/include/linux/netfilter/nf_conntrack_h323_types.h
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -7,12 +7,12 @@
 
 typedef struct TransportAddress_ipAddress {	/* SEQUENCE */
 	int options;		/* No use */
-	unsigned ip;
+	unsigned int ip;
 } TransportAddress_ipAddress;
 
 typedef struct TransportAddress_ip6Address {	/* SEQUENCE */
 	int options;		/* No use */
-	unsigned ip;
+	unsigned int ip;
 } TransportAddress_ip6Address;
 
 typedef struct TransportAddress {	/* CHOICE */
@@ -96,12 +96,12 @@
 
 typedef struct UnicastAddress_iPAddress {	/* SEQUENCE */
 	int options;		/* No use */
-	unsigned network;
+	unsigned int network;
 } UnicastAddress_iPAddress;
 
 typedef struct UnicastAddress_iP6Address {	/* SEQUENCE */
 	int options;		/* No use */
-	unsigned network;
+	unsigned int network;
 } UnicastAddress_iP6Address;
 
 typedef struct UnicastAddress {	/* CHOICE */
@@ -698,7 +698,7 @@
 	} options;
 	RegistrationRequest_callSignalAddress callSignalAddress;
 	RegistrationRequest_rasAddress rasAddress;
-	unsigned timeToLive;
+	unsigned int timeToLive;
 } RegistrationRequest;
 
 typedef struct RegistrationConfirm_callSignalAddress {	/* SEQUENCE OF */
@@ -730,7 +730,7 @@
 		eRegistrationConfirm_genericData = (1 << 12),
 	} options;
 	RegistrationConfirm_callSignalAddress callSignalAddress;
-	unsigned timeToLive;
+	unsigned int timeToLive;
 } RegistrationConfirm;
 
 typedef struct UnregistrationRequest_callSignalAddress {	/* SEQUENCE OF */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 6fd1f0d..a1048c1 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -80,7 +80,7 @@
 extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
 
 extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
-extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group,
+extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group,
 			  int echo, gfp_t flags);
 extern int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error);
 extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags);
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index f549adc..1bc898b 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -287,7 +287,17 @@
 				  struct xt_table *table);
 
 /* Check for an extension */
-extern int ip6t_ext_hdr(u8 nexthdr);
+static inline int
+ip6t_ext_hdr(u8 nexthdr)
+{	return (nexthdr == IPPROTO_HOPOPTS) ||
+	       (nexthdr == IPPROTO_ROUTING) ||
+	       (nexthdr == IPPROTO_FRAGMENT) ||
+	       (nexthdr == IPPROTO_ESP) ||
+	       (nexthdr == IPPROTO_AH) ||
+	       (nexthdr == IPPROTO_NONE) ||
+	       (nexthdr == IPPROTO_DSTOPTS);
+}
+
 /* find specified header and get offset to it */
 extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
 			 int target, unsigned short *fragoff);
diff --git a/include/linux/nfsd/Kbuild b/include/linux/nfsd/Kbuild
index b8d4001..5b7d84a 100644
--- a/include/linux/nfsd/Kbuild
+++ b/include/linux/nfsd/Kbuild
@@ -1,3 +1,4 @@
+header-y += cld.h
 header-y += debug.h
 header-y += export.h
 header-y += nfsfh.h
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6fe0a37..f092032 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -412,6 +412,9 @@
 	/* Clears up any memory if needed */
 	void (*remove)(struct phy_device *phydev);
 
+	/* Handles ethtool queries for hardware time stamping. */
+	int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
+
 	/* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */
 	int  (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr);
 
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index fee4349..e4d1de7 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -12,6 +12,8 @@
 #ifndef __LINUX_PINCTRL_MACHINE_H
 #define __LINUX_PINCTRL_MACHINE_H
 
+#include <linux/bug.h>
+
 #include "pinctrl-state.h"
 
 enum pinctrl_map_type {
@@ -148,7 +150,7 @@
 #define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs)		\
 	PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs)
 
-#ifdef CONFIG_PINMUX
+#ifdef CONFIG_PINCTRL
 
 extern int pinctrl_register_mappings(struct pinctrl_map const *map,
 				unsigned num_maps);
diff --git a/include/linux/platform_data/wiznet.h b/include/linux/platform_data/wiznet.h
new file mode 100644
index 0000000..b5d8c19
--- /dev/null
+++ b/include/linux/platform_data/wiznet.h
@@ -0,0 +1,24 @@
+/*
+ * Ethernet driver for the WIZnet W5x00 chip.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef PLATFORM_DATA_WIZNET_H
+#define PLATFORM_DATA_WIZNET_H
+
+#include <linux/if_ether.h>
+
+struct wiznet_platform_data {
+	int	link_gpio;
+	u8	mac_addr[ETH_ALEN];
+};
+
+#ifndef CONFIG_WIZNET_BUS_SHIFT
+#define CONFIG_WIZNET_BUS_SHIFT 0
+#endif
+
+#define W5100_BUS_DIRECT_SIZE	(0x8000 << CONFIG_WIZNET_BUS_SHIFT)
+#define W5300_BUS_DIRECT_SIZE	(0x0400 << CONFIG_WIZNET_BUS_SHIFT)
+
+#endif /* PLATFORM_DATA_WIZNET_H */
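
A hedged board-file sketch for the new platform data; the MAC address and the absence of a link GPIO are placeholders, not recommendations.

#include <linux/platform_data/wiznet.h>

static struct wiznet_platform_data example_w5100_pdata = {
	.link_gpio	= -1,		/* no link-status GPIO wired up */
	.mac_addr	= { 0x00, 0x08, 0xdc, 0x00, 0x00, 0x01 },
};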
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index dd2e44f..945704c 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -136,4 +136,12 @@
 extern void ptp_clock_event(struct ptp_clock *ptp,
 			    struct ptp_clock_event *event);
 
+/**
+ * ptp_clock_index() - obtain the device index of a PTP clock
+ *
+ * @ptp:    The clock obtained from ptp_clock_register().
+ */
+
+extern int ptp_clock_index(struct ptp_clock *ptp);
+
 #endif
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 577592e..2c1de89 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -801,6 +801,10 @@
 	return table;
 }
 
+extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
+			     struct netlink_callback *cb,
+			     struct net_device *dev,
+			     int idx);
 #endif /* __KERNEL__ */
 
 
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index f51bf2e..2db407a 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -357,7 +357,7 @@
 #define UPF_CONS_FLOW		((__force upf_t) (1 << 23))
 #define UPF_SHARE_IRQ		((__force upf_t) (1 << 24))
 #define UPF_EXAR_EFR		((__force upf_t) (1 << 25))
-#define UPF_IIR_ONCE		((__force upf_t) (1 << 26))
+#define UPF_BUG_THRE		((__force upf_t) (1 << 26))
 /* The exact UART type is known and should not be probed.  */
 #define UPF_FIXED_TYPE		((__force upf_t) (1 << 27))
 #define UPF_BOOT_AUTOCONF	((__force upf_t) (1 << 28))
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3337027..4a656b5 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -117,11 +117,11 @@
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 struct nf_bridge_info {
-	atomic_t use;
-	struct net_device *physindev;
-	struct net_device *physoutdev;
-	unsigned int mask;
-	unsigned long data[32 / sizeof(unsigned long)];
+	atomic_t		use;
+	unsigned int		mask;
+	struct net_device	*physindev;
+	struct net_device	*physoutdev;
+	unsigned long		data[32 / sizeof(unsigned long)];
 };
 #endif
 
@@ -238,11 +238,12 @@
 /*
  * The callback notifies userspace to release buffers when skb DMA is done in
  * lower device, the skb last reference should be 0 when calling this.
- * The desc is used to track userspace buffer index.
+ * The ctx field is used to track device context.
+ * The desc field is used to track userspace buffer index.
  */
 struct ubuf_info {
-	void (*callback)(void *);
-	void *arg;
+	void (*callback)(struct ubuf_info *);
+	void *ctx;
 	unsigned long desc;
 };
 
@@ -481,6 +482,7 @@
 	union {
 		__u32		mark;
 		__u32		dropcount;
+		__u32		avail_size;
 	};
 
 	sk_buff_data_t		transport_header;
@@ -1366,6 +1368,18 @@
 }
 
 /**
+ *	skb_availroom - bytes at buffer end
+ *	@skb: buffer to check
+ *
+ *	Return the number of bytes of free space at the tail of an sk_buff
+ *	allocated by sk_stream_alloc()
+ */
+static inline int skb_availroom(const struct sk_buff *skb)
+{
+	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
+}
+
+/**
  *	skb_reserve - adjust headroom
  *	@skb: buffer to alter
  *	@len: bytes to move
@@ -1949,8 +1963,8 @@
 	return -EFAULT;
 }
 
-static inline int skb_can_coalesce(struct sk_buff *skb, int i,
-				   const struct page *page, int off)
+static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
+				    const struct page *page, int off)
 {
 	if (i) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1958,7 +1972,7 @@
 		return page == skb_frag_page(frag) &&
 		       off == frag->page_offset + skb_frag_size(frag);
 	}
-	return 0;
+	return false;
 }
 
 static inline int __skb_linearize(struct sk_buff *skb)
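
A hedged sketch of the intended use, loosely mirroring how a stream protocol decides whether a partially filled transmit skb can take more payload; the helper and 'seglen' are illustrative.

#include <linux/skbuff.h>

static bool example_can_append(const struct sk_buff *skb, int seglen)
{
	/* Only linear skbs allocated by sk_stream_alloc() report free space;
	 * skb_availroom() is 0 for nonlinear skbs. */
	return skb_availroom(skb) >= seglen;
}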
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b84bbd4..25d6322 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -68,13 +68,13 @@
 	__kernel_size_t	msg_iovlen;	/* Number of blocks		*/
 	void 	*	msg_control;	/* Per protocol magic (eg BSD file descriptor passing) */
 	__kernel_size_t	msg_controllen;	/* Length of cmsg list */
-	unsigned	msg_flags;
+	unsigned int	msg_flags;
 };
 
 /* For recvmmsg/sendmmsg */
 struct mmsghdr {
 	struct msghdr   msg_hdr;
-	unsigned        msg_len;
+	unsigned int        msg_len;
 };
 
 /*
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 6a40c76..1747b67 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -3,15 +3,11 @@
 
 #include <linux/compiler.h>
 
-#undef NULL
-#if defined(__cplusplus)
-#define NULL 0
-#else
-#define NULL ((void *)0)
-#endif
-
 #ifdef __KERNEL__
 
+#undef NULL
+#define NULL ((void *)0)
+
 enum {
 	false	= 0,
 	true	= 1
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 0dddc9e..f85c93d 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,6 +28,51 @@
 
 #include <linux/platform_device.h>
 
+#define STMMAC_RX_COE_NONE	0
+#define STMMAC_RX_COE_TYPE1	1
+#define STMMAC_RX_COE_TYPE2	2
+
+/* Define the macros for CSR clock range parameters to be passed by
+ * platform code.
+ * This could also be configured at run time using the CPU freq framework. */
+
+/* MDC Clock Selection define */
+#define	STMMAC_CSR_60_100M	0x0	/* MDC = clk_csr_i/42 */
+#define	STMMAC_CSR_100_150M	0x1	/* MDC = clk_csr_i/62 */
+#define	STMMAC_CSR_20_35M	0x2	/* MDC = clk_csr_i/16 */
+#define	STMMAC_CSR_35_60M	0x3	/* MDC = clk_csr_i/26 */
+#define	STMMAC_CSR_150_250M	0x4	/* MDC = clk_csr_i/102 */
+#define	STMMAC_CSR_250_300M	0x5	/* MDC = clk_csr_i/122 */
+
+/* The MDC clock can be set higher than the IEEE 802.3 specified
+ * frequency limit of 2.5 MHz by programming a clock divider other
+ * than the values defined above. The resulting MDIO clock frequency
+ * of 12.5 MHz is applicable to interfacing chips that support higher
+ * MDC clocks.
+ * The MDC clock selection macros need to be defined for an MDC clock
+ * rate of 12.5 MHz, corresponding to the following selection.
+ */
+#define STMMAC_CSR_I_4		0x8	/* clk_csr_i/4 */
+#define STMMAC_CSR_I_6		0x9	/* clk_csr_i/6 */
+#define STMMAC_CSR_I_8		0xA	/* clk_csr_i/8 */
+#define STMMAC_CSR_I_10		0xB	/* clk_csr_i/10 */
+#define STMMAC_CSR_I_12		0xC	/* clk_csr_i/12 */
+#define STMMAC_CSR_I_14		0xD	/* clk_csr_i/14 */
+#define STMMAC_CSR_I_16		0xE	/* clk_csr_i/16 */
+#define STMMAC_CSR_I_18		0xF	/* clk_csr_i/18 */
+
+/* AXI DMA burst lengths supported */
+#define DMA_AXI_BLEN_4		(1 << 1)
+#define DMA_AXI_BLEN_8		(1 << 2)
+#define DMA_AXI_BLEN_16		(1 << 3)
+#define DMA_AXI_BLEN_32		(1 << 4)
+#define DMA_AXI_BLEN_64		(1 << 5)
+#define DMA_AXI_BLEN_128	(1 << 6)
+#define DMA_AXI_BLEN_256	(1 << 7)
+#define DMA_AXI_BLEN_ALL (DMA_AXI_BLEN_4 | DMA_AXI_BLEN_8 | DMA_AXI_BLEN_16 \
+			| DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \
+			| DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256)
+
 /* Platform data for platform device structure's platform_data field */
 
 struct stmmac_mdio_bus_data {
@@ -38,16 +83,24 @@
 	int probed_phy_irq;
 };
 
+struct stmmac_dma_cfg {
+	int pbl;
+	int fixed_burst;
+	int burst_len;
+};
+
 struct plat_stmmacenet_data {
+	char *phy_bus_name;
 	int bus_id;
 	int phy_addr;
 	int interface;
 	struct stmmac_mdio_bus_data *mdio_bus_data;
-	int pbl;
+	struct stmmac_dma_cfg *dma_cfg;
 	int clk_csr;
 	int has_gmac;
 	int enh_desc;
 	int tx_coe;
+	int rx_coe;
 	int bugged_jumbo;
 	int pmt;
 	int force_sf_dma_mode;
@@ -56,6 +109,7 @@
 	int (*init)(struct platform_device *pdev);
 	void (*exit)(struct platform_device *pdev);
 	void *custom_cfg;
+	void *custom_data;
 	void *bsp_priv;
 };
 #endif
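
The new dma_cfg pointer and the CSR/AXI selection macros above are meant to be filled in by board code. A minimal sketch of such platform data, assuming a hypothetical board (all symbol names and values below are illustrative, not part of this patch):

#include <linux/stmmac.h>

/* Burst configuration handed to the DMA engine via the new dma_cfg hook. */
static struct stmmac_dma_cfg example_dma_cfg = {
	.pbl		= 32,			/* programmable burst length */
	.fixed_burst	= 1,
	.burst_len	= DMA_AXI_BLEN_16 | DMA_AXI_BLEN_32,
};

static struct plat_stmmacenet_data example_stmmac_pdata = {
	.bus_id		= 0,
	.phy_addr	= -1,			/* scan the MDIO bus */
	.clk_csr	= STMMAC_CSR_100_150M,	/* MDC = clk_csr_i/62 */
	.rx_coe		= STMMAC_RX_COE_TYPE2,
	.has_gmac	= 1,
	.dma_cfg	= &example_dma_cfg,
};
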
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b6c62d2..9865936 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -106,6 +106,17 @@
 #define TCP_THIN_LINEAR_TIMEOUTS 16      /* Use linear timeouts for thin streams*/
 #define TCP_THIN_DUPACK         17      /* Fast retrans. after 1 dupack */
 #define TCP_USER_TIMEOUT	18	/* How long for loss retry before timeout */
+#define TCP_REPAIR		19	/* TCP sock is under repair right now */
+#define TCP_REPAIR_QUEUE	20
+#define TCP_QUEUE_SEQ		21
+#define TCP_REPAIR_OPTIONS	22
+
+enum {
+	TCP_NO_QUEUE,
+	TCP_RECV_QUEUE,
+	TCP_SEND_QUEUE,
+	TCP_QUEUES_NR,
+};
 
 /* for TCP_INFO socket option */
 #define TCPI_OPT_TIMESTAMPS	1
@@ -353,7 +364,9 @@
 	u8	nonagle     : 4,/* Disable Nagle algorithm?             */
 		thin_lto    : 1,/* Use linear timeouts for thin streams */
 		thin_dupack : 1,/* Fast retransmit on first dupack      */
-		unused      : 2;
+		repair      : 1,
+		unused      : 1;
+	u8	repair_queue;
 
 /* RTT measurement */
 	u32	srtt;		/* smoothed round trip time << 3	*/
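
The TCP_REPAIR* socket options above are driven from user space by checkpoint/restore tools. A hedged sketch of the intended call sequence (the constants mirror the header; the function name, sequence value, and error handling are placeholders):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_REPAIR
#define TCP_REPAIR		19
#define TCP_REPAIR_QUEUE	20
#define TCP_QUEUE_SEQ		21
#endif

/* Switch a socket into repair mode, select the send queue and force its
 * next sequence number. */
static int tcp_repair_set_snd_seq(int fd, unsigned int seq)
{
	int on = 1;
	int q = 2;	/* TCP_SEND_QUEUE from the enum above */

	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ, &seq, sizeof(seq));
}
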
diff --git a/include/linux/types.h b/include/linux/types.h
index e5fa503..7f480db 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -210,6 +210,12 @@
 
 typedef phys_addr_t resource_size_t;
 
+/*
+ * This type is the placeholder for a hardware interrupt number. It has to be
+ * big enough to enclose whatever representation is used by a given platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
 typedef struct {
 	int counter;
 } atomic_t;
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index f67810f..38ab3f4 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -94,6 +94,7 @@
 
 	struct usb_otg		*otg;
 
+	struct device		*io_dev;
 	struct usb_phy_io_ops	*io_ops;
 	void __iomem		*io_priv;
 
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index fbb666b..4742838 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -28,13 +28,6 @@
 /* parity check flag */
 #define RELEVANT_IFLAG(iflag)	(iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
 
-enum port_dev_state {
-	PORT_UNREGISTERED,
-	PORT_REGISTERING,
-	PORT_REGISTERED,
-	PORT_UNREGISTERING,
-};
-
 /* USB serial flags */
 #define USB_SERIAL_WRITE_BUSY	0
 
@@ -124,7 +117,6 @@
 	char			throttle_req;
 	unsigned long		sysrq; /* sysrq timeout */
 	struct device		dev;
-	enum port_dev_state	dev_state;
 };
 #define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev)
 
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 9c3120d..b572f80 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -47,6 +47,8 @@
  */
 #define VGA_DEFAULT_DEVICE     (NULL)
 
+struct pci_dev;
+
 /* For use by clients */
 
 /**
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 970d5a2..2470f54 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -49,8 +49,11 @@
 #define VIRTIO_NET_F_CTRL_RX	18	/* Control channel RX mode support */
 #define VIRTIO_NET_F_CTRL_VLAN	19	/* Control channel VLAN filtering */
 #define VIRTIO_NET_F_CTRL_RX_EXTRA 20	/* Extra RX mode control support */
+#define VIRTIO_NET_F_GUEST_ANNOUNCE 21	/* Guest can announce device on the
+					 * network */
 
 #define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
+#define VIRTIO_NET_S_ANNOUNCE	2	/* Announcement is needed */
 
 struct virtio_net_config {
 	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
@@ -152,4 +155,15 @@
  #define VIRTIO_NET_CTRL_VLAN_ADD             0
  #define VIRTIO_NET_CTRL_VLAN_DEL             1
 
+/*
+ * Control link announce acknowledgement
+ *
+ * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
+ * the driver has received the notification; the device clears the
+ * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
+ * this command.
+ */
+#define VIRTIO_NET_CTRL_ANNOUNCE       3
+ #define VIRTIO_NET_CTRL_ANNOUNCE_ACK         0
+
 #endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 757a176..27f450b 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -92,7 +92,7 @@
 					const struct in6_addr *addr);
 
 static inline unsigned long addrconf_timeout_fixup(u32 timeout,
-						    unsigned unit)
+						   unsigned int unit)
 {
 	if (timeout == 0xffffffff)
 		return ~0UL;
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index ca68e2c..2ee33da 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -22,7 +22,7 @@
 struct unix_address {
 	atomic_t	refcnt;
 	int		len;
-	unsigned	hash;
+	unsigned int	hash;
 	struct sockaddr_un name[0];
 };
 
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 94e09d3..5d23521 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -215,7 +215,7 @@
 	struct ax25_dev		*next;
 	struct net_device	*dev;
 	struct net_device	*forward;
-	struct ctl_table	*systable;
+	struct ctl_table_header *sysheader;
 	int			values[AX25_MAX_VALUES];
 #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
 	ax25_dama_info		dama;
@@ -441,11 +441,11 @@
 
 /* sysctl_net_ax25.c */
 #ifdef CONFIG_SYSCTL
-extern void ax25_register_sysctl(void);
-extern void ax25_unregister_sysctl(void);
+extern int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
+extern void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
 #else
-static inline void ax25_register_sysctl(void) {};
-static inline void ax25_unregister_sysctl(void) {};
+static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; }
+static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {}
 #endif /* CONFIG_SYSCTL */
 
 #endif
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index 6db8ecf..439dadc 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -123,12 +123,21 @@
 	bool piggy_desc;
 };
 
+/* Priority mapping */
+enum {
+	CFHSI_PRIO_CTL = 0,
+	CFHSI_PRIO_VI,
+	CFHSI_PRIO_VO,
+	CFHSI_PRIO_BEBK,
+	CFHSI_PRIO_LAST,
+};
+
 /* Structure implemented by CAIF HSI drivers. */
 struct cfhsi {
 	struct caif_dev_common cfdev;
 	struct net_device *ndev;
 	struct platform_device *pdev;
-	struct sk_buff_head qhead;
+	struct sk_buff_head qhead[CFHSI_PRIO_LAST];
 	struct cfhsi_drv drv;
 	struct cfhsi_dev *dev;
 	int tx_state;
@@ -151,8 +160,14 @@
 	wait_queue_head_t wake_up_wait;
 	wait_queue_head_t wake_down_wait;
 	wait_queue_head_t flush_fifo_wait;
-	struct timer_list timer;
+	struct timer_list inactivity_timer;
 	struct timer_list rx_slowpath_timer;
+
+	/* TX aggregation */
+	unsigned long aggregation_timeout;
+	int aggregation_len;
+	struct timer_list aggregation_timer;
+
 	unsigned long bits;
 };
 
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index 6bd200a..83a89ba 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -188,11 +188,18 @@
  */
 void *cfpkt_tonative(struct cfpkt *pkt);
 
-
 /*
  * Returns packet information for a packet.
  * pkt Packet to get info from;
  * @return Packet information
  */
 struct caif_payload_info *cfpkt_info(struct cfpkt *pkt);
+
+/** cfpkt_set_prio - set priority for a CAIF packet.
+ *
+ * @pkt: The CAIF packet to be adjusted.
+ * @prio: one of TC_PRIO_ constants.
+ */
+void cfpkt_set_prio(struct cfpkt *pkt, int prio);
+
 #endif				/* CFPKT_H_ */
diff --git a/include/net/compat.h b/include/net/compat.h
index a974ae9..6e95653 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -42,12 +42,12 @@
 
 extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
 extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int);
-extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned);
+extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned int);
 extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
-					   unsigned, unsigned);
-extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned);
+					   unsigned int, unsigned int);
+extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned int);
 extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
-					   unsigned, unsigned,
+					   unsigned int, unsigned int,
 					   struct compat_timespec __user *);
 extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
 extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index f55c980..fc5d5dc 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -48,6 +48,8 @@
 	/* IEEE 802.1Qaz std */
 	int (*ieee_getets) (struct net_device *, struct ieee_ets *);
 	int (*ieee_setets) (struct net_device *, struct ieee_ets *);
+	int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
+	int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
 	int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
 	int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
 	int (*ieee_getapp) (struct net_device *, struct dcb_app *);
diff --git a/include/net/dn.h b/include/net/dn.h
index 814af0b..c88bf4e 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -199,7 +199,7 @@
 	fld->fld_dport = scp->addrrem;
 }
 
-extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu);
+extern unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
 
 #define DN_MENUVER_ACC 0x01
 #define DN_MENUVER_USR 0x02
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index 782ef7c..1ee9d4b 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -31,7 +31,7 @@
 
 struct dn_fib_nh {
 	struct net_device	*nh_dev;
-	unsigned		nh_flags;
+	unsigned int		nh_flags;
 	unsigned char		nh_scope;
 	int			nh_weight;
 	int			nh_power;
@@ -45,7 +45,7 @@
 	int 			fib_treeref;
 	atomic_t		fib_clntref;
 	int			fib_dead;
-	unsigned		fib_flags;
+	unsigned int		fib_flags;
 	int			fib_protocol;
 	__le16			fib_prefsrc;
 	__u32			fib_priority;
@@ -140,7 +140,7 @@
  */
 extern void dn_fib_rules_init(void);
 extern void dn_fib_rules_cleanup(void);
-extern unsigned dnet_addr_type(__le16 addr);
+extern unsigned int dnet_addr_type(__le16 addr);
 extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
 
 extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 81712cf..c507e05 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -76,8 +76,8 @@
 	__le16 rt_src_map;
 	__le16 rt_dst_map;
 
-	unsigned rt_flags;
-	unsigned rt_type;
+	unsigned int rt_flags;
+	unsigned int rt_type;
 };
 
 static inline bool dn_is_input_route(struct dn_route *rt)
diff --git a/include/net/dst.h b/include/net/dst.h
index 59c5d18..ff4da42 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -36,7 +36,11 @@
 	struct net_device       *dev;
 	struct  dst_ops	        *ops;
 	unsigned long		_metrics;
-	unsigned long		expires;
+	union {
+		unsigned long           expires;
+		/* point to where the dst_entry copied from */
+		struct dst_entry        *from;
+	};
 	struct dst_entry	*path;
 	struct neighbour __rcu	*_neighbour;
 #ifdef CONFIG_XFRM
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index e1c2ee0..3682a0a 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -12,7 +12,7 @@
 struct dst_ops {
 	unsigned short		family;
 	__be16			protocol;
-	unsigned		gc_thresh;
+	unsigned int		gc_thresh;
 
 	int			(*gc)(struct dst_ops *ops);
 	struct dst_entry *	(*check)(struct dst_entry *, __u32 cookie);
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 75d6156..9ac2524 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -25,7 +25,7 @@
 
 struct icmp_err {
   int		errno;
-  unsigned	fatal:1;
+  unsigned int	fatal:1;
 };
 
 extern const struct icmp_err icmp_err_convert[];
@@ -41,7 +41,6 @@
 
 extern void	icmp_send(struct sk_buff *skb_in,  int type, int code, __be32 info);
 extern int	icmp_rcv(struct sk_buff *skb);
-extern int	icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 extern int	icmp_init(void);
 extern void	icmp_out_count(struct net *net, unsigned char type);
 
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 51a7031..50f325f 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -120,7 +120,7 @@
 	unsigned char		mca_crcount;
 	unsigned long		mca_sfcount[2];
 	struct timer_list	mca_timer;
-	unsigned		mca_flags;
+	unsigned int		mca_flags;
 	int			mca_users;
 	atomic_t		mca_refcnt;
 	spinlock_t		mca_lock;
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 3207e58..1866a67 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -23,7 +23,7 @@
 struct sockaddr;
 
 extern int inet6_csk_bind_conflict(const struct sock *sk,
-				   const struct inet_bind_bucket *tb);
+				   const struct inet_bind_bucket *tb, bool relax);
 
 extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
 					     const struct request_sock *req);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index dbf9aab..46c9e2c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -60,7 +60,7 @@
 #endif
 	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
 	int	    (*bind_conflict)(const struct sock *sk,
-				     const struct inet_bind_bucket *tb);
+				     const struct inet_bind_bucket *tb, bool relax);
 };
 
 /** inet_connection_sock - INET connection oriented sock
@@ -245,7 +245,7 @@
 						const __be32 raddr,
 						const __be32 laddr);
 extern int inet_csk_bind_conflict(const struct sock *sk,
-				  const struct inet_bind_bucket *tb);
+				  const struct inet_bind_bucket *tb, bool relax);
 extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
 extern struct dst_entry* inet_csk_route_req(struct sock *sk,
diff --git a/include/net/ip.h b/include/net/ip.h
index b53d65f..94ddb69c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -222,9 +222,6 @@
 
 extern int sysctl_ip_nonlocal_bind;
 
-extern struct ctl_path net_core_path[];
-extern struct ctl_path net_ipv4_ctl_path[];
-
 /* From inetpeer.c */
 extern int inet_peer_threshold;
 extern int inet_peer_minttl;
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index b26bb81..0ae759a 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -123,6 +123,54 @@
 	return ((struct rt6_info *)dst)->rt6i_idev;
 }
 
+static inline void rt6_clean_expires(struct rt6_info *rt)
+{
+	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
+		dst_release(rt->dst.from);
+
+	rt->rt6i_flags &= ~RTF_EXPIRES;
+	rt->dst.from = NULL;
+}
+
+static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
+{
+	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
+		dst_release(rt->dst.from);
+
+	rt->rt6i_flags |= RTF_EXPIRES;
+	rt->dst.expires = expires;
+}
+
+static inline void rt6_update_expires(struct rt6_info *rt, int timeout)
+{
+	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
+		if (rt->dst.from)
+			dst_release(rt->dst.from);
+		/* dst_set_expires relies on expires == 0
+		 * if it has not been set previously.
+		 */
+		rt->dst.expires = 0;
+	}
+
+	dst_set_expires(&rt->dst, timeout);
+	rt->rt6i_flags |= RTF_EXPIRES;
+}
+
+static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
+{
+	struct dst_entry *new = (struct dst_entry *) from;
+
+	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) {
+		if (new == rt->dst.from)
+			return;
+		dst_release(rt->dst.from);
+	}
+
+	rt->rt6i_flags &= ~RTF_EXPIRES;
+	rt->dst.from = new;
+	dst_hold(new);
+}
+
 struct fib6_walker_t {
 	struct list_head lh;
 	struct fib6_node *root, *node;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2ad92ca..c062b67 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -146,7 +146,7 @@
 
 extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
 extern void rt6_ifdown(struct net *net, struct net_device *dev);
-extern void rt6_mtu_change(struct net_device *dev, unsigned mtu);
+extern void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
 extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
 
 
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 10422ef..78df0866 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -49,7 +49,7 @@
 	struct net_device	*nh_dev;
 	struct hlist_node	nh_hash;
 	struct fib_info		*nh_parent;
-	unsigned		nh_flags;
+	unsigned int		nh_flags;
 	unsigned char		nh_scope;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	int			nh_weight;
@@ -74,7 +74,7 @@
 	struct net		*fib_net;
 	int			fib_treeref;
 	atomic_t		fib_clntref;
-	unsigned		fib_flags;
+	unsigned int		fib_flags;
 	unsigned char		fib_dead;
 	unsigned char		fib_protocol;
 	unsigned char		fib_scope;
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 2bdee51..990216d 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -10,7 +10,6 @@
 
 #include <asm/types.h>                  /* for __uXX types */
 
-#include <linux/sysctl.h>               /* for ctl_path */
 #include <linux/list.h>                 /* for struct list_head */
 #include <linux/spinlock.h>             /* for struct rwlock_t */
 #include <linux/atomic.h>                 /* for struct atomic_t */
@@ -580,8 +579,8 @@
 	/* virtual service options */
 	char			*sched_name;
 	char			*pe_name;
-	unsigned		flags;		/* virtual service flags */
-	unsigned		timeout;	/* persistent timeout in sec */
+	unsigned int		flags;		/* virtual service flags */
+	unsigned int		timeout;	/* persistent timeout in sec */
 	u32			netmask;	/* persistent netmask */
 };
 
@@ -592,7 +591,7 @@
 	u16			port;
 
 	/* real server options */
-	unsigned		conn_flags;	/* connection flags */
+	unsigned int		conn_flags;	/* connection flags */
 	int			weight;		/* destination weight */
 
 	/* thresholds for active connections */
@@ -616,8 +615,8 @@
 	union nf_inet_addr	addr;	  /* IP address for virtual service */
 	__be16			port;	  /* port number for the service */
 	__u32                   fwmark;   /* firewall mark of the service */
-	unsigned		flags;	  /* service status flags */
-	unsigned		timeout;  /* persistent timeout in ticks */
+	unsigned int		flags;	  /* service status flags */
+	unsigned int		timeout;  /* persistent timeout in ticks */
 	__be32			netmask;  /* grouping granularity */
 	struct net		*net;
 
@@ -647,7 +646,7 @@
 	u16			af;		/* address family */
 	__be16			port;		/* port number of the server */
 	union nf_inet_addr	addr;		/* IP address of the server */
-	volatile unsigned	flags;		/* dest status flags */
+	volatile unsigned int	flags;		/* dest status flags */
 	atomic_t		conn_flags;	/* flags to copy to conn */
 	atomic_t		weight;		/* server weight */
 
@@ -953,7 +952,7 @@
  *      IPVS core functions
  *      (from ip_vs_core.c)
  */
-extern const char *ip_vs_proto_name(unsigned proto);
+extern const char *ip_vs_proto_name(unsigned int proto);
 extern void ip_vs_init_hash_table(struct list_head *table, int rows);
 #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
 
@@ -1014,7 +1013,7 @@
 
 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
 				  const union nf_inet_addr *daddr,
-				  __be16 dport, unsigned flags,
+				  __be16 dport, unsigned int flags,
 				  struct ip_vs_dest *dest, __u32 fwmark);
 extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 
@@ -1184,7 +1183,6 @@
  *      IPVS control data and functions (from ip_vs_ctl.c)
  */
 extern struct ip_vs_stats ip_vs_stats;
-extern const struct ctl_path net_vs_ctl_path[];
 extern int sysctl_ip_vs_sync_ver;
 
 extern void ip_vs_sync_switch_mode(struct net *net, int mode);
diff --git a/include/net/ipip.h b/include/net/ipip.h
index a32654d..a93cf6d 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -54,8 +54,10 @@
 									\
 	err = ip_local_out(skb);					\
 	if (likely(net_xmit_eval(err) == 0)) {				\
+		u64_stats_update_begin(&(stats1)->syncp);		\
 		(stats1)->tx_bytes += pkt_len;				\
 		(stats1)->tx_packets++;					\
+		u64_stats_update_end(&(stats1)->syncp);			\
 	} else {							\
 		(stats2)->tx_errors++;					\
 		(stats2)->tx_aborted_errors++;				\
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e4170a2..4332e9a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -113,7 +113,6 @@
 
 /* sysctls */
 extern int sysctl_mld_max_msf;
-extern struct ctl_path net_ipv6_ctl_path[];
 
 #define _DEVINC(net, statname, modifier, idev, field)			\
 ({									\
@@ -345,7 +344,7 @@
 static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2,
 				      unsigned int prefixlen)
 {
-	unsigned pdw, pbi;
+	unsigned int pdw, pbi;
 
 	/* check complete u32 in prefix */
 	pdw = prefixlen >> 5;
@@ -661,8 +660,6 @@
 extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
 extern int ipv6_sysctl_register(void);
 extern void ipv6_sysctl_unregister(void);
-extern int ipv6_static_sysctl_register(void);
-extern void ipv6_static_sysctl_unregister(void);
 #endif
 
 #endif /* _NET_IPV6_H */
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
index 23a4093..6ca3113 100644
--- a/include/net/llc_c_ev.h
+++ b/include/net/llc_c_ev.h
@@ -264,6 +264,6 @@
 static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
 {
 	return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
-	       (unsigned)sk->sk_rcvbuf;
+	       (unsigned int)sk->sk_rcvbuf;
 }
 #endif /* LLC_C_EV_H */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 6f9c25a..c02b6ad 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -34,6 +34,7 @@
 	__ND_OPT_ARRAY_MAX,
 	ND_OPT_ROUTE_INFO = 24,		/* RFC4191 */
 	ND_OPT_RDNSS = 25,		/* RFC5006 */
+	ND_OPT_DNSSL = 31,		/* RFC6106 */
 	__ND_OPT_MAX
 };
 
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 34c996f..6cdfeed 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -195,7 +195,6 @@
 #define NEIGH_UPDATE_F_ADMIN			0x80000000
 
 extern void			neigh_table_init(struct neigh_table *tbl);
-extern void			neigh_table_init_no_netlink(struct neigh_table *tbl);
 extern int			neigh_table_clear(struct neigh_table *tbl);
 extern struct neighbour *	neigh_lookup(struct neigh_table *tbl,
 					     const void *pkey,
@@ -323,7 +322,7 @@
 #ifdef CONFIG_BRIDGE_NETFILTER
 static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 {
-	unsigned seq, hh_alen;
+	unsigned int seq, hh_alen;
 
 	do {
 		seq = read_seqbegin(&hh->hh_lock);
@@ -336,7 +335,7 @@
 
 static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
 {
-	unsigned seq;
+	unsigned int seq;
 	int hh_len;
 
 	do {
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index ee547c1..ac9195e 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -279,14 +279,25 @@
 extern int register_pernet_device(struct pernet_operations *);
 extern void unregister_pernet_device(struct pernet_operations *);
 
-struct ctl_path;
 struct ctl_table;
 struct ctl_table_header;
 
-extern struct ctl_table_header *register_net_sysctl_table(struct net *net,
-	const struct ctl_path *path, struct ctl_table *table);
-extern struct ctl_table_header *register_net_sysctl_rotable(
-	const struct ctl_path *path, struct ctl_table *table);
+#ifdef CONFIG_SYSCTL
+extern int net_sysctl_init(void);
+extern struct ctl_table_header *register_net_sysctl(struct net *net,
+	const char *path, struct ctl_table *table);
 extern void unregister_net_sysctl_table(struct ctl_table_header *header);
+#else
+static inline int net_sysctl_init(void) { return 0; }
+static inline struct ctl_table_header *register_net_sysctl(struct net *net,
+	const char *path, struct ctl_table *table)
+{
+	return NULL;
+}
+static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
+{
+}
+#endif
+
 
 #endif /* __NET_NET_NAMESPACE_H */
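
With the ctl_path-based registration gone, per-namespace tables register against a plain string path. A hedged sketch of a caller (the table, path, and handler values here are made up for illustration):

#include <linux/errno.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int example_value;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *example_hdr;

static int __net_init example_net_init(struct net *net)
{
	/* Formerly took a struct ctl_path array; now a plain "a/b/c" string. */
	example_hdr = register_net_sysctl(net, "net/example", example_table);
	return example_hdr ? 0 : -ENOMEM;
}
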
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index e8010f4..9699c02 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -65,7 +65,7 @@
 
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header	*ctl_table_header;
-	struct ctl_path		*ctl_table_path;
+	const char		*ctl_table_path;
 	struct ctl_table	*ctl_table;
 #endif /* CONFIG_SYSCTL */
 
diff --git a/include/net/netlink.h b/include/net/netlink.h
index f394fe5..785f37a 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -102,20 +102,6 @@
  *   nla_put_flag(skb, type)		add flag attribute to skb
  *   nla_put_msecs(skb, type, jiffies)	add msecs attribute to skb
  *
- * Exceptions Based Attribute Construction:
- *   NLA_PUT(skb, type, len, data)	add attribute to skb
- *   NLA_PUT_U8(skb, type, value)	add u8 attribute to skb
- *   NLA_PUT_U16(skb, type, value)	add u16 attribute to skb
- *   NLA_PUT_U32(skb, type, value)	add u32 attribute to skb
- *   NLA_PUT_U64(skb, type, value)	add u64 attribute to skb
- *   NLA_PUT_STRING(skb, type, str)	add string attribute to skb
- *   NLA_PUT_FLAG(skb, type)		add flag attribute to skb
- *   NLA_PUT_MSECS(skb, type, jiffies)	add msecs attribute to skb
- *
- *   The meaning of these functions is equal to their lower case
- *   variants but they jump to the label nla_put_failure in case
- *   of a failure.
- *
  * Nested Attributes Construction:
  *   nla_nest_start(skb, type)		start a nested attribute
  *   nla_nest_end(skb, nla)		finalize a nested attribute
@@ -772,6 +758,39 @@
 }
 
 /**
+ * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
+{
+	return nla_put(skb, attrtype, sizeof(__be16), &value);
+}
+
+/**
+ * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
+{
+	return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
+{
+	return nla_put(skb, attrtype, sizeof(__le16), &value);
+}
+
+/**
  * nla_put_u32 - Add a u32 netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
@@ -783,7 +802,40 @@
 }
 
 /**
- * nla_put_64 - Add a u64 netlink attribute to a socket buffer
+ * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
+{
+	return nla_put(skb, attrtype, sizeof(__be32), &value);
+}
+
+/**
+ * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
+{
+	return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
+{
+	return nla_put(skb, attrtype, sizeof(__le32), &value);
+}
+
+/**
+ * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
@@ -794,6 +846,39 @@
 }
 
 /**
+ * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
+{
+	return nla_put(skb, attrtype, sizeof(__be64), &value);
+}
+
+/**
+ * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
+{
+	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
+{
+	return nla_put(skb, attrtype, sizeof(__le64), &value);
+}
+
+/**
  * nla_put_string - Add a string netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
@@ -828,60 +913,6 @@
 	return nla_put(skb, attrtype, sizeof(u64), &tmp);
 }
 
-#define NLA_PUT(skb, attrtype, attrlen, data) \
-	do { \
-		if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
-			goto nla_put_failure; \
-	} while(0)
-
-#define NLA_PUT_TYPE(skb, type, attrtype, value) \
-	do { \
-		type __tmp = value; \
-		NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
-	} while(0)
-
-#define NLA_PUT_U8(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, u8, attrtype, value)
-
-#define NLA_PUT_U16(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, u16, attrtype, value)
-
-#define NLA_PUT_LE16(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, __le16, attrtype, value)
-
-#define NLA_PUT_BE16(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, __be16, attrtype, value)
-
-#define NLA_PUT_NET16(skb, attrtype, value) \
-	NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value)
-
-#define NLA_PUT_U32(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, u32, attrtype, value)
-
-#define NLA_PUT_BE32(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, __be32, attrtype, value)
-
-#define NLA_PUT_NET32(skb, attrtype, value) \
-	NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value)
-
-#define NLA_PUT_U64(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, u64, attrtype, value)
-
-#define NLA_PUT_BE64(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, __be64, attrtype, value)
-
-#define NLA_PUT_NET64(skb, attrtype, value) \
-	NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value)
-
-#define NLA_PUT_STRING(skb, attrtype, value) \
-	NLA_PUT(skb, attrtype, strlen(value) + 1, value)
-
-#define NLA_PUT_FLAG(skb, attrtype) \
-	NLA_PUT(skb, attrtype, 0, NULL)
-
-#define NLA_PUT_MSECS(skb, attrtype, jiffies) \
-	NLA_PUT_U64(skb, attrtype, jiffies_to_msecs(jiffies))
-
 /**
  * nla_get_u32 - return payload of u32 attribute
  * @nla: u32 netlink attribute
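
Callers that used the removed NLA_PUT_* macros now check the inline helpers' return values and bail out explicitly. A small sketch of the resulting pattern (the attribute types and fill function are invented for illustration):

#include <linux/errno.h>
#include <net/netlink.h>

static int example_fill(struct sk_buff *skb, __be32 addr, __be16 port)
{
	/* Old style: NLA_PUT_BE32(skb, 1, addr); jumped to nla_put_failure. */
	if (nla_put_be32(skb, 1, addr) ||
	    nla_put_net16(skb, 2, port))
		return -EMSGSIZE;
	return 0;
}
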
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 548d78f..c06ac58 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -5,7 +5,7 @@
 
 struct net;
 
-static inline unsigned net_hash_mix(struct net *net)
+static inline unsigned int net_hash_mix(struct net *net)
 {
 #ifdef CONFIG_NET_NS
 	/*
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 81abfcb2..b42be53 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -12,7 +12,9 @@
 
 struct netns_sysctl_ipv6 {
 #ifdef CONFIG_SYSCTL
-	struct ctl_table_header *table;
+	struct ctl_table_header *hdr;
+	struct ctl_table_header *route_hdr;
+	struct ctl_table_header *icmp_hdr;
 	struct ctl_table_header *frags_hdr;
 #endif
 	int bindv6only;
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 313d00f..9a2505a 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -90,7 +90,7 @@
 };
 
 struct nfc_dev {
-	unsigned idx;
+	unsigned int idx;
 	u32 target_next_idx;
 	struct nfc_target *targets;
 	int n_targets;
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index fffdc60..66f5ac3 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -107,7 +107,7 @@
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
-static inline unsigned psched_mtu(const struct net_device *dev)
+static inline unsigned int psched_mtu(const struct net_device *dev)
 {
 	return dev->mtu + dev->hard_header_len;
 }
diff --git a/include/net/red.h b/include/net/red.h
index 77d4c37..ef46058 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -245,7 +245,7 @@
 	 *
 	 * dummy packets as a burst after idle time, i.e.
 	 *
-	 * 	p->qavg *= (1-W)^m
+	 * 	v->qavg *= (1-W)^m
 	 *
 	 * This is an apparently overcomplicated solution (f.e. we have to
 	 * precompute a table to make this calculation in reasonable time)
@@ -279,7 +279,7 @@
 						       unsigned int backlog)
 {
 	/*
-	 * NOTE: p->qavg is fixed point number with point at Wlog.
+	 * NOTE: v->qavg is fixed point number with point at Wlog.
 	 * The formula below is equvalent to floating point
 	 * version:
 	 *
@@ -390,7 +390,7 @@
 	if (red_is_idling(v))
 		qavg = red_calc_qavg_from_idle_time(p, v);
 
-	/* p->qavg is fixed point number with point at Wlog */
+	/* v->qavg is fixed point number with point at Wlog */
 	qavg >>= p->Wlog;
 
 	if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
diff --git a/include/net/route.h b/include/net/route.h
index b1c0d5b..ed2b78e 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -50,7 +50,7 @@
 	__be32			rt_key_src;
 
 	int			rt_genid;
-	unsigned		rt_flags;
+	unsigned int		rt_flags;
 	__u16			rt_type;
 	__u8			rt_key_tos;
 
@@ -185,8 +185,8 @@
 					  unsigned short new_mtu, struct net_device *dev);
 extern void		ip_rt_send_redirect(struct sk_buff *skb);
 
-extern unsigned		inet_addr_type(struct net *net, __be32 addr);
-extern unsigned		inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
+extern unsigned int		inet_addr_type(struct net *net, __be32 addr);
+extern unsigned int		inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
 extern void		ip_rt_multicast_event(struct in_device *);
 extern int		ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 extern void		ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 3702939..bbcfd09 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -41,9 +41,11 @@
  *	@get_size: Function to calculate required room for dumping device
  *		   specific netlink attributes
  *	@fill_info: Function to dump device specific netlink attributes
- *	@get_xstats_size: Function to calculate required room for dumping devic
+ *	@get_xstats_size: Function to calculate required room for dumping device
  *			  specific statistics
  *	@fill_xstats: Function to dump device specific statistics
+ *	@get_tx_queues: Function to determine the number of transmit queues to create when
+ *		        creating a new device.
  */
 struct rtnl_link_ops {
 	struct list_head	list;
@@ -75,9 +77,8 @@
 	size_t			(*get_xstats_size)(const struct net_device *dev);
 	int			(*fill_xstats)(struct sk_buff *skb,
 					       const struct net_device *dev);
-	int			(*get_tx_queues)(struct net *net, struct nlattr *tb[],
-						 unsigned int *tx_queues,
-						 unsigned int *real_tx_queues);
+	int			(*get_tx_queues)(struct net *net,
+						 struct nlattr *tb[]);
 };
 
 extern int	__rtnl_link_register(struct rtnl_link_ops *ops);
@@ -94,7 +95,7 @@
  * 	@fill_link_af: Function to fill IFLA_AF_SPEC with address family
  * 		       specific netlink attributes.
  * 	@get_link_af_size: Function to calculate size of address family specific
- * 			   netlink attributes exlusive the container attribute.
+ * 			   netlink attributes.
  *	@validate_link_af: Validate a IFLA_AF_SPEC attribute, must check attr
  *			   for invalid configuration settings.
  * 	@set_link_af: Function to parse a IFLA_AF_SPEC attribute and modify
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 88949a9..e4652fe 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1145,10 +1145,10 @@
 	/* Data pending that has never been transmitted.  */
 	struct list_head out_chunk_list;
 
-	unsigned out_qlen;	/* Total length of queued data chunks. */
+	unsigned int out_qlen;	/* Total length of queued data chunks. */
 
 	/* Error of send failed, may used in SCTP_SEND_FAILED event. */
-	unsigned error;
+	unsigned int error;
 
 	/* These are control chunks we want to send.  */
 	struct list_head control_chunk_list;
@@ -2000,8 +2000,8 @@
 __u32 sctp_association_get_next_tsn(struct sctp_association *);
 
 void sctp_assoc_sync_pmtu(struct sctp_association *);
-void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned);
-void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned);
+void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
 void sctp_assoc_set_primary(struct sctp_association *,
 			    struct sctp_transport *);
 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
diff --git a/include/net/sock.h b/include/net/sock.h
index a6ba1f8..68a2834 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -246,6 +246,7 @@
   *	@sk_user_data: RPC layer private data
   *	@sk_sndmsg_page: cached page for sendmsg
   *	@sk_sndmsg_off: cached offset for sendmsg
+  *	@sk_peek_off: current peek_offset value
   *	@sk_send_head: front of stuff to transmit
   *	@sk_security: used by security modules
   *	@sk_mark: generic packet mark
@@ -376,6 +377,17 @@
 	void                    (*sk_destruct)(struct sock *sk);
 };
 
+/*
+ * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is or
+ * is not willing to have its port reused by someone else. SK_FORCE_REUSE
+ * on a socket means that the socket will reuse everybody else's port
+ * without looking at the other's sk_reuse value.
+ */
+
+#define SK_NO_REUSE	0
+#define SK_CAN_REUSE	1
+#define SK_FORCE_REUSE	2
+
 static inline int sk_peek_offset(struct sock *sk, int flags)
 {
 	if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
@@ -698,17 +710,19 @@
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+				     unsigned int limit)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize > sk->sk_rcvbuf;
+	return qsize > limit;
 }
 
 /* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+					      unsigned int limit)
 {
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, limit))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
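
Since sk_rcvqueues_full() and sk_add_backlog() now take an explicit limit, each protocol decides how much backlog it tolerates instead of always capping at sk_rcvbuf. A hedged sketch of a caller (the function and the extra sk_sndbuf headroom are illustrative of what a stream protocol might choose):

#include <net/sock.h>

static int example_queue_to_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* Allow some slack beyond the receive buffer for bursty input. */
	unsigned int limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* sk_add_backlog() rechecks sk_rcvqueues_full(sk, skb, limit) itself. */
	return sk_add_backlog(sk, skb, limit);
}
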
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f75a04d..fc880e9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -123,7 +123,7 @@
 #endif
 #define TCP_RTO_MAX	((unsigned)(120*HZ))
 #define TCP_RTO_MIN	((unsigned)(HZ/5))
-#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC2988bis initial RTO value	*/
+#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
 #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
 						 * used as a fallback RTO for the
 						 * initial data transmission if no
@@ -395,6 +395,7 @@
 extern void tcp_clear_retrans(struct tcp_sock *tp);
 extern void tcp_update_metrics(struct sock *sk);
 extern void tcp_close(struct sock *sk, long timeout);
+extern void tcp_init_sock(struct sock *sk);
 extern unsigned int tcp_poll(struct file * file, struct socket *sock,
 			     struct poll_table_struct *wait);
 extern int tcp_getsockopt(struct sock *sk, int level, int optname,
@@ -435,6 +436,9 @@
 					struct request_values *rvp);
 extern int tcp_disconnect(struct sock *sk, int flags);
 
+void tcp_connect_init(struct sock *sk);
+void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
+void tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen);
 
 /* From syncookies.c */
 extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
@@ -609,6 +613,8 @@
  */
 extern u32 __tcp_select_window(struct sock *sk);
 
+void tcp_send_window_probe(struct sock *sk);
+
 /* TCP timestamps are only 32-bits, this causes a slight
  * complication on 64-bit systems since we store a snapshot
  * of jiffies in the buffer control blocks below.  We decided
@@ -645,16 +651,17 @@
 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 	__u32		when;		/* used to compute rtt's	*/
 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
+
 	__u8		sacked;		/* State flags for SACK/FACK.	*/
 #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
 #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
 #define TCPCB_LOST		0x04	/* SKB is lost			*/
 #define TCPCB_TAGBITS		0x07	/* All tag bits			*/
-	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
-	/* 1 byte hole */
 #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
 #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 
+	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
+	/* 1 byte hole */
 	__u32		ack_seq;	/* Sequence number ACK'd	*/
 };
 
@@ -1226,7 +1233,7 @@
 
 extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
 extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
-				 unsigned header_len);
+				 unsigned int header_len);
 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 			    const struct tcp_md5sig_key *key);
 
diff --git a/include/net/udp.h b/include/net/udp.h
index 5d606d9..e66fc29 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -81,7 +81,7 @@
 extern struct udp_table udp_table;
 extern void udp_table_init(struct udp_table *, const char *);
 static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
-					     struct net *net, unsigned num)
+					     struct net *net, unsigned int num)
 {
 	return &table->hash[udp_hashfn(net, num, table->mask)];
 }
@@ -267,4 +267,5 @@
 extern int udp4_ufo_send_check(struct sk_buff *skb);
 extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	netdev_features_t features);
+extern void udp_encap_enable(void);
 #endif	/* _UDP_H */
diff --git a/include/net/wimax.h b/include/net/wimax.h
index 322ff4f..bbb74f9 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -423,8 +423,8 @@
 	int (*op_reset)(struct wimax_dev *wimax_dev);
 
 	struct rfkill *rfkill;
-	unsigned rf_hw;
-	unsigned rf_sw;
+	unsigned int rf_hw;
+	unsigned int rf_sw;
 	char name[32];
 
 	struct dentry *debugfs_dentry;
diff --git a/include/net/x25.h b/include/net/x25.h
index a06119a..b4a8a89 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -305,7 +305,7 @@
 #endif /* CONFIG_SYSCTL */
 
 struct x25_skb_cb {
-	unsigned flags;
+	unsigned int flags;
 };
 #define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb))
 
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 96239e7..1cb32bf 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1682,8 +1682,9 @@
 
 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
 {
-	if (m->m | m->v)
-		NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
+	if ((m->m | m->v) &&
+	    nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 377df4a..1e11985 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -134,6 +134,9 @@
 
 static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 {
+	if (!cmd->request->rq_disk)
+		return NULL;
+
 	return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
 }
 
diff --git a/include/sound/core.h b/include/sound/core.h
index b6e0f57..bc05668 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -325,6 +325,13 @@
 
 /* --- */
 
+/* sound printk debug levels */
+enum {
+	SND_PR_ALWAYS,
+	SND_PR_DEBUG,
+	SND_PR_VERBOSE,
+};
+
 #if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
 __printf(4, 5)
 void __snd_printk(unsigned int level, const char *file, int line,
@@ -354,6 +361,8 @@
  */
 #define snd_printd(fmt, args...) \
 	__snd_printk(1, __FILE__, __LINE__, fmt, ##args)
+#define _snd_printd(level, fmt, args...) \
+	__snd_printk(level, __FILE__, __LINE__, fmt, ##args)
 
 /**
  * snd_BUG - give a BUG warning message and stack trace
@@ -383,6 +392,7 @@
 #else /* !CONFIG_SND_DEBUG */
 
 #define snd_printd(fmt, args...)	do { } while (0)
+#define _snd_printd(level, fmt, args...) do { } while (0)
 #define snd_BUG()			do { } while (0)
 static inline int __snd_bug_on(int cond)
 {
diff --git a/kernel/cred.c b/kernel/cred.c
index 97b36ee..e70683d 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -386,6 +386,8 @@
 	struct cred *new;
 	int ret;
 
+	p->replacement_session_keyring = NULL;
+
 	if (
 #ifdef CONFIG_KEYS
 		!p->cred->thread_keyring &&
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index cf1a4a6..d1a758b 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -62,7 +62,7 @@
 	help
 	  This option will show the mapping relationship between hardware irq
 	  numbers and Linux irq numbers. The mapping is exposed via debugfs
-	  in the file "virq_mapping".
+	  in the file "irq_domain_mapping".
 
 	  If you don't know what this means you don't need it.
 
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3601f3f..0e0ba5f 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -23,7 +23,6 @@
 static DEFINE_MUTEX(irq_domain_mutex);
 
 static DEFINE_MUTEX(revmap_trees_mutex);
-static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_domain *irq_default_domain;
 
 /**
@@ -184,13 +183,16 @@
 }
 
 struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
 					 const struct irq_domain_ops *ops,
 					 void *host_data)
 {
 	struct irq_domain *domain = irq_domain_alloc(of_node,
 					IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
-	if (domain)
+	if (domain) {
+		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
 		irq_domain_add(domain);
+	}
 	return domain;
 }
 
@@ -262,22 +264,6 @@
 	irq_default_domain = domain;
 }
 
-/**
- * irq_set_virq_count() - Set the maximum number of linux irqs
- * @count: number of linux irqs, capped with NR_IRQS
- *
- * This is mainly for use by platforms like iSeries who want to program
- * the virtual irq number in the controller to avoid the reverse mapping
- */
-void irq_set_virq_count(unsigned int count)
-{
-	pr_debug("irq: Trying to set virq count to %d\n", count);
-
-	BUG_ON(count < NUM_ISA_INTERRUPTS);
-	if (count < NR_IRQS)
-		irq_virq_count = count;
-}
-
 static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
 			    irq_hw_number_t hwirq)
 {
@@ -320,13 +306,12 @@
 		pr_debug("irq: create_direct virq allocation failed\n");
 		return 0;
 	}
-	if (virq >= irq_virq_count) {
+	if (virq >= domain->revmap_data.nomap.max_irq) {
 		pr_err("ERROR: no free irqs available below %i maximum\n",
-			irq_virq_count);
+			domain->revmap_data.nomap.max_irq);
 		irq_free_desc(virq);
 		return 0;
 	}
-
 	pr_debug("irq: create_direct obtained virq %d\n", virq);
 
 	if (irq_setup_virq(domain, virq, virq)) {
@@ -350,7 +335,8 @@
 unsigned int irq_create_mapping(struct irq_domain *domain,
 				irq_hw_number_t hwirq)
 {
-	unsigned int virq, hint;
+	unsigned int hint;
+	int virq;
 
 	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 
@@ -377,13 +363,13 @@
 		return irq_domain_legacy_revmap(domain, hwirq);
 
 	/* Allocate a virtual interrupt number */
-	hint = hwirq % irq_virq_count;
+	hint = hwirq % nr_irqs;
 	if (hint == 0)
 		hint++;
 	virq = irq_alloc_desc_from(hint, 0);
-	if (!virq)
+	if (virq <= 0)
 		virq = irq_alloc_desc_from(1, 0);
-	if (!virq) {
+	if (virq <= 0) {
 		pr_debug("irq: -> virq allocation failed\n");
 		return 0;
 	}
@@ -515,7 +501,7 @@
 			      irq_hw_number_t hwirq)
 {
 	unsigned int i;
-	unsigned int hint = hwirq % irq_virq_count;
+	unsigned int hint = hwirq % nr_irqs;
 
 	/* Look for default domain if nececssary */
 	if (domain == NULL)
@@ -536,7 +522,7 @@
 		if (data && (data->domain == domain) && (data->hwirq == hwirq))
 			return i;
 		i++;
-		if (i >= irq_virq_count)
+		if (i >= nr_irqs)
 			i = 1;
 	} while(i != hint);
 	return 0;
@@ -642,8 +628,9 @@
 	void *data;
 	int i;
 
-	seq_printf(m, "%-5s  %-7s  %-15s  %-18s  %s\n", "virq", "hwirq",
-		      "chip name", "chip data", "domain name");
+	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %s\n", "irq", "hwirq",
+		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
+		      "domain name");
 
 	for (i = 1; i < nr_irqs; i++) {
 		desc = irq_to_desc(i);
@@ -666,7 +653,7 @@
 			seq_printf(m, "%-15s  ", p);
 
 			data = irq_desc_get_chip_data(desc);
-			seq_printf(m, "0x%16p  ", data);
+			seq_printf(m, data ? "0x%p  " : "  %p  ", data);
 
 			if (desc->irq_data.domain && desc->irq_data.domain->of_node)
 				p = desc->irq_data.domain->of_node->full_name;
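
With the global irq_virq_count gone, the per-domain cap moves into irq_domain_add_nomap(). A hedged sketch of a controller setup using the new parameter (the ops table, the 128-irq cap, and the function names are illustrative, and this assumes the matching prototype update in <linux/irqdomain.h> elsewhere in the merge):

#include <linux/init.h>
#include <linux/irqdomain.h>

static int example_nomap_map(struct irq_domain *d, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	/* Nothing to program for this illustrative controller. */
	return 0;
}

static const struct irq_domain_ops example_nomap_ops = {
	.map = example_nomap_map,
};

static struct irq_domain *example_domain;

static void __init example_pic_init(struct device_node *np)
{
	/* Passing 0 would mean "no cap"; here direct-mapped irqs stop at 128. */
	example_domain = irq_domain_add_nomap(np, 128, &example_nomap_ops, NULL);
}
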
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 0c56d44..1588e3b 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -11,6 +11,7 @@
 #include <linux/irq_work.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/irqflags.h>
 #include <asm/processor.h>
 
 /*
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 22000c3..8d262b4 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -284,8 +284,12 @@
 	if (value) {
 		if(copy_from_user(&set_buffer, value, sizeof(set_buffer)))
 			return -EFAULT;
-	} else
-		memset((char *) &set_buffer, 0, sizeof(set_buffer));
+	} else {
+		memset(&set_buffer, 0, sizeof(set_buffer));
+		printk_once(KERN_WARNING "%s calls setitimer() with new_value NULL pointer."
+			    " Misfeature support will be removed\n",
+			    current->comm);
+	}
 
 	error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
 	if (error || !ovalue)
diff --git a/kernel/panic.c b/kernel/panic.c
index 80aed44..8ed89a1 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -97,7 +97,7 @@
 	/*
 	 * Avoid nested stack-dumping if a panic occurs during oops processing
 	 */
-	if (!oops_in_progress)
+	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
 		dump_stack();
 #endif
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 52b3a06..4ab1187 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -170,7 +170,7 @@
 #endif
 
 #ifdef CONFIG_PRINTK
-static int proc_dmesg_restrict(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
@@ -703,7 +703,7 @@
 		.data		= &dmesg_restrict,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_dointvec_minmax_sysadmin,
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
@@ -712,7 +712,7 @@
 		.data		= &kptr_restrict,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dmesg_restrict,
+		.proc_handler	= proc_dointvec_minmax_sysadmin,
 		.extra1		= &zero,
 		.extra2		= &two,
 	},
@@ -1943,7 +1943,7 @@
 }
 
 #ifdef CONFIG_PRINTK
-static int proc_dmesg_restrict(struct ctl_table *table, int write,
+static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	if (write && !capable(CAP_SYS_ADMIN))
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 2cf9cc7..a20dc8a 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -1,6 +1,10 @@
 #
 # Timer subsystem related configuration options
 #
+
+# Core internal switch. Selected by NO_HZ / HIGH_RES_TIMERS. This is
+# only related to the tick functionality. Oneshot clockevent devices
+# are supported independent of this.
 config TICK_ONESHOT
 	bool
 
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index e883f57..bf57abd 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -575,10 +575,12 @@
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
+
 	if (cpumask_empty(tick_get_broadcast_mask()))
 		goto end;
 
-	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
 	bc = tick_broadcast_device.evtdev;
 	if (bc)
 		tick_broadcast_setup_oneshot(bc);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3526038..6a3a5b9 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -534,9 +534,9 @@
 				hrtimer_get_expires(&ts->sched_timer), 0))
 				break;
 		}
-		/* Update jiffies and reread time */
-		tick_do_update_jiffies64(now);
+		/* Reread time and update jiffies */
 		now = ktime_get();
+		tick_do_update_jiffies64(now);
 	}
 }
 
diff --git a/lib/kobject.c b/lib/kobject.c
index 21dee7c..aeefa8b 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -192,14 +192,14 @@
 
 		/* be noisy on error issues */
 		if (error == -EEXIST)
-			printk(KERN_ERR "%s failed for %s with "
-			       "-EEXIST, don't try to register things with "
-			       "the same name in the same directory.\n",
-			       __func__, kobject_name(kobj));
+			WARN(1, "%s failed for %s with "
+			     "-EEXIST, don't try to register things with "
+			     "the same name in the same directory.\n",
+			     __func__, kobject_name(kobj));
 		else
-			printk(KERN_ERR "%s failed for %s (%d)\n",
-			       __func__, kobject_name(kobj), error);
-		dump_stack();
+			WARN(1, "%s failed for %s (error: %d parent: %s)\n",
+			     __func__, kobject_name(kobj), error,
+			     parent ? kobject_name(parent) : "'none'");
 	} else
 		kobj->state_in_sysfs = 1;
 
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index 2f52662..0c50536 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -177,8 +177,8 @@
  */
 int mpi_lshift_limbs(MPI a, unsigned int count)
 {
-	mpi_ptr_t ap = a->d;
-	int n = a->nlimbs;
+	const int n = a->nlimbs;
+	mpi_ptr_t ap;
 	int i;
 
 	if (!count || !n)
@@ -187,6 +187,7 @@
 	if (RESIZE_IF_NEEDED(a, n + count) < 0)
 		return -ENOMEM;
 
+	ap = a->d;
 	for (i = n - 1; i >= 0; i--)
 		ap[i + count] = ap[i];
 	for (i = 0; i < count; i++)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b8ce6f4..cd65cb1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2791,6 +2791,7 @@
 	 * so no worry about deadlock.
 	 */
 	page = pte_page(entry);
+	get_page(page);
 	if (page != pagecache_page)
 		lock_page(page);
 
@@ -2822,6 +2823,7 @@
 	}
 	if (page != pagecache_page)
 		unlock_page(page);
+	put_page(page);
 
 out_mutex:
 	mutex_unlock(&hugetlb_instantiation_mutex);
diff --git a/mm/memblock.c b/mm/memblock.c
index 99f2855..a44eab3 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -330,6 +330,9 @@
 	phys_addr_t end = base + memblock_cap_size(base, &size);
 	int i, nr_new;
 
+	if (!size)
+		return 0;
+
 	/* special case for empty array */
 	if (type->regions[0].size == 0) {
 		WARN_ON(type->cnt != 1 || type->total_size);
@@ -430,6 +433,9 @@
 
 	*start_rgn = *end_rgn = 0;
 
+	if (!size)
+		return 0;
+
 	/* we'll create at most two more regions */
 	while (type->cnt + 2 > type->max)
 		if (memblock_double_array(type) < 0)
@@ -514,7 +520,6 @@
 		     (unsigned long long)base,
 		     (unsigned long long)base + size,
 		     (void *)_RET_IP_);
-	BUG_ON(0 == size);
 
 	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7d698df..b868def 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2165,7 +2165,7 @@
 	if (action == CPU_ONLINE)
 		return NOTIFY_OK;
 
-	if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
+	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
 		return NOTIFY_OK;
 
 	for_each_mem_cgroup(iter)
@@ -3392,6 +3392,7 @@
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
+	pc = lookup_page_cgroup(newpage);
 	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
@@ -3763,7 +3764,7 @@
 			goto try_to_free;
 		cond_resched();
 	/* "ret" should also be checked to ensure all lists are empty. */
-	} while (memcg->res.usage > 0 || ret);
+	} while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
 out:
 	css_put(&memcg->css);
 	return ret;
@@ -3778,7 +3779,7 @@
 	lru_add_drain_all();
 	/* try to free all pages in this cgroup */
 	shrink = 1;
-	while (nr_retries && memcg->res.usage > 0) {
+	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
 		int progress;
 
 		if (signal_pending(current)) {
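The first memcontrol.c hunk fixes a boolean-logic slip: (action != CPU_DEAD) || action != CPU_DEAD_FROZEN holds for every value of action, because no value can equal both constants at once, so the notifier bailed out unconditionally. A small compilable illustration follows; the constant values are made up.

#include <stdio.h>

enum { CPU_DEAD = 7, CPU_DEAD_FROZEN = 8 };	/* made-up values */

int main(void)
{
	int action;

	for (action = 0; action < 16; action++) {
		int buggy = (action != CPU_DEAD) || action != CPU_DEAD_FROZEN;
		int fixed = (action != CPU_DEAD) && action != CPU_DEAD_FROZEN;

		/* buggy is 1 for every action: no value equals both
		 * constants, so at least one inequality always holds.
		 */
		printf("action=%2d buggy=%d fixed=%d\n", action, buggy, fixed);
	}
	return 0;
}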
diff --git a/mm/mmap.c b/mm/mmap.c
index a7bf6a3..848ef52 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -240,6 +240,8 @@
 	return next;
 }
 
+static unsigned long do_brk(unsigned long addr, unsigned long len);
+
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
 	unsigned long rlim, retval;
@@ -951,7 +953,7 @@
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
-unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
 			unsigned long flags, unsigned long pgoff)
 {
@@ -1087,7 +1089,32 @@
 
 	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
-EXPORT_SYMBOL(do_mmap_pgoff);
+
+unsigned long do_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	if (unlikely(offset + PAGE_ALIGN(len) < offset))
+		return -EINVAL;
+	if (unlikely(offset & ~PAGE_MASK))
+		return -EINVAL;
+	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(do_mmap);
+
+unsigned long vm_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	unsigned long ret;
+	struct mm_struct *mm = current->mm;
+
+	down_write(&mm->mmap_sem);
+	ret = do_mmap(file, addr, len, prot, flag, offset);
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL(vm_mmap);
 
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
@@ -2105,21 +2132,25 @@
 
 	return 0;
 }
-
 EXPORT_SYMBOL(do_munmap);
 
-SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+int vm_munmap(unsigned long start, size_t len)
 {
 	int ret;
 	struct mm_struct *mm = current->mm;
 
-	profile_munmap(addr);
-
 	down_write(&mm->mmap_sem);
-	ret = do_munmap(mm, addr, len);
+	ret = do_munmap(mm, start, len);
 	up_write(&mm->mmap_sem);
 	return ret;
 }
+EXPORT_SYMBOL(vm_munmap);
+
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+{
+	profile_munmap(addr);
+	return vm_munmap(addr, len);
+}
 
 static inline void verify_mm_writelocked(struct mm_struct *mm)
 {
@@ -2136,7 +2167,7 @@
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
-unsigned long do_brk(unsigned long addr, unsigned long len)
+static unsigned long do_brk(unsigned long addr, unsigned long len)
 {
 	struct mm_struct * mm = current->mm;
 	struct vm_area_struct * vma, * prev;
@@ -2232,7 +2263,17 @@
 	return addr;
 }
 
-EXPORT_SYMBOL(do_brk);
+unsigned long vm_brk(unsigned long addr, unsigned long len)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long ret;
+
+	down_write(&mm->mmap_sem);
+	ret = do_brk(addr, len);
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL(vm_brk);
 
 /* Release all mmaps. */
 void exit_mmap(struct mm_struct *mm)
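The mmap.c hunks stop exporting do_mmap_pgoff() and do_brk() and instead export vm_mmap(), vm_munmap() and vm_brk(), which take mmap_sem internally so callers outside mm/ no longer open-code the locking. A sketch of how a hypothetical caller would use the new helpers; the function and its error handling are illustrative only.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>

/* Hypothetical caller: map a file read/write and tear the mapping down
 * again. vm_mmap() and vm_munmap() take current->mm->mmap_sem themselves.
 */
static int map_and_unmap(struct file *file, unsigned long len)
{
	unsigned long addr;

	addr = vm_mmap(file, 0, len, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
	if (IS_ERR_VALUE(addr))
		return (int)addr;

	/* ... use the mapping at addr ... */

	return vm_munmap(addr, len);
}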
diff --git a/mm/nommu.c b/mm/nommu.c
index f59e170..bb8f4f0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1233,7 +1233,7 @@
 /*
  * handle mapping creation for uClinux
  */
-unsigned long do_mmap_pgoff(struct file *file,
+static unsigned long do_mmap_pgoff(struct file *file,
 			    unsigned long addr,
 			    unsigned long len,
 			    unsigned long prot,
@@ -1470,7 +1470,32 @@
 	show_free_areas(0);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(do_mmap_pgoff);
+
+unsigned long do_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	if (unlikely(offset + PAGE_ALIGN(len) < offset))
+		return -EINVAL;
+	if (unlikely(offset & ~PAGE_MASK))
+		return -EINVAL;
+	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(do_mmap);
+
+unsigned long vm_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	unsigned long ret;
+	struct mm_struct *mm = current->mm;
+
+	down_write(&mm->mmap_sem);
+	ret = do_mmap(file, addr, len, prot, flag, offset);
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL(vm_mmap);
 
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
@@ -1709,16 +1734,22 @@
 }
 EXPORT_SYMBOL(do_munmap);
 
-SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+int vm_munmap(unsigned long addr, size_t len)
 {
-	int ret;
 	struct mm_struct *mm = current->mm;
+	int ret;
 
 	down_write(&mm->mmap_sem);
 	ret = do_munmap(mm, addr, len);
 	up_write(&mm->mmap_sem);
 	return ret;
 }
+EXPORT_SYMBOL(vm_munmap);
+
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+{
+	return vm_munmap(addr, len);
+}
 
 /*
  * release all the mappings made in a process's VM space
@@ -1744,7 +1775,7 @@
 	kleave("");
 }
 
-unsigned long do_brk(unsigned long addr, unsigned long len)
+unsigned long vm_brk(unsigned long addr, unsigned long len)
 {
 	return -ENOMEM;
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 9d3dd37..4c5ff7f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -26,7 +26,7 @@
  */
 static const struct address_space_operations swap_aops = {
 	.writepage	= swap_writepage,
-	.set_page_dirty	= __set_page_dirty_nobuffers,
+	.set_page_dirty	= __set_page_dirty_no_writeback,
 	.migratepage	= migrate_page,
 };
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33c332b..1a51868 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2107,12 +2107,7 @@
 		 * with multiple processes reclaiming pages, the total
 		 * freeing target can get unreasonably large.
 		 */
-		if (nr_reclaimed >= nr_to_reclaim)
-			nr_to_reclaim = 0;
-		else
-			nr_to_reclaim -= nr_reclaimed;
-
-		if (!nr_to_reclaim && priority < DEF_PRIORITY)
+		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
 			break;
 	}
 	blk_finish_plug(&plug);
diff --git a/net/802/fc.c b/net/802/fc.c
index b324e31..05eea6b 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -35,7 +35,7 @@
 
 static int fc_header(struct sk_buff *skb, struct net_device *dev,
 		     unsigned short type,
-		     const void *daddr, const void *saddr, unsigned len)
+		     const void *daddr, const void *saddr, unsigned int len)
 {
 	struct fch_hdr *fch;
 	int hdr_len;
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 5ab25cd..9cda406 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -51,7 +51,7 @@
 
 static int fddi_header(struct sk_buff *skb, struct net_device *dev,
 		       unsigned short type,
-		       const void *daddr, const void *saddr, unsigned len)
+		       const void *daddr, const void *saddr, unsigned int len)
 {
 	int hl = FDDI_K_SNAP_HLEN;
 	struct fddihdr *fddi;
diff --git a/net/802/garp.c b/net/802/garp.c
index a5c2248..8456f5d 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -157,9 +157,9 @@
 	while (parent) {
 		attr = rb_entry(parent, struct garp_attr, node);
 		d = garp_attr_cmp(attr, data, len, type);
-		if (d < 0)
+		if (d > 0)
 			parent = parent->rb_left;
-		else if (d > 0)
+		else if (d < 0)
 			parent = parent->rb_right;
 		else
 			return attr;
@@ -178,9 +178,9 @@
 		parent = *p;
 		attr = rb_entry(parent, struct garp_attr, node);
 		d = garp_attr_cmp(attr, data, len, type);
-		if (d < 0)
+		if (d > 0)
 			p = &parent->rb_left;
-		else if (d > 0)
+		else if (d < 0)
 			p = &parent->rb_right;
 		else {
 			/* The attribute already exists; re-use it. */
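The garp.c hunks flip the comparison directions so that lookup and insertion walk the rbtree consistently: when the node compares greater than the key the search must descend left, and less than goes right. A generic sketch of that convention using the kernel rbtree API; the struct and its key are invented for illustration.

#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	int key;
};

/* Lookup and insert must interpret the comparison identically:
 * node > key -> go left, node < key -> go right.
 */
static struct item *item_find(struct rb_root *root, int key)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct item *it = rb_entry(n, struct item, node);

		if (it->key > key)
			n = n->rb_left;
		else if (it->key < key)
			n = n->rb_right;
		else
			return it;
	}
	return NULL;
}

static void item_insert(struct rb_root *root, struct item *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct item *it = rb_entry(*p, struct item, node);

		parent = *p;
		if (it->key > new->key)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
}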
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 056794e..51a1f53 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -45,7 +45,7 @@
 
 static int hippi_header(struct sk_buff *skb, struct net_device *dev,
 			unsigned short type,
-			const void *daddr, const void *saddr, unsigned len)
+			const void *daddr, const void *saddr, unsigned int len)
 {
 	struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN);
 	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
diff --git a/net/802/tr.c b/net/802/tr.c
index b9a3a14..30a352e 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -101,7 +101,7 @@
 
 static int tr_header(struct sk_buff *skb, struct net_device *dev,
 		     unsigned short type,
-		     const void *daddr, const void *saddr, unsigned len)
+		     const void *daddr, const void *saddr, unsigned int len)
 {
 	struct trh_hdr *trh;
 	int hdr_len;
@@ -193,7 +193,7 @@
 
 	struct trh_hdr *trh;
 	struct trllc *trllc;
-	unsigned riflen=0;
+	unsigned int riflen=0;
 
 	skb->dev = dev;
 	skb_reset_mac_header(skb);
@@ -643,12 +643,6 @@
 	},
 	{ },
 };
-
-static __initdata struct ctl_path tr_path[] = {
-	{ .procname = "net", },
-	{ .procname = "token-ring", },
-	{ }
-};
 #endif
 
 /*
@@ -662,7 +656,7 @@
 	setup_timer(&rif_timer, rif_check_expire, 0);
 	add_timer(&rif_timer);
 #ifdef CONFIG_SYSCTL
-	register_sysctl_paths(tr_path, tr_table);
+	register_net_sysctl(&init_net, "net/token-ring", tr_table);
 #endif
 	proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops);
 	return 0;
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 5071136..708c80e 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -166,11 +166,13 @@
 	struct nlattr *nest;
 	unsigned int i;
 
-	NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id);
+	if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id))
+		goto nla_put_failure;
 	if (vlan->flags) {
 		f.flags = vlan->flags;
 		f.mask  = ~0;
-		NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f);
+		if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
+			goto nla_put_failure;
 	}
 	if (vlan->nr_ingress_mappings) {
 		nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
@@ -183,8 +185,9 @@
 
 			m.from = i;
 			m.to   = vlan->ingress_priority_map[i];
-			NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
-				sizeof(m), &m);
+			if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+				    sizeof(m), &m))
+				goto nla_put_failure;
 		}
 		nla_nest_end(skb, nest);
 	}
@@ -202,8 +205,9 @@
 
 				m.from = pm->priority;
 				m.to   = (pm->vlan_qos >> 13) & 0x7;
-				NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
-					sizeof(m), &m);
+				if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+					    sizeof(m), &m))
+					goto nla_put_failure;
 			}
 		}
 		nla_nest_end(skb, nest);
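The vlan_netlink.c hunks are part of the tree-wide removal of the NLA_PUT*() macros, which hid a goto inside themselves; the open-coded form checks each nla_put*() return value and jumps to a common failure label. A minimal sketch of the checked pattern; the attribute types and the fill routine are illustrative, not the VLAN code.

#include <linux/errno.h>
#include <net/netlink.h>

/* Hypothetical fill routine: every nla_put*() returns non-zero (-EMSGSIZE)
 * when the skb runs out of tailroom, and the caller bails out to one label.
 */
static int fill_example(struct sk_buff *skb, u16 id,
			const void *blob, int blob_len)
{
	if (nla_put_u16(skb, 1 /* illustrative attr type */, id))
		goto nla_put_failure;
	if (nla_put(skb, 2 /* illustrative attr type */, blob_len, blob))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}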
diff --git a/net/9p/client.c b/net/9p/client.c
index b23a17c..a170893 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1530,7 +1530,7 @@
 
 
 	p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
-		   fid->fid, (long long unsigned) offset, count);
+		   fid->fid, (unsigned long long) offset, count);
 	err = 0;
 	clnt = fid->clnt;
 
@@ -1605,7 +1605,7 @@
 	struct p9_req_t *req;
 
 	p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
-				fid->fid, (long long unsigned) offset, count);
+				fid->fid, (unsigned long long) offset, count);
 	err = 0;
 	clnt = fid->clnt;
 
@@ -2040,7 +2040,7 @@
 	char *dataptr;
 
 	p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
-				fid->fid, (long long unsigned) offset, count);
+				fid->fid, (unsigned long long) offset, count);
 
 	err = 0;
 	clnt = fid->clnt;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index fccae26..6449bae 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -513,7 +513,7 @@
 	clear_bit(Wworksched, &m->wsched);
 }
 
-static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
 {
 	struct p9_poll_wait *pwait =
 		container_of(wait, struct p9_poll_wait, wait);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index bfa9ab9..0301b32 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -63,7 +63,7 @@
 #include <net/tcp_states.h>
 #include <net/route.h>
 #include <linux/atalk.h>
-#include "../core/kmap_skb.h"
+#include <linux/highmem.h>
 
 struct datalink_proto *ddp_dl, *aarp_dl;
 static const struct proto_ops atalk_dgram_ops;
@@ -960,10 +960,10 @@
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			sum = atalk_sum_partial(vaddr + frag->page_offset +
 						  offset - start, copy, sum);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 
 			if (!(len -= copy))
 				return sum;
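The ddp.c hunk replaces the private kmap_skb_frag() helpers with kmap_atomic() on the fragment's page. A short sketch of checksumming a paged fragment through a temporary atomic mapping; the helper and its callback are assumptions for illustration.

#include <linux/highmem.h>
#include <linux/skbuff.h>

/* Illustrative helper: fold one paged fragment into a running checksum by
 * mapping its page atomically for the duration of the access.
 */
static u32 sum_frag_bytes(const skb_frag_t *frag,
			  u32 (*fold)(const u8 *data, int len, u32 sum),
			  u32 sum)
{
	u8 *vaddr = kmap_atomic(skb_frag_page(frag));

	sum = fold(vaddr + frag->page_offset, skb_frag_size(frag), sum);
	kunmap_atomic(vaddr);

	return sum;
}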
diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index 04e9c0d..ebb8643 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -42,20 +42,14 @@
 	{ },
 };
 
-static struct ctl_path atalk_path[] = {
-	{ .procname = "net", },
-	{ .procname = "appletalk", },
-	{ }
-};
-
 static struct ctl_table_header *atalk_table_header;
 
 void atalk_register_sysctl(void)
 {
-	atalk_table_header = register_sysctl_paths(atalk_path, atalk_table);
+	atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
 }
 
 void atalk_unregister_sysctl(void)
 {
-	unregister_sysctl_table(atalk_table_header);
+	unregister_net_sysctl_table(atalk_table_header);
 }
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 353fccf..4819d315 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -73,7 +73,7 @@
 #ifdef CONFIG_ATM_BR2684_IPFILTER
 	struct br2684_filter filter;
 #endif /* CONFIG_ATM_BR2684_IPFILTER */
-	unsigned copies_needed, copies_failed;
+	unsigned int copies_needed, copies_failed;
 };
 
 struct br2684_dev {
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 53e5002..5bdd300 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -207,7 +207,7 @@
 			      size_t nbytes, loff_t *ppos)
 {
 	char *page, *p;
-	unsigned len;
+	unsigned int len;
 
 	if (nbytes == 0)
 		return 0;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 614d3fc..ce1e59f 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -62,12 +62,25 @@
 	void (*old_pop)(struct atm_vcc *, struct sk_buff *);
 					/* keep old push/pop for detaching */
 	enum pppoatm_encaps encaps;
+	atomic_t inflight;
+	unsigned long blocked;
 	int flags;			/* SC_COMP_PROT - compress protocol */
 	struct ppp_channel chan;	/* interface to generic ppp layer */
 	struct tasklet_struct wakeup_tasklet;
 };
 
 /*
+ * We want to allow two packets in the queue. The one that's currently in
+ * flight, and *one* queued up ready for the ATM device to send immediately
+ * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
+ * inflight == -2 represents an empty queue, -1 one packet, and zero means
+ * there are two packets in the queue.
+ */
+#define NONE_INFLIGHT -2
+
+#define BLOCKED 0
+
+/*
  * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
  * ID (0xC021) used in autodetection
  */
@@ -102,16 +115,30 @@
 static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
 {
 	struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
+
 	pvcc->old_pop(atmvcc, skb);
+	atomic_dec(&pvcc->inflight);
+
 	/*
-	 * We don't really always want to do this since it's
-	 * really inefficient - it would be much better if we could
-	 * test if we had actually throttled the generic layer.
-	 * Unfortunately then there would be a nasty SMP race where
-	 * we could clear that flag just as we refuse another packet.
-	 * For now we do the safe thing.
+	 * We always used to run the wakeup tasklet unconditionally here, for
+	 * fear of race conditions where we clear the BLOCKED flag just as we
+	 * refuse another packet in pppoatm_send(). This was quite inefficient.
+	 *
+	 * In fact it's OK. The PPP core will only ever call pppoatm_send()
+	 * while holding the channel->downl lock. And ppp_output_wakeup() as
+	 * called by the tasklet will *also* grab that lock. So even if another
+	 * CPU is in pppoatm_send() right now, the tasklet isn't going to race
+	 * with it. The wakeup *will* happen after the other CPU is safely out
+	 * of pppoatm_send() again.
+	 *
+	 * So if the CPU in pppoatm_send() has already set the BLOCKED bit and
+	 * it's about to return, that's fine. We trigger a wakeup which will
+	 * happen later. And if the CPU in pppoatm_send() *hasn't* set the
+	 * BLOCKED bit yet, that's fine too because of the double check in
+	 * pppoatm_may_send() which is commented there.
 	 */
-	tasklet_schedule(&pvcc->wakeup_tasklet);
+	if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
+		tasklet_schedule(&pvcc->wakeup_tasklet);
 }
 
 /*
@@ -184,6 +211,51 @@
 	ppp_input_error(&pvcc->chan, 0);
 }
 
+static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
+{
+	/*
+	 * It's not clear that we need to bother with using atm_may_send()
+	 * to check we don't exceed sk->sk_sndbuf. If userspace sets a
+	 * value of sk_sndbuf which is lower than the MTU, we're going to
+	 * block for ever. But the code always did that before we introduced
+	 * the packet count limit, so...
+	 */
+	if (atm_may_send(pvcc->atmvcc, size) &&
+	    atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
+		return 1;
+
+	/*
+	 * We use test_and_set_bit() rather than set_bit() here because
+	 * we need to ensure there's a memory barrier after it. The bit
+	 * *must* be set before we do the atomic_inc() on pvcc->inflight.
+	 * There's no smp_mb__after_set_bit(), so it's this or abuse
+	 * smp_mb__after_clear_bit().
+	 */
+	test_and_set_bit(BLOCKED, &pvcc->blocked);
+
+	/*
+	 * We may have raced with pppoatm_pop(). If it ran for the
+	 * last packet in the queue, *just* before we set the BLOCKED
+	 * bit, then it might never run again and the channel could
+	 * remain permanently blocked. Cope with that race by checking
+	 * *again*. If it did run in that window, we'll have space on
+	 * the queue now and can return success. It's harmless to leave
+	 * the BLOCKED flag set, since it's only used as a trigger to
+	 * run the wakeup tasklet. Another wakeup will never hurt.
+	 * If pppoatm_pop() is running but hasn't got as far as making
+	 * space on the queue yet, then it hasn't checked the BLOCKED
+	 * flag yet either, so we're safe in that case too. It'll issue
+	 * an "immediate" wakeup... where "immediate" actually involves
+	 * taking the PPP channel's ->downl lock, which is held by the
+	 * code path that calls pppoatm_send(), and is thus going to
+	 * wait for us to finish.
+	 */
+	if (atm_may_send(pvcc->atmvcc, size) &&
+	    atomic_inc_not_zero(&pvcc->inflight))
+		return 1;
+
+	return 0;
+}
 /*
  * Called by the ppp_generic.c to send a packet - returns true if packet
  * was accepted.  If we return false, then it's our job to call
@@ -207,7 +279,7 @@
 			struct sk_buff *n;
 			n = skb_realloc_headroom(skb, LLC_LEN);
 			if (n != NULL &&
-			    !atm_may_send(pvcc->atmvcc, n->truesize)) {
+			    !pppoatm_may_send(pvcc, n->truesize)) {
 				kfree_skb(n);
 				goto nospace;
 			}
@@ -215,12 +287,12 @@
 			skb = n;
 			if (skb == NULL)
 				return DROP_PACKET;
-		} else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
+		} else if (!pppoatm_may_send(pvcc, skb->truesize))
 			goto nospace;
 		memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
 		break;
 	case e_vc:
-		if (!atm_may_send(pvcc->atmvcc, skb->truesize))
+		if (!pppoatm_may_send(pvcc, skb->truesize))
 			goto nospace;
 		break;
 	case e_autodetect:
@@ -285,6 +357,9 @@
 	if (pvcc == NULL)
 		return -ENOMEM;
 	pvcc->atmvcc = atmvcc;
+
+	/* Maximum value of inflight is zero, so we can use atomic_inc_not_zero() */
+	atomic_set(&pvcc->inflight, NONE_INFLIGHT);
 	pvcc->old_push = atmvcc->push;
 	pvcc->old_pop = atmvcc->pop;
 	pvcc->encaps = (enum pppoatm_encaps) be.encaps;
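The pppoatm changes bound the per-VCC queue to two packets with an atomic counter biased at -2, so atomic_inc_not_zero() naturally refuses a third packet, and a BLOCKED bit records when the completion path must schedule a wakeup. A stripped-down sketch of the counting scheme follows (it omits the double-check against the race described in the comments above); the names are invented for illustration.

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define QUEUE_EMPTY	-2	/* -2: empty, -1: one packet, 0: full (two) */
#define BLOCKED_BIT	0

struct tx_slots {
	atomic_t inflight;
	unsigned long blocked;
};

static void tx_slots_init(struct tx_slots *q)
{
	atomic_set(&q->inflight, QUEUE_EMPTY);
	q->blocked = 0;
}

/* try to take a slot; if the queue is full, remember that we blocked */
static bool tx_slots_try_take(struct tx_slots *q)
{
	if (atomic_inc_not_zero(&q->inflight))
		return true;
	set_bit(BLOCKED_BIT, &q->blocked);
	return false;
}

/* completion path: free a slot, report whether the sender needs a wakeup */
static bool tx_slots_complete_one(struct tx_slots *q)
{
	atomic_dec(&q->inflight);
	return test_and_clear_bit(BLOCKED_BIT, &q->blocked);
}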
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 509c8ac..86767ca 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -166,7 +166,7 @@
 {
 	struct sk_buff *skb;
 	struct atmsvc_msg *msg;
-	static unsigned session = 0;
+	static unsigned int session = 0;
 
 	pr_debug("%d (0x%p)\n", (int)type, vcc);
 	while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 0906c19..051f7ab 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1990,7 +1990,6 @@
 	sock_register(&ax25_family_ops);
 	dev_add_pack(&ax25_packet_type);
 	register_netdevice_notifier(&ax25_dev_notifier);
-	ax25_register_sysctl();
 
 	proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops);
 	proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops);
@@ -2011,16 +2010,16 @@
 	proc_net_remove(&init_net, "ax25_route");
 	proc_net_remove(&init_net, "ax25");
 	proc_net_remove(&init_net, "ax25_calls");
-	ax25_rt_free();
-	ax25_uid_free();
-	ax25_dev_free();
 
-	ax25_unregister_sysctl();
 	unregister_netdevice_notifier(&ax25_dev_notifier);
 
 	dev_remove_pack(&ax25_packet_type);
 
 	sock_unregister(PF_AX25);
 	proto_unregister(&ax25_proto);
+
+	ax25_rt_free();
+	ax25_uid_free();
+	ax25_dev_free();
 }
 module_exit(ax25_exit);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index d0de30e..3d10676 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -59,8 +59,6 @@
 		return;
 	}
 
-	ax25_unregister_sysctl();
-
 	dev->ax25_ptr     = ax25_dev;
 	ax25_dev->dev     = dev;
 	dev_hold(dev);
@@ -90,7 +88,7 @@
 	ax25_dev_list  = ax25_dev;
 	spin_unlock_bh(&ax25_dev_lock);
 
-	ax25_register_sysctl();
+	ax25_register_dev_sysctl(ax25_dev);
 }
 
 void ax25_dev_device_down(struct net_device *dev)
@@ -100,7 +98,7 @@
 	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
 		return;
 
-	ax25_unregister_sysctl();
+	ax25_unregister_dev_sysctl(ax25_dev);
 
 	spin_lock_bh(&ax25_dev_lock);
 
@@ -120,7 +118,6 @@
 		spin_unlock_bh(&ax25_dev_lock);
 		dev_put(dev);
 		kfree(ax25_dev);
-		ax25_register_sysctl();
 		return;
 	}
 
@@ -130,7 +127,6 @@
 			spin_unlock_bh(&ax25_dev_lock);
 			dev_put(dev);
 			kfree(ax25_dev);
-			ax25_register_sysctl();
 			return;
 		}
 
@@ -138,8 +134,6 @@
 	}
 	spin_unlock_bh(&ax25_dev_lock);
 	dev->ax25_ptr = NULL;
-
-	ax25_register_sysctl();
 }
 
 int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 846ae4e..67de6b3 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -48,7 +48,7 @@
 
 int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
 		     unsigned short type, const void *daddr,
-		     const void *saddr, unsigned len)
+		     const void *saddr, unsigned int len)
 {
 	unsigned char *buff;
 
@@ -219,7 +219,7 @@
 
 int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
 		     unsigned short type, const void *daddr,
-		     const void *saddr, unsigned len)
+		     const void *saddr, unsigned int len)
 {
 	return -AX25_HEADER_LEN;
 }
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index ebe0ef3..d5744b7 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -29,17 +29,6 @@
 static int min_ds_timeout[1],		max_ds_timeout[] = {65535000};
 #endif
 
-static struct ctl_table_header *ax25_table_header;
-
-static ctl_table *ax25_table;
-static int ax25_table_size;
-
-static struct ctl_path ax25_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ax25", },
-	{ }
-};
-
 static const ctl_table ax25_param_table[] = {
 	{
 		.procname	= "ip_default_mode",
@@ -159,52 +148,37 @@
 	{ }	/* that's all, folks! */
 };
 
-void ax25_register_sysctl(void)
+int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
 {
-	ax25_dev *ax25_dev;
-	int n, k;
+	char path[sizeof("net/ax25/") + IFNAMSIZ];
+	int k;
+	struct ctl_table *table;
 
-	spin_lock_bh(&ax25_dev_lock);
-	for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
-		ax25_table_size += sizeof(ctl_table);
+	table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
 
-	if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
-		spin_unlock_bh(&ax25_dev_lock);
-		return;
+	for (k = 0; k < AX25_MAX_VALUES; k++)
+		table[k].data = &ax25_dev->values[k];
+
+	snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name);
+	ax25_dev->sysheader = register_net_sysctl(&init_net, path, table);
+	if (!ax25_dev->sysheader) {
+		kfree(table);
+		return -ENOMEM;
 	}
-
-	for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
-		struct ctl_table *child = kmemdup(ax25_param_table,
-						  sizeof(ax25_param_table),
-						  GFP_ATOMIC);
-		if (!child) {
-			while (n--)
-				kfree(ax25_table[n].child);
-			kfree(ax25_table);
-			spin_unlock_bh(&ax25_dev_lock);
-			return;
-		}
-		ax25_table[n].child = ax25_dev->systable = child;
-		ax25_table[n].procname     = ax25_dev->dev->name;
-		ax25_table[n].mode         = 0555;
-
-
-		for (k = 0; k < AX25_MAX_VALUES; k++)
-			child[k].data = &ax25_dev->values[k];
-
-		n++;
-	}
-	spin_unlock_bh(&ax25_dev_lock);
-
-	ax25_table_header = register_sysctl_paths(ax25_path, ax25_table);
+	return 0;
 }
 
-void ax25_unregister_sysctl(void)
+void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev)
 {
-	ctl_table *p;
-	unregister_sysctl_table(ax25_table_header);
+	struct ctl_table_header *header = ax25_dev->sysheader;
+	struct ctl_table *table;
 
-	for (p = ax25_table; p->procname; p++)
-		kfree(p->child);
-	kfree(ax25_table);
+	if (header) {
+		ax25_dev->sysheader = NULL;
+		table = header->ctl_table_arg;
+		unregister_net_sysctl_table(header);
+		kfree(table);
+	}
 }
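The ax25 sysctl conversion registers one table per device: it duplicates a template with kmemdup(), points each entry's ->data at the device's values, registers the copy under a per-device path with register_net_sysctl(), and frees the copy via header->ctl_table_arg on unregister. A hedged sketch of that per-instance pattern; the table contents and helper names are illustrative.

#include <linux/slab.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static const struct ctl_table tmpl[] = {
	{
		.procname	= "value",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

/* register a private copy of the template table for one instance */
static struct ctl_table_header *register_instance_sysctl(const char *path,
							  int *value)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = kmemdup(tmpl, sizeof(tmpl), GFP_KERNEL);
	if (!table)
		return NULL;
	table[0].data = value;		/* point the entry at this instance */

	hdr = register_net_sysctl(&init_net, path, table);
	if (!hdr)
		kfree(table);
	return hdr;
}

static void unregister_instance_sysctl(struct ctl_table_header *hdr)
{
	struct ctl_table *table = hdr->ctl_table_arg;

	unregister_net_sysctl_table(hdr);
	kfree(table);
}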
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 2b68d06..53f5244 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -7,19 +7,28 @@
 	depends on NET
 	select CRC16
         default n
-	---help---
+	help
+          B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
+          a routing protocol for multi-hop ad-hoc mesh networks. The
+          networks may be wired or wireless. See
+          http://www.open-mesh.org/ for more information and user space
+          tools.
 
-        B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
-        a routing protocol for multi-hop ad-hoc mesh networks. The
-        networks may be wired or wireless. See
-        http://www.open-mesh.org/ for more information and user space
-        tools.
+config BATMAN_ADV_BLA
+	bool "Bridge Loop Avoidance"
+	depends on BATMAN_ADV && INET
+	default y
+	help
+	  This option enables BLA (Bridge Loop Avoidance), a mechanism
+	  to avoid Ethernet frames looping when mesh nodes are connected
+	  to both the same LAN and the same mesh. If you will never use
+	  more than one mesh node in the same LAN, you can safely remove
+	  this feature and save some space.
 
 config BATMAN_ADV_DEBUG
 	bool "B.A.T.M.A.N. debugging"
-	depends on BATMAN_ADV != n
-	---help---
-
+	depends on BATMAN_ADV
+	help
 	  This is an option for use by developers; most people should
 	  say N here. This enables compilation of support for
 	  outputting debugging information to the kernel log. The
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 4e392eb..6d5c194 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -23,6 +23,7 @@
 batman-adv-y += bat_iv_ogm.o
 batman-adv-y += bat_sysfs.o
 batman-adv-y += bitarray.o
+batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
 batman-adv-y += gateway_client.o
 batman-adv-y += gateway_common.o
 batman-adv-y += hard-interface.o
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index c3b0548..916380c 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -32,6 +32,7 @@
 #include "soft-interface.h"
 #include "vis.h"
 #include "icmp_socket.h"
+#include "bridge_loop_avoidance.h"
 
 static struct dentry *bat_debugfs;
 
@@ -238,18 +239,20 @@
 	return single_open(file, gw_client_seq_print_text, net_dev);
 }
 
-static int softif_neigh_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, softif_neigh_seq_print_text, net_dev);
-}
-
 static int transtable_global_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
 	return single_open(file, tt_global_seq_print_text, net_dev);
 }
 
+#ifdef CONFIG_BATMAN_ADV_BLA
+static int bla_claim_table_open(struct inode *inode, struct file *file)
+{
+	struct net_device *net_dev = (struct net_device *)inode->i_private;
+	return single_open(file, bla_claim_table_seq_print_text, net_dev);
+}
+#endif
+
 static int transtable_local_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
@@ -282,16 +285,20 @@
 static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
 static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
 static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
-static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open);
 static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
+#ifdef CONFIG_BATMAN_ADV_BLA
+static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
+#endif
 static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
 static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
 
 static struct bat_debuginfo *mesh_debuginfos[] = {
 	&bat_debuginfo_originators,
 	&bat_debuginfo_gateways,
-	&bat_debuginfo_softif_neigh,
 	&bat_debuginfo_transtable_global,
+#ifdef CONFIG_BATMAN_ADV_BLA
+	&bat_debuginfo_bla_claim_table,
+#endif
 	&bat_debuginfo_transtable_local,
 	&bat_debuginfo_vis_data,
 	NULL,
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a6d5d63..8b2db2e 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -30,24 +30,44 @@
 #include "send.h"
 #include "bat_algo.h"
 
-static void bat_iv_ogm_init(struct hard_iface *hard_iface)
+static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
 {
 	struct batman_ogm_packet *batman_ogm_packet;
+	uint32_t random_seqno;
+	int res = -1;
 
-	hard_iface->packet_len = BATMAN_OGM_LEN;
+	/* randomize initial seqno to avoid collision */
+	get_random_bytes(&random_seqno, sizeof(random_seqno));
+	atomic_set(&hard_iface->seqno, random_seqno);
+
+	hard_iface->packet_len = BATMAN_OGM_HLEN;
 	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
 
+	if (!hard_iface->packet_buff)
+		goto out;
+
 	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
-	batman_ogm_packet->header.packet_type = BAT_OGM;
+	batman_ogm_packet->header.packet_type = BAT_IV_OGM;
 	batman_ogm_packet->header.version = COMPAT_VERSION;
 	batman_ogm_packet->header.ttl = 2;
 	batman_ogm_packet->flags = NO_FLAGS;
 	batman_ogm_packet->tq = TQ_MAX_VALUE;
 	batman_ogm_packet->tt_num_changes = 0;
 	batman_ogm_packet->ttvn = 0;
+
+	res = 0;
+
+out:
+	return res;
 }
 
-static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface)
+static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
+{
+	kfree(hard_iface->packet_buff);
+	hard_iface->packet_buff = NULL;
+}
+
+static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
 {
 	struct batman_ogm_packet *batman_ogm_packet;
 
@@ -92,7 +112,7 @@
 static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
 				  int tt_num_changes)
 {
-	int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);
+	int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);
 
 	return (next_buff_pos <= packet_len) &&
 		(next_buff_pos <= MAX_AGGREGATION_BYTES);
@@ -132,7 +152,7 @@
 							    "Sending own" :
 							    "Forwarding"));
 		bat_dbg(DBG_BATMAN, bat_priv,
-			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
+			"%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
 			fwd_str, (packet_num > 0 ? "aggregated " : ""),
 			batman_ogm_packet->orig,
 			ntohl(batman_ogm_packet->seqno),
@@ -142,7 +162,7 @@
 			batman_ogm_packet->ttvn, hard_iface->net_dev->name,
 			hard_iface->net_dev->dev_addr);
 
-		buff_pos += BATMAN_OGM_LEN +
+		buff_pos += BATMAN_OGM_HLEN +
 				tt_len(batman_ogm_packet->tt_num_changes);
 		packet_num++;
 		batman_ogm_packet = (struct batman_ogm_packet *)
@@ -191,7 +211,7 @@
 
 		/* FIXME: what about aggregated packets ? */
 		bat_dbg(DBG_BATMAN, bat_priv,
-			"%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n",
+			"%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
 			(forw_packet->own ? "Sending own" : "Forwarding"),
 			batman_ogm_packet->orig,
 			ntohl(batman_ogm_packet->seqno),
@@ -335,10 +355,9 @@
 	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
 	    (packet_len < MAX_AGGREGATION_BYTES))
 		forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
-						      sizeof(struct ethhdr));
+						      ETH_HLEN);
 	else
-		forw_packet_aggr->skb = dev_alloc_skb(packet_len +
-						      sizeof(struct ethhdr));
+		forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN);
 
 	if (!forw_packet_aggr->skb) {
 		if (!own_packet)
@@ -346,7 +365,7 @@
 		kfree(forw_packet_aggr);
 		goto out;
 	}
-	skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
+	skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
 
 	INIT_HLIST_NODE(&forw_packet_aggr->list);
 
@@ -520,7 +539,7 @@
 		batman_ogm_packet->flags &= ~DIRECTLINK;
 
 	bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
-			     BATMAN_OGM_LEN + tt_len(tt_num_changes),
+			     BATMAN_OGM_HLEN + tt_len(tt_num_changes),
 			     if_incoming, 0, bat_iv_ogm_fwd_send_time());
 }
 
@@ -842,7 +861,8 @@
 	seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
 
 	/* signalize caller that the packet is to be dropped. */
-	if (window_protected(bat_priv, seq_diff,
+	if (!hlist_empty(&orig_node->neigh_list) &&
+	    window_protected(bat_priv, seq_diff,
 			     &orig_node->batman_seqno_reset))
 		goto out;
 
@@ -850,9 +870,9 @@
 	hlist_for_each_entry_rcu(tmp_neigh_node, node,
 				 &orig_node->neigh_list, list) {
 
-		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
-					       orig_node->last_real_seqno,
-					       batman_ogm_packet->seqno);
+		is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
+					     orig_node->last_real_seqno,
+					     batman_ogm_packet->seqno);
 
 		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
 		    (tmp_neigh_node->if_incoming == if_incoming))
@@ -866,13 +886,14 @@
 					      seq_diff, set_mark);
 
 		tmp_neigh_node->real_packet_count =
-			bit_packet_count(tmp_neigh_node->real_bits);
+			bitmap_weight(tmp_neigh_node->real_bits,
+				      TQ_LOCAL_WINDOW_SIZE);
 	}
 	rcu_read_unlock();
 
 	if (need_update) {
 		bat_dbg(DBG_BATMAN, bat_priv,
-			"updating last_seqno: old %d, new %d\n",
+			"updating last_seqno: old %u, new %u\n",
 			orig_node->last_real_seqno, batman_ogm_packet->seqno);
 		orig_node->last_real_seqno = batman_ogm_packet->seqno;
 	}
@@ -913,7 +934,7 @@
 	 * packet in an aggregation.  Here we expect that the padding
 	 * is always zero (or not 0x01)
 	 */
-	if (batman_ogm_packet->header.packet_type != BAT_OGM)
+	if (batman_ogm_packet->header.packet_type != BAT_IV_OGM)
 		return;
 
 	/* could be changed by schedule_own_packet() */
@@ -925,7 +946,7 @@
 					   batman_ogm_packet->orig) ? 1 : 0);
 
 	bat_dbg(DBG_BATMAN, bat_priv,
-		"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
+		"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
 		ethhdr->h_source, if_incoming->net_dev->name,
 		if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
 		batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
@@ -998,11 +1019,11 @@
 
 			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
 			word = &(orig_neigh_node->bcast_own[offset]);
-			bit_mark(word,
-				 if_incoming_seqno -
+			bat_set_bit(word,
+				    if_incoming_seqno -
 						batman_ogm_packet->seqno - 2);
 			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
-				bit_packet_count(word);
+				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
 			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
 		}
 
@@ -1152,12 +1173,12 @@
 		batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
 		batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
 
-		tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;
+		tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;
 
 		bat_iv_ogm_process(ethhdr, batman_ogm_packet,
 				   tt_buff, if_incoming);
 
-		buff_pos += BATMAN_OGM_LEN +
+		buff_pos += BATMAN_OGM_HLEN +
 				tt_len(batman_ogm_packet->tt_num_changes);
 
 		batman_ogm_packet = (struct batman_ogm_packet *)
@@ -1168,8 +1189,9 @@
 
 static struct bat_algo_ops batman_iv __read_mostly = {
 	.name = "BATMAN IV",
-	.bat_ogm_init = bat_iv_ogm_init,
-	.bat_ogm_init_primary = bat_iv_ogm_init_primary,
+	.bat_iface_enable = bat_iv_ogm_iface_enable,
+	.bat_iface_disable = bat_iv_ogm_iface_disable,
+	.bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
 	.bat_ogm_update_mac = bat_iv_ogm_update_mac,
 	.bat_ogm_schedule = bat_iv_ogm_schedule,
 	.bat_ogm_emit = bat_iv_ogm_emit,
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 68ff759..2c81688 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -149,7 +149,7 @@
 		 atomic_read(attr) == 1 ? "enabled" : "disabled",
 		 enabled == 1 ? "enabled" : "disabled");
 
-	atomic_set(attr, (unsigned)enabled);
+	atomic_set(attr, (unsigned int)enabled);
 	return count;
 }
 
@@ -268,7 +268,7 @@
 		 "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
 		 "client" : "server");
 
-	atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp);
+	atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
 	return count;
 }
 
@@ -354,7 +354,7 @@
 		 curr_gw_mode_str, buff);
 
 	gw_deselect(bat_priv);
-	atomic_set(&bat_priv->gw_mode, (unsigned)gw_mode_tmp);
+	atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
 	return count;
 }
 
@@ -386,6 +386,9 @@
 
 BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
 BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
+#ifdef CONFIG_BATMAN_ADV_BLA
+BAT_ATTR_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
+#endif
 BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
 BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
 static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
@@ -398,12 +401,15 @@
 static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
 		store_gw_bwidth);
 #ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL);
+BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
 #endif
 
 static struct bat_attribute *mesh_attrs[] = {
 	&bat_attr_aggregated_ogms,
 	&bat_attr_bonding,
+#ifdef CONFIG_BATMAN_ADV_BLA
+	&bat_attr_bridge_loop_avoidance,
+#endif
 	&bat_attr_fragmentation,
 	&bat_attr_ap_isolation,
 	&bat_attr_vis_mode,
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 6d0aa21..07ae6e1 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -24,100 +24,13 @@
 
 #include <linux/bitops.h>
 
-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
-int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
-		   uint32_t curr_seqno)
-{
-	int32_t diff, word_offset, word_num;
-
-	diff = last_seqno - curr_seqno;
-	if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) {
-		return 0;
-	} else {
-		/* which word */
-		word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE;
-		/* which position in the selected word */
-		word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE;
-
-		if (test_bit(word_offset, &seq_bits[word_num]))
-			return 1;
-		else
-			return 0;
-	}
-}
-
-/* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(unsigned long *seq_bits, int32_t n)
-{
-	int32_t word_offset, word_num;
-
-	/* if too old, just drop it */
-	if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
-		return;
-
-	/* which word */
-	word_num = n / WORD_BIT_SIZE;
-	/* which position in the selected word */
-	word_offset = n % WORD_BIT_SIZE;
-
-	set_bit(word_offset, &seq_bits[word_num]); /* turn the position on */
-}
-
 /* shift the packet array by n places. */
-static void bit_shift(unsigned long *seq_bits, int32_t n)
+static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
 {
-	int32_t word_offset, word_num;
-	int32_t i;
-
 	if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
 		return;
 
-	word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */
-	word_num = n / WORD_BIT_SIZE;	/* shift over how much (full) words */
-
-	for (i = NUM_WORDS - 1; i > word_num; i--) {
-		/* going from old to new, so we don't overwrite the data we copy
-		 * from.
-		 *
-		 * left is high, right is low: FEDC BA98 7654 3210
-		 *					  ^^ ^^
-		 *			       vvvv
-		 * ^^^^ = from, vvvvv =to, we'd have word_num==1 and
-		 * word_offset==WORD_BIT_SIZE/2 ????? in this example.
-		 * (=24 bits)
-		 *
-		 * our desired output would be: 9876 5432 1000 0000
-		 * */
-
-		seq_bits[i] =
-			(seq_bits[i - word_num] << word_offset) +
-			/* take the lower port from the left half, shift it left
-			 * to its final position */
-			(seq_bits[i - word_num - 1] >>
-			 (WORD_BIT_SIZE-word_offset));
-		/* and the upper part of the right half and shift it left to
-		 * its position */
-		/* for our example that would be: word[0] = 9800 + 0076 =
-		 * 9876 */
-	}
-	/* now for our last word, i==word_num, we only have its "left" half.
-	 * that's the 1000 word in our example.*/
-
-	seq_bits[i] = (seq_bits[i - word_num] << word_offset);
-
-	/* pad the rest with 0, if there is anything */
-	i--;
-
-	for (; i >= 0; i--)
-		seq_bits[i] = 0;
-}
-
-static void bit_reset_window(unsigned long *seq_bits)
-{
-	int i;
-	for (i = 0; i < NUM_WORDS; i++)
-		seq_bits[i] = 0;
+	bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE);
 }
 
 
@@ -137,7 +50,7 @@
 
 	if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
 		if (set_mark)
-			bit_mark(seq_bits, -seq_num_diff);
+			bat_set_bit(seq_bits, -seq_num_diff);
 		return 0;
 	}
 
@@ -145,10 +58,10 @@
 	 * set the mark if required */
 
 	if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
-		bit_shift(seq_bits, seq_num_diff);
+		bat_bitmap_shift_left(seq_bits, seq_num_diff);
 
 		if (set_mark)
-			bit_mark(seq_bits, 0);
+			bat_set_bit(seq_bits, 0);
 		return 1;
 	}
 
@@ -159,9 +72,9 @@
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"We missed a lot of packets (%i) !\n",
 			seq_num_diff - 1);
-		bit_reset_window(seq_bits);
+		bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
 		if (set_mark)
-			bit_mark(seq_bits, 0);
+			bat_set_bit(seq_bits, 0);
 		return 1;
 	}
 
@@ -176,9 +89,9 @@
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Other host probably restarted!\n");
 
-		bit_reset_window(seq_bits);
+		bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
 		if (set_mark)
-			bit_mark(seq_bits, 0);
+			bat_set_bit(seq_bits, 0);
 
 		return 1;
 	}
@@ -186,16 +99,3 @@
 	/* never reached */
 	return 0;
 }
-
-/* count the hamming weight, how many good packets did we receive? just count
- * the 1's.
- */
-int bit_packet_count(const unsigned long *seq_bits)
-{
-	int i, hamming = 0;
-
-	for (i = 0; i < NUM_WORDS; i++)
-		hamming += hweight_long(seq_bits[i]);
-
-	return hamming;
-}
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index c613572..1835c15 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -22,23 +22,33 @@
 #ifndef _NET_BATMAN_ADV_BITARRAY_H_
 #define _NET_BATMAN_ADV_BITARRAY_H_
 
-#define WORD_BIT_SIZE (sizeof(unsigned long) * 8)
-
 /* returns true if the corresponding bit in the given seq_bits indicates true
  * and curr_seqno is within range of last_seqno */
-int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
-		   uint32_t curr_seqno);
+static inline int bat_test_bit(const unsigned long *seq_bits,
+			       uint32_t last_seqno, uint32_t curr_seqno)
+{
+	int32_t diff;
+
+	diff = last_seqno - curr_seqno;
+	if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE)
+		return 0;
+	else
+		return test_bit(diff, seq_bits);
+}
 
 /* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(unsigned long *seq_bits, int32_t n);
+static inline void bat_set_bit(unsigned long *seq_bits, int32_t n)
+{
+	/* if too old, just drop it */
+	if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+		return;
 
+	set_bit(n, seq_bits); /* turn the position on */
+}
 
 /* receive and process one packet, returns 1 if received seq_num is considered
  * new, 0 if old  */
 int bit_get_packet(void *priv, unsigned long *seq_bits,
 		   int32_t seq_num_diff, int set_mark);
 
-/* count the hamming weight, how many good packets did we receive? */
-int bit_packet_count(const unsigned long *seq_bits);
-
 #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
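The bitarray rewrite drops the hand-rolled word arithmetic in favour of the generic bitmap helpers: test_bit()/set_bit() for single positions, bitmap_shift_left() to slide the window, bitmap_weight() to count received packets and bitmap_zero() to reset it. A small self-contained sketch of that sliding-window usage; the window size and names below are illustrative.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define WINDOW_SIZE 64

/* sliding window of recently seen sequence numbers, newest at bit 0 */
struct seq_window {
	DECLARE_BITMAP(bits, WINDOW_SIZE);
};

/* advance the window by 'diff' sequence numbers and mark the newest one */
static void seq_window_advance(struct seq_window *w, int diff)
{
	if (diff >= WINDOW_SIZE)
		bitmap_zero(w->bits, WINDOW_SIZE);	/* lost the whole window */
	else if (diff > 0)
		bitmap_shift_left(w->bits, w->bits, diff, WINDOW_SIZE);
	set_bit(0, w->bits);
}

/* how many of the last WINDOW_SIZE sequence numbers did we actually see? */
static int seq_window_count(const struct seq_window *w)
{
	return bitmap_weight(w->bits, WINDOW_SIZE);
}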
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
new file mode 100644
index 0000000..ad394c6
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -0,0 +1,1580 @@
+/*
+ * Copyright (C) 2011 B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "hash.h"
+#include "hard-interface.h"
+#include "originator.h"
+#include "bridge_loop_avoidance.h"
+#include "translation-table.h"
+#include "send.h"
+
+#include <linux/etherdevice.h>
+#include <linux/crc16.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+#include <linux/if_vlan.h>
+
+static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+
+static void bla_periodic_work(struct work_struct *work);
+static void bla_send_announce(struct bat_priv *bat_priv,
+			      struct backbone_gw *backbone_gw);
+
+/* return the index of the claim */
+static inline uint32_t choose_claim(const void *data, uint32_t size)
+{
+	const unsigned char *key = data;
+	uint32_t hash = 0;
+	size_t i;
+
+	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+
+	hash += (hash << 3);
+	hash ^= (hash >> 11);
+	hash += (hash << 15);
+
+	return hash % size;
+}
+
+/* return the index of the backbone gateway */
+static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
+{
+	const unsigned char *key = data;
+	uint32_t hash = 0;
+	size_t i;
+
+	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+
+	hash += (hash << 3);
+	hash ^= (hash >> 11);
+	hash += (hash << 15);
+
+	return hash % size;
+}
+
+
+/* compares address and vid of two backbone gws */
+static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
+{
+	const void *data1 = container_of(node, struct backbone_gw,
+					 hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+}
+
+/* compares address and vid of two claims */
+static int compare_claim(const struct hlist_node *node, const void *data2)
+{
+	const void *data1 = container_of(node, struct claim,
+					 hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+}
+
+/* free a backbone gw */
+static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
+{
+	if (atomic_dec_and_test(&backbone_gw->refcount))
+		kfree_rcu(backbone_gw, rcu);
+}
+
+/* finally deinitialize the claim */
+static void claim_free_rcu(struct rcu_head *rcu)
+{
+	struct claim *claim;
+
+	claim = container_of(rcu, struct claim, rcu);
+
+	backbone_gw_free_ref(claim->backbone_gw);
+	kfree(claim);
+}
+
+/* free a claim, call claim_free_rcu if it's the last reference */
+static void claim_free_ref(struct claim *claim)
+{
+	if (atomic_dec_and_test(&claim->refcount))
+		call_rcu(&claim->rcu, claim_free_rcu);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: search data (may be local/static data)
+ *
+ * looks for a claim in the hash, and returns it if found
+ * or NULL otherwise.
+ */
+static struct claim *claim_hash_find(struct bat_priv *bat_priv,
+				     struct claim *data)
+{
+	struct hashtable_t *hash = bat_priv->claim_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct claim *claim;
+	struct claim *claim_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	index = choose_claim(data, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+		if (!compare_claim(&claim->hash_entry, data))
+			continue;
+
+		if (!atomic_inc_not_zero(&claim->refcount))
+			continue;
+
+		claim_tmp = claim;
+		break;
+	}
+	rcu_read_unlock();
+
+	return claim_tmp;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the address of the originator
+ * @vid: the VLAN ID
+ *
+ * looks for a backbone gateway in the hash, and returns it if found
+ * or NULL otherwise.
+ */
+static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
+					      uint8_t *addr, short vid)
+{
+	struct hashtable_t *hash = bat_priv->backbone_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct backbone_gw search_entry, *backbone_gw;
+	struct backbone_gw *backbone_gw_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	memcpy(search_entry.orig, addr, ETH_ALEN);
+	search_entry.vid = vid;
+
+	index = choose_backbone_gw(&search_entry, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+		if (!compare_backbone_gw(&backbone_gw->hash_entry,
+					 &search_entry))
+			continue;
+
+		if (!atomic_inc_not_zero(&backbone_gw->refcount))
+			continue;
+
+		backbone_gw_tmp = backbone_gw;
+		break;
+	}
+	rcu_read_unlock();
+
+	return backbone_gw_tmp;
+}
+
+/* delete all claims for a backbone */
+static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
+{
+	struct hashtable_t *hash;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	struct claim *claim;
+	int i;
+	spinlock_t *list_lock;	/* protects write access to the hash lists */
+
+	hash = backbone_gw->bat_priv->claim_hash;
+	if (!hash)
+		return;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(claim, node, node_tmp,
+					  head, hash_entry) {
+
+			if (claim->backbone_gw != backbone_gw)
+				continue;
+
+			claim_free_ref(claim);
+			hlist_del_rcu(node);
+		}
+		spin_unlock_bh(list_lock);
+	}
+
+	/* all claims gone, initialize CRC */
+	backbone_gw->crc = BLA_CRC_INIT;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: the mac address to be announced within the claim
+ * @vid: the VLAN ID
+ * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
+ *
+ * sends a claim frame according to the provided info.
+ */
+static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
+			   short vid, int claimtype)
+{
+	struct sk_buff *skb;
+	struct ethhdr *ethhdr;
+	struct hard_iface *primary_if;
+	struct net_device *soft_iface;
+	uint8_t *hw_src;
+	struct bla_claim_dst local_claim_dest;
+	uint32_t zeroip = 0;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		return;
+
+	memcpy(&local_claim_dest, &bat_priv->claim_dest,
+	       sizeof(local_claim_dest));
+	local_claim_dest.type = claimtype;
+
+	soft_iface = primary_if->soft_iface;
+
+	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+			 /* IP DST: 0.0.0.0 */
+			 zeroip,
+			 primary_if->soft_iface,
+			 /* IP SRC: 0.0.0.0 */
+			 zeroip,
+			 /* Ethernet DST: Broadcast */
+			 NULL,
+			 /* Ethernet SRC/HW SRC:  originator mac */
+			 primary_if->net_dev->dev_addr,
+			 /* HW DST: FF:43:05:XX:YY:YY
+			  * with XX   = claim type
+			  * and YY:YY = group id
+			  */
+			 (uint8_t *)&local_claim_dest);
+
+	if (!skb)
+		goto out;
+
+	ethhdr = (struct ethhdr *)skb->data;
+	hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
+
+	/* now we pretend that the client would have sent this ... */
+	switch (claimtype) {
+	case CLAIM_TYPE_ADD:
+		/* normal claim frame
+		 * set Ethernet SRC to the client's mac
+		 */
+		memcpy(ethhdr->h_source, mac, ETH_ALEN);
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
+		break;
+	case CLAIM_TYPE_DEL:
+		/* unclaim frame
+		 * set HW SRC to the client's mac
+		 */
+		memcpy(hw_src, mac, ETH_ALEN);
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
+		break;
+	case CLAIM_TYPE_ANNOUNCE:
+		/* announcement frame
+		 * set HW SRC to the special mac containing the crc
+		 */
+		memcpy(hw_src, mac, ETH_ALEN);
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
+			ethhdr->h_source, vid);
+		break;
+	case CLAIM_TYPE_REQUEST:
+		/* request frame
+		 * set HW SRC and header destination to the receiving backbone gw's mac
+		 */
+		memcpy(hw_src, mac, ETH_ALEN);
+		memcpy(ethhdr->h_dest, mac, ETH_ALEN);
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
+			ethhdr->h_source, ethhdr->h_dest, vid);
+		break;
+
+	}
+
+	if (vid != -1)
+		skb = vlan_insert_tag(skb, vid);
+
+	skb_reset_mac_header(skb);
+	skb->protocol = eth_type_trans(skb, soft_iface);
+	bat_priv->stats.rx_packets++;
+	bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+	soft_iface->last_rx = jiffies;
+
+	netif_rx(skb);
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the mac address of the originator
+ * @vid: the VLAN ID
+ *
+ * searches for the backbone gw or creates a new one if it could not
+ * be found.
+ */
+static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
+					       uint8_t *orig, short vid)
+{
+	struct backbone_gw *entry;
+	struct orig_node *orig_node;
+	int hash_added;
+
+	entry = backbone_hash_find(bat_priv, orig, vid);
+
+	if (entry)
+		return entry;
+
+	bat_dbg(DBG_BLA, bat_priv,
+		"bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
+		orig, vid);
+
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return NULL;
+
+	entry->vid = vid;
+	entry->lasttime = jiffies;
+	entry->crc = BLA_CRC_INIT;
+	entry->bat_priv = bat_priv;
+	atomic_set(&entry->request_sent, 0);
+	memcpy(entry->orig, orig, ETH_ALEN);
+
+	/* one for the hash, one for returning */
+	atomic_set(&entry->refcount, 2);
+
+	hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
+			      choose_backbone_gw, entry, &entry->hash_entry);
+
+	if (unlikely(hash_added != 0)) {
+		/* hash failed, free the structure */
+		kfree(entry);
+		return NULL;
+	}
+
+	/* this is a gateway now, remove any tt entries */
+	orig_node = orig_hash_find(bat_priv, orig);
+	if (orig_node) {
+		tt_global_del_orig(bat_priv, orig_node,
+				   "became a backbone gateway");
+		orig_node_free_ref(orig_node);
+	}
+	return entry;
+}
+
+/* update or add our own backbone gw to make sure we announce
+ * where we receive other backbone gws
+ */
+static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
+				       struct hard_iface *primary_if,
+				       short vid)
+{
+	struct backbone_gw *backbone_gw;
+
+	backbone_gw = bla_get_backbone_gw(bat_priv,
+					  primary_if->net_dev->dev_addr, vid);
+	if (unlikely(!backbone_gw))
+		return;
+
+	backbone_gw->lasttime = jiffies;
+	backbone_gw_free_ref(backbone_gw);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the vid where the request came on
+ *
+ * Repeat all of our own claims, and finally send an ANNOUNCE frame
+ * to allow the requester another check if the CRC is correct now.
+ */
+static void bla_answer_request(struct bat_priv *bat_priv,
+			       struct hard_iface *primary_if, short vid)
+{
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct hashtable_t *hash;
+	struct claim *claim;
+	struct backbone_gw *backbone_gw;
+	int i;
+
+	bat_dbg(DBG_BLA, bat_priv,
+		"bla_answer_request(): received a claim request, send all of our own claims again\n");
+
+	backbone_gw = backbone_hash_find(bat_priv,
+					 primary_if->net_dev->dev_addr, vid);
+	if (!backbone_gw)
+		return;
+
+	hash = bat_priv->claim_hash;
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+			/* only own claims are interesting */
+			if (claim->backbone_gw != backbone_gw)
+				continue;
+
+			bla_send_claim(bat_priv, claim->addr, claim->vid,
+				       CLAIM_TYPE_ADD);
+		}
+		rcu_read_unlock();
+	}
+
+	/* finally, send an announcement frame */
+	bla_send_announce(bat_priv, backbone_gw);
+	backbone_gw_free_ref(backbone_gw);
+}
+
+/**
+ * @backbone_gw: the backbone gateway from whom we are out of sync
+ *
+ * When the crc is wrong, ask the backbone gateway for a full table update.
+ * After the request, it will repeat all of its own claims and finally
+ * send an announcement claim with which we can check again.
+ */
+static void bla_send_request(struct backbone_gw *backbone_gw)
+{
+	/* first, remove all old entries */
+	bla_del_backbone_claims(backbone_gw);
+
+	bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+		"Sending REQUEST to %pM\n",
+		backbone_gw->orig);
+
+	/* send request */
+	bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
+		       backbone_gw->vid, CLAIM_TYPE_REQUEST);
+
+	/* no local broadcasts should be sent or received, for now. */
+	if (!atomic_read(&backbone_gw->request_sent)) {
+		atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
+		atomic_set(&backbone_gw->request_sent, 1);
+	}
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @backbone_gw: our backbone gateway which should be announced
+ *
+ * This function sends an announcement. It is called from multiple
+ * places.
+ */
+static void bla_send_announce(struct bat_priv *bat_priv,
+			      struct backbone_gw *backbone_gw)
+{
+	uint8_t mac[ETH_ALEN];
+	uint16_t crc;
+
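+	/* the announcement mac is the 4 byte announce_mac prefix followed
+	 * by the 16 bit backbone CRC in network byte order
+	 */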
+	memcpy(mac, announce_mac, 4);
+	crc = htons(backbone_gw->crc);
+	memcpy(&mac[4], (uint8_t *)&crc, 2);
+
+	bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
+
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: the mac address of the claim
+ * @vid: the VLAN ID of the frame
+ * @backbone_gw: the backbone gateway which claims it
+ *
+ * Adds a claim in the claim hash.
+ */
+static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
+			  const short vid, struct backbone_gw *backbone_gw)
+{
+	struct claim *claim;
+	struct claim search_claim;
+	int hash_added;
+
+	memcpy(search_claim.addr, mac, ETH_ALEN);
+	search_claim.vid = vid;
+	claim = claim_hash_find(bat_priv, &search_claim);
+
+	/* create a new claim entry if it does not exist yet. */
+	if (!claim) {
+		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
+		if (!claim)
+			return;
+
+		memcpy(claim->addr, mac, ETH_ALEN);
+		claim->vid = vid;
+		claim->lasttime = jiffies;
+		claim->backbone_gw = backbone_gw;
+
+		atomic_set(&claim->refcount, 2);
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
+			mac, vid);
+		hash_added = hash_add(bat_priv->claim_hash, compare_claim,
+				      choose_claim, claim, &claim->hash_entry);
+
+		if (unlikely(hash_added != 0)) {
+			/* adding to the hash failed, free the local entry again */
+			kfree(claim);
+			return;
+		}
+	} else {
+		claim->lasttime = jiffies;
+		if (claim->backbone_gw == backbone_gw)
+			/* no need to register a new backbone */
+			goto claim_free_ref;
+
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_add_claim(): changing ownership for %pM, vid %d\n",
+			mac, vid);
+
+		claim->backbone_gw->crc ^=
+			crc16(0, claim->addr, ETH_ALEN);
+		backbone_gw_free_ref(claim->backbone_gw);
+
+	}
+	/* set (new) backbone gw */
+	atomic_inc(&backbone_gw->refcount);
+	claim->backbone_gw = backbone_gw;
+
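+	/* the backbone CRC is kept as the XOR of the crc16 of all claimed
+	 * client macs, so adding or removing a claim simply XORs its crc16
+	 * in or out
+	 */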
+	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+	backbone_gw->lasttime = jiffies;
+
+claim_free_ref:
+	claim_free_ref(claim);
+}
+
+/* Delete a claim from the claim hash which has the
+ * given mac address and vid.
+ */
+static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
+			  const short vid)
+{
+	struct claim search_claim, *claim;
+
+	memcpy(search_claim.addr, mac, ETH_ALEN);
+	search_claim.vid = vid;
+	claim = claim_hash_find(bat_priv, &search_claim);
+	if (!claim)
+		return;
+
+	bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
+
+	hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
+	claim_free_ref(claim); /* reference from the hash is gone */
+
+	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+
+	/* don't need the reference from hash_find() anymore */
+	claim_free_ref(claim);
+}
+
+/* check for ANNOUNCE frame, return 1 if handled */
+static int handle_announce(struct bat_priv *bat_priv,
+			   uint8_t *an_addr, uint8_t *backbone_addr, short vid)
+{
+	struct backbone_gw *backbone_gw;
+	uint16_t crc;
+
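+	/* only frames whose HW SRC starts with the announcement prefix are
+	 * handled as announcements
+	 */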
+	if (memcmp(an_addr, announce_mac, 4) != 0)
+		return 0;
+
+	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+
+	if (unlikely(!backbone_gw))
+		return 1;
+
+
+	/* handle as ANNOUNCE frame */
+	backbone_gw->lasttime = jiffies;
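+	/* the announced CRC is carried in the last two bytes of the special
+	 * announcement mac (see bla_send_announce())
+	 */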
+	crc = ntohs(*((uint16_t *)(&an_addr[4])));
+
+	bat_dbg(DBG_BLA, bat_priv,
+		"handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
+		vid, backbone_gw->orig, crc);
+
+	if (backbone_gw->crc != crc) {
+		bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+			"handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
+			backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
+			crc);
+
+		bla_send_request(backbone_gw);
+	} else {
+		/* if we have sent a request and the crc was OK,
+		 * we can allow traffic again.
+		 */
+		if (atomic_read(&backbone_gw->request_sent)) {
+			atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
+			atomic_set(&backbone_gw->request_sent, 0);
+		}
+	}
+
+	backbone_gw_free_ref(backbone_gw);
+	return 1;
+}
+
+/* check for REQUEST frame, return 1 if handled */
+static int handle_request(struct bat_priv *bat_priv,
+			  struct hard_iface *primary_if,
+			  uint8_t *backbone_addr,
+			  struct ethhdr *ethhdr, short vid)
+{
+	/* check for REQUEST frame */
+	if (!compare_eth(backbone_addr, ethhdr->h_dest))
+		return 0;
+
+	/* sanity check, this should not happen on a normal switch,
+	 * we ignore it in this case.
+	 */
+	if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
+		return 1;
+
+	bat_dbg(DBG_BLA, bat_priv,
+		"handle_request(): REQUEST vid %d (sent by %pM)...\n",
+		vid, ethhdr->h_source);
+
+	bla_answer_request(bat_priv, primary_if, vid);
+	return 1;
+}
+
+/* check for UNCLAIM frame, return 1 if handled */
+static int handle_unclaim(struct bat_priv *bat_priv,
+			  struct hard_iface *primary_if,
+			  uint8_t *backbone_addr,
+			  uint8_t *claim_addr, short vid)
+{
+	struct backbone_gw *backbone_gw;
+
+	/* unclaim in any case if it is our own */
+	if (primary_if && compare_eth(backbone_addr,
+				      primary_if->net_dev->dev_addr))
+		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
+
+	backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
+
+	if (!backbone_gw)
+		return 1;
+
+	/* this must be an UNCLAIM frame */
+	bat_dbg(DBG_BLA, bat_priv,
+		"handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
+		claim_addr, vid, backbone_gw->orig);
+
+	bla_del_claim(bat_priv, claim_addr, vid);
+	backbone_gw_free_ref(backbone_gw);
+	return 1;
+}
+
+/* check for CLAIM frame, return 1 if handled */
+static int handle_claim(struct bat_priv *bat_priv,
+			struct hard_iface *primary_if, uint8_t *backbone_addr,
+			uint8_t *claim_addr, short vid)
+{
+	struct backbone_gw *backbone_gw;
+
+	/* register the gateway if not yet available, and add the claim. */
+
+	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+
+	if (unlikely(!backbone_gw))
+		return 1;
+
+	/* this must be a CLAIM frame */
+	bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
+	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
+
+	/* TODO: we could call something like tt_local_del() here. */
+
+	backbone_gw_free_ref(backbone_gw);
+	return 1;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hw_src: the Hardware source in the ARP Header
+ * @hw_dst: the Hardware destination in the ARP Header
+ * @ethhdr: pointer to the Ethernet header of the claim frame
+ *
+ * checks if it is a claim packet and if it's on the same group.
+ * This function also adopts the group ID of the sender
+ * if it is in the same mesh and the sender's group ID is bigger.
+ *
+ * returns:
+ *	2  - if it is a claim packet and on the same group
+ *	1  - if it is a claim packet from another group
+ *	0  - if it is not a claim packet
+ */
+static int check_claim_group(struct bat_priv *bat_priv,
+			     struct hard_iface *primary_if,
+			     uint8_t *hw_src, uint8_t *hw_dst,
+			     struct ethhdr *ethhdr)
+{
+	uint8_t *backbone_addr;
+	struct orig_node *orig_node;
+	struct bla_claim_dst *bla_dst, *bla_dst_own;
+
+	bla_dst = (struct bla_claim_dst *)hw_dst;
+	bla_dst_own = &bat_priv->claim_dest;
+
+	/* check if it is a claim packet in general */
+	if (memcmp(bla_dst->magic, bla_dst_own->magic,
+		   sizeof(bla_dst->magic)) != 0)
+		return 0;
+
+	/* for claim frames, the backbone address is in hw_src;
+	 * for all other claim types, use the ethernet source
+	 */
+	switch (bla_dst->type) {
+	case CLAIM_TYPE_ADD:
+		backbone_addr = hw_src;
+		break;
+	case CLAIM_TYPE_REQUEST:
+	case CLAIM_TYPE_ANNOUNCE:
+	case CLAIM_TYPE_DEL:
+		backbone_addr = ethhdr->h_source;
+		break;
+	default:
+		return 0;
+	}
+
+	/* don't accept claim frames from ourselves */
+	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+		return 0;
+
+	/* if it's already the same group, it is fine. */
+	if (bla_dst->group == bla_dst_own->group)
+		return 2;
+
+	/* let's see if this originator is in our mesh */
+	orig_node = orig_hash_find(bat_priv, backbone_addr);
+
+	/* don't accept claims from gateways which are not in
+	 * the same mesh or group.
+	 */
+	if (!orig_node)
+		return 1;
+
+	/* if our mesh friend's group id is bigger, adopt it for ourselves. */
+	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
+		bat_dbg(DBG_BLA, bat_priv,
+			"taking other backbones claim group: %04x\n",
+			ntohs(bla_dst->group));
+		bla_dst_own->group = bla_dst->group;
+	}
+
+	orig_node_free_ref(orig_node);
+
+	return 2;
+}
+
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ *
+ * Check if this is a claim frame, and process it accordingly.
+ *
+ * returns 1 if it was a claim frame, otherwise returns 0 to
+ * tell the caller that it can use the frame on its own.
+ */
+static int bla_process_claim(struct bat_priv *bat_priv,
+			     struct hard_iface *primary_if,
+			     struct sk_buff *skb)
+{
+	struct ethhdr *ethhdr;
+	struct vlan_ethhdr *vhdr;
+	struct arphdr *arphdr;
+	uint8_t *hw_src, *hw_dst;
+	struct bla_claim_dst *bla_dst;
+	uint16_t proto;
+	int headlen;
+	short vid = -1;
+	int ret;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
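+	/* if the frame is vlan tagged, pick up the vid and the encapsulated
+	 * protocol, otherwise use the plain ethernet protocol field
+	 */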
+	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+		vhdr = (struct vlan_ethhdr *)ethhdr;
+		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+		proto = ntohs(vhdr->h_vlan_encapsulated_proto);
+		headlen = sizeof(*vhdr);
+	} else {
+		proto = ntohs(ethhdr->h_proto);
+		headlen = ETH_HLEN;
+	}
+
+	if (proto != ETH_P_ARP)
+		return 0; /* not a claim frame */
+
+	/* this must be an ARP frame. check if it is a claim. */
+
+	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
+		return 0;
+
+	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
+
+	/* Check whether the ARP frame carries valid
+	 * IP information
+	 */
+
+	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
+		return 0;
+	if (arphdr->ar_pro != htons(ETH_P_IP))
+		return 0;
+	if (arphdr->ar_hln != ETH_ALEN)
+		return 0;
+	if (arphdr->ar_pln != 4)
+		return 0;
+
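+	/* the ethernet/IPv4 ARP payload is sender hw (6 bytes), sender ip
+	 * (4 bytes), target hw (6 bytes), target ip (4 bytes), so the
+	 * target hw address starts ETH_ALEN + 4 bytes after hw_src
+	 */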
+	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
+	hw_dst = hw_src + ETH_ALEN + 4;
+	bla_dst = (struct bla_claim_dst *)hw_dst;
+
+	/* check if it is a claim frame. */
+	ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
+	if (ret == 1)
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+			ethhdr->h_source, vid, hw_src, hw_dst);
+
+	if (ret < 2)
+		return ret;
+
+	/* become a backbone gw ourselves on this vlan if we haven't already */
+	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+
+	/* check for the different types of claim frames ... */
+	switch (bla_dst->type) {
+	case CLAIM_TYPE_ADD:
+		if (handle_claim(bat_priv, primary_if, hw_src,
+				 ethhdr->h_source, vid))
+			return 1;
+		break;
+	case CLAIM_TYPE_DEL:
+		if (handle_unclaim(bat_priv, primary_if,
+				   ethhdr->h_source, hw_src, vid))
+			return 1;
+		break;
+
+	case CLAIM_TYPE_ANNOUNCE:
+		if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
+			return 1;
+		break;
+	case CLAIM_TYPE_REQUEST:
+		if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
+			return 1;
+		break;
+	}
+
+	bat_dbg(DBG_BLA, bat_priv,
+		"bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+		ethhdr->h_source, vid, hw_src, hw_dst);
+	return 1;
+}
+
+/* Check when we last heard from other backbone gateways, and remove them
+ * if they have timed out, or clean all backbone gws if now is set.
+ */
+static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
+{
+	struct backbone_gw *backbone_gw;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	struct hashtable_t *hash;
+	spinlock_t *list_lock;	/* protects write access to the hash lists */
+	int i;
+
+	hash = bat_priv->backbone_hash;
+	if (!hash)
+		return;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
+					  head, hash_entry) {
+			if (now)
+				goto purge_now;
+			if (!has_timed_out(backbone_gw->lasttime,
+					   BLA_BACKBONE_TIMEOUT))
+				continue;
+
+			bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+				"bla_purge_backbone_gw(): backbone gw %pM timed out\n",
+				backbone_gw->orig);
+
+purge_now:
+			/* don't wait for the pending request anymore */
+			if (atomic_read(&backbone_gw->request_sent))
+				atomic_dec(&bat_priv->bla_num_requests);
+
+			bla_del_backbone_claims(backbone_gw);
+
+			hlist_del_rcu(node);
+			backbone_gw_free_ref(backbone_gw);
+		}
+		spin_unlock_bh(list_lock);
+	}
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface, may be NULL if now is set
+ * @now: whether the whole hash shall be wiped now
+ *
+ * Check when we last heard from the clients we have claimed, and remove
+ * the claims if they have timed out, or clean all claims if now is set.
+ */
+static void bla_purge_claims(struct bat_priv *bat_priv,
+			     struct hard_iface *primary_if, int now)
+{
+	struct claim *claim;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct hashtable_t *hash;
+	int i;
+
+	hash = bat_priv->claim_hash;
+	if (!hash)
+		return;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+			if (now)
+				goto purge_now;
+			if (!compare_eth(claim->backbone_gw->orig,
+					 primary_if->net_dev->dev_addr))
+				continue;
+			if (!has_timed_out(claim->lasttime,
+					   BLA_CLAIM_TIMEOUT))
+				continue;
+
+			bat_dbg(DBG_BLA, bat_priv,
+				"bla_purge_claims(): %pM, vid %d, time out\n",
+				claim->addr, claim->vid);
+
+purge_now:
+			handle_unclaim(bat_priv, primary_if,
+				       claim->backbone_gw->orig,
+				       claim->addr, claim->vid);
+		}
+		rcu_read_unlock();
+	}
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the new selected primary_if
+ * @oldif: the old primary interface, may be NULL
+ *
+ * Update the backbone gateways when the own orig address changes.
+ *
+ */
+void bla_update_orig_address(struct bat_priv *bat_priv,
+			     struct hard_iface *primary_if,
+			     struct hard_iface *oldif)
+{
+	struct backbone_gw *backbone_gw;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct hashtable_t *hash;
+	int i;
+
+	/* reset bridge loop avoidance group id */
+	bat_priv->claim_dest.group =
+		htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+
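+	/* no old interface given: wipe all claims and backbone gateways
+	 * instead of rewriting them to the new address
+	 */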
+	if (!oldif) {
+		bla_purge_claims(bat_priv, NULL, 1);
+		bla_purge_backbone_gw(bat_priv, 1);
+		return;
+	}
+
+	hash = bat_priv->backbone_hash;
+	if (!hash)
+		return;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+			/* own orig still holds the old value. */
+			if (!compare_eth(backbone_gw->orig,
+					 oldif->net_dev->dev_addr))
+				continue;
+
+			memcpy(backbone_gw->orig,
+			       primary_if->net_dev->dev_addr, ETH_ALEN);
+			/* send an announce frame so others will ask for our
+			 * claims and update their tables.
+			 */
+			bla_send_announce(bat_priv, backbone_gw);
+		}
+		rcu_read_unlock();
+	}
+}
+
+
+
+/* (re)start the timer */
+static void bla_start_timer(struct bat_priv *bat_priv)
+{
+	INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
+	queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
+			   msecs_to_jiffies(BLA_PERIOD_LENGTH));
+}
+
+/* periodic work to do:
+ *  * purge structures when they are too old
+ *  * send announcements
+ */
+static void bla_periodic_work(struct work_struct *work)
+{
+	struct delayed_work *delayed_work =
+		container_of(work, struct delayed_work, work);
+	struct bat_priv *bat_priv =
+		container_of(delayed_work, struct bat_priv, bla_work);
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct backbone_gw *backbone_gw;
+	struct hashtable_t *hash;
+	struct hard_iface *primary_if;
+	int i;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	bla_purge_claims(bat_priv, primary_if, 0);
+	bla_purge_backbone_gw(bat_priv, 0);
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		goto out;
+
+	hash = bat_priv->backbone_hash;
+	if (!hash)
+		goto out;
+
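+	/* send an announcement on every vlan on which we are a backbone
+	 * gateway ourselves
+	 */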
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+			if (!compare_eth(backbone_gw->orig,
+					 primary_if->net_dev->dev_addr))
+				continue;
+
+			backbone_gw->lasttime = jiffies;
+
+			bla_send_announce(bat_priv, backbone_gw);
+		}
+		rcu_read_unlock();
+	}
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+
+	bla_start_timer(bat_priv);
+}
+
+/* initialize all bla structures */
+int bla_init(struct bat_priv *bat_priv)
+{
+	int i;
+	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
+	struct hard_iface *primary_if;
+
+	bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
+
+	/* setting claim destination address */
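+	/* the first three bytes (ff:43:05) are the claim frame magic, the
+	 * remaining bytes carry the claim type and the group id
+	 * (see struct bla_claim_dst)
+	 */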
+	memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
+	bat_priv->claim_dest.type = 0;
+	primary_if = primary_if_get_selected(bat_priv);
+	if (primary_if) {
+		bat_priv->claim_dest.group =
+			htons(crc16(0, primary_if->net_dev->dev_addr,
+				    ETH_ALEN));
+		hardif_free_ref(primary_if);
+	} else {
+		bat_priv->claim_dest.group = 0; /* will be set later */
+	}
+
+	/* initialize the duplicate list */
+	for (i = 0; i < DUPLIST_SIZE; i++)
+		bat_priv->bcast_duplist[i].entrytime =
+			jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
+	bat_priv->bcast_duplist_curr = 0;
+
+	if (bat_priv->claim_hash)
+		return 1;
+
+	bat_priv->claim_hash = hash_new(128);
+	bat_priv->backbone_hash = hash_new(32);
+
+	if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+		return -1;
+
+	bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
+
+	bla_start_timer(bat_priv);
+	return 1;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @bcast_packet: the broadcast packet to be checked
+ * @hdr_size: maximum length of the frame
+ *
+ * check if it is on our broadcast list. Another gateway might
+ * have sent the same packet because it is connected to the same backbone,
+ * so we have to remove this duplicate.
+ *
+ * This is performed by checking the CRC, which will tell us
+ * with a good chance that it is the same packet. If it is furthermore
+ * sent by another host, drop it. We allow equal packets from
+ * the same host however as this might be intended.
+ *
+ */
+
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+			    struct bcast_packet *bcast_packet,
+			    int hdr_size)
+{
+	int i, length, curr;
+	uint8_t *content;
+	uint16_t crc;
+	struct bcast_duplist_entry *entry;
+
+	length = hdr_size - sizeof(*bcast_packet);
+	content = (uint8_t *)bcast_packet;
+	content += sizeof(*bcast_packet);
+
+	/* calculate the crc ... */
+	crc = crc16(0, content, length);
+
+	for (i = 0; i < DUPLIST_SIZE; i++) {
+		curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
+		entry = &bat_priv->bcast_duplist[curr];
+
+		/* we can stop searching if the entry is too old;
+		 * later entries will be even older
+		 */
+		if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
+			break;
+
+		if (entry->crc != crc)
+			continue;
+
+		if (compare_eth(entry->orig, bcast_packet->orig))
+			continue;
+
+		/* this entry seems to match: same crc, not too old,
+		 * and from another gw. therefore return 1 to forbid it.
+		 */
+		return 1;
+	}
+	/* not found, add a new entry (overwrite the oldest entry) */
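+	/* bcast_duplist_curr always points at the newest entry: the new
+	 * entry is written to the slot just behind it and curr is moved
+	 * there, so the search above keeps walking from newest to oldest
+	 */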
+	curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
+	entry = &bat_priv->bcast_duplist[curr];
+	entry->crc = crc;
+	entry->entrytime = jiffies;
+	memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
+	bat_priv->bcast_duplist_curr = curr;
+
+	/* allow it, it's the first occurrence. */
+	return 0;
+}
+
+
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: originator mac address
+ *
+ * check if the originator is a gateway for any VLAN ID.
+ *
+ * returns 1 if it is found, 0 otherwise
+ *
+ */
+
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
+{
+	struct hashtable_t *hash = bat_priv->backbone_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct backbone_gw *backbone_gw;
+	int i;
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		return 0;
+
+	if (!hash)
+		return 0;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+			if (compare_eth(backbone_gw->orig, orig)) {
+				rcu_read_unlock();
+				return 1;
+			}
+		}
+		rcu_read_unlock();
+	}
+
+	return 0;
+}
+
+
+/**
+ * @skb: the frame to be checked
+ * @orig_node: the orig_node of the frame
+ * @hdr_size: maximum length of the frame
+ *
+ * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
+ * if the orig_node is also a gateway on the soft interface, otherwise it
+ * returns 0.
+ *
+ */
+int bla_is_backbone_gw(struct sk_buff *skb,
+		       struct orig_node *orig_node, int hdr_size)
+{
+	struct ethhdr *ethhdr;
+	struct vlan_ethhdr *vhdr;
+	struct backbone_gw *backbone_gw;
+	short vid = -1;
+
+	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
+		return 0;
+
+	/* first, find out the vid. */
+	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
+		return 0;
+
+	ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
+
+	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+		if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
+			return 0;
+
+		vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
+					      hdr_size);
+		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+	}
+
+	/* see if this originator is a backbone gw for this VLAN */
+
+	backbone_gw = backbone_hash_find(orig_node->bat_priv,
+					 orig_node->orig, vid);
+	if (!backbone_gw)
+		return 0;
+
+	backbone_gw_free_ref(backbone_gw);
+	return 1;
+}
+
+/* free all bla structures (for softinterface free or module unload) */
+void bla_free(struct bat_priv *bat_priv)
+{
+	struct hard_iface *primary_if;
+
+	cancel_delayed_work_sync(&bat_priv->bla_work);
+	primary_if = primary_if_get_selected(bat_priv);
+
+	if (bat_priv->claim_hash) {
+		bla_purge_claims(bat_priv, primary_if, 1);
+		hash_destroy(bat_priv->claim_hash);
+		bat_priv->claim_hash = NULL;
+	}
+	if (bat_priv->backbone_hash) {
+		bla_purge_backbone_gw(bat_priv, 1);
+		hash_destroy(bat_priv->backbone_hash);
+		bat_priv->backbone_hash = NULL;
+	}
+	if (primary_if)
+		hardif_free_ref(primary_if);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * bla_rx checks if:
+ *  * we have to race for a claim
+ *  * the frame is allowed on the LAN
+ *
+ * in these cases, the skb is further handled by this function and
+ * 1 is returned, otherwise 0 is returned and the caller shall further
+ * process the skb.
+ *
+ */
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+{
+	struct ethhdr *ethhdr;
+	struct claim search_claim, *claim = NULL;
+	struct hard_iface *primary_if;
+	int ret;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto handled;
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		goto allow;
+
+
+	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+		/* don't allow broadcasts while requests are in flight */
+		if (is_multicast_ether_addr(ethhdr->h_dest))
+			goto handled;
+
+	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+	search_claim.vid = vid;
+	claim = claim_hash_find(bat_priv, &search_claim);
+
+	if (!claim) {
+		/* possible optimization: race for a claim */
+		/* No claim exists yet, claim it for us!
+		 */
+		handle_claim(bat_priv, primary_if,
+			     primary_if->net_dev->dev_addr,
+			     ethhdr->h_source, vid);
+		goto allow;
+	}
+
+	/* if it is our own claim ... */
+	if (compare_eth(claim->backbone_gw->orig,
+			primary_if->net_dev->dev_addr)) {
+		/* ... allow it in any case */
+		claim->lasttime = jiffies;
+		goto allow;
+	}
+
+	/* if it is a broadcast ... */
+	if (is_multicast_ether_addr(ethhdr->h_dest)) {
+		/* ... drop it. the responsible gateway is in charge. */
+		goto handled;
+	} else {
+		/* seems the client considers us as its best gateway.
+		 * send a claim and update the claim table
+		 * immediately.
+		 */
+		handle_claim(bat_priv, primary_if,
+			     primary_if->net_dev->dev_addr,
+			     ethhdr->h_source, vid);
+		goto allow;
+	}
+allow:
+	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+	ret = 0;
+	goto out;
+
+handled:
+	kfree_skb(skb);
+	ret = 1;
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (claim)
+		claim_free_ref(claim);
+	return ret;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * bla_tx checks if:
+ *  * a claim was received which has to be processed
+ *  * the frame is allowed on the mesh
+ *
+ * in these cases, the skb is further handled by this function and
+ * 1 is returned, otherwise 0 is returned and the caller shall further
+ * process the skb.
+ *
+ */
+int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+{
+	struct ethhdr *ethhdr;
+	struct claim search_claim, *claim = NULL;
+	struct hard_iface *primary_if;
+	int ret = 0;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		goto allow;
+
+	/* in VLAN case, the mac header might not be set. */
+	skb_reset_mac_header(skb);
+
+	if (bla_process_claim(bat_priv, primary_if, skb))
+		goto handled;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+		/* don't allow broadcasts while requests are in flight */
+		if (is_multicast_ether_addr(ethhdr->h_dest))
+			goto handled;
+
+	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+	search_claim.vid = vid;
+
+	claim = claim_hash_find(bat_priv, &search_claim);
+
+	/* if no claim exists, allow it. */
+	if (!claim)
+		goto allow;
+
+	/* check if we are responsible. */
+	if (compare_eth(claim->backbone_gw->orig,
+			primary_if->net_dev->dev_addr)) {
+		/* if yes, the client has roamed and we have
+		 * to unclaim it.
+		 */
+		handle_unclaim(bat_priv, primary_if,
+			       primary_if->net_dev->dev_addr,
+			       ethhdr->h_source, vid);
+		goto allow;
+	}
+
+	/* check if it is a multicast/broadcast frame */
+	if (is_multicast_ether_addr(ethhdr->h_dest)) {
+		/* drop it. the responsible gateway has forwarded it into
+		 * the backbone network.
+		 */
+		goto handled;
+	} else {
+		/* we must allow it. at least if we are
+		 * responsible for the DESTINATION.
+		 */
+		goto allow;
+	}
+allow:
+	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+	ret = 0;
+	goto out;
+handled:
+	ret = 1;
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (claim)
+		claim_free_ref(claim);
+	return ret;
+}
+
+int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
+{
+	struct net_device *net_dev = (struct net_device *)seq->private;
+	struct bat_priv *bat_priv = netdev_priv(net_dev);
+	struct hashtable_t *hash = bat_priv->claim_hash;
+	struct claim *claim;
+	struct hard_iface *primary_if;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	uint32_t i;
+	bool is_own;
+	int ret = 0;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if) {
+		ret = seq_printf(seq,
+				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
+				 net_dev->name);
+		goto out;
+	}
+
+	if (primary_if->if_status != IF_ACTIVE) {
+		ret = seq_printf(seq,
+				 "BATMAN mesh %s disabled - primary interface not active\n",
+				 net_dev->name);
+		goto out;
+	}
+
+	seq_printf(seq,
+		   "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
+		   net_dev->name, primary_if->net_dev->dev_addr,
+		   ntohs(bat_priv->claim_dest.group));
+	seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-4s)\n",
+		   "Client", "VID", "Originator", "CRC");
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+			is_own = compare_eth(claim->backbone_gw->orig,
+					     primary_if->net_dev->dev_addr);
+			seq_printf(seq,	" * %pM on % 5d by %pM [%c] (%04x)\n",
+				   claim->addr, claim->vid,
+				   claim->backbone_gw->orig,
+				   (is_own ? 'x' : ' '),
+				   claim->backbone_gw->crc);
+		}
+		rcu_read_unlock();
+	}
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	return ret;
+}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
new file mode 100644
index 0000000..4a8e4fc
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2011 B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_BLA_H_
+#define _NET_BATMAN_ADV_BLA_H_
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_is_backbone_gw(struct sk_buff *skb,
+		       struct orig_node *orig_node, int hdr_size);
+int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig);
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+			    struct bcast_packet *bcast_packet, int hdr_size);
+void bla_update_orig_address(struct bat_priv *bat_priv,
+			     struct hard_iface *primary_if,
+			     struct hard_iface *oldif);
+int bla_init(struct bat_priv *bat_priv);
+void bla_free(struct bat_priv *bat_priv);
+
+#define BLA_CRC_INIT	0
+#else /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
+			 short vid)
+{
+	return 0;
+}
+
+static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb,
+			 short vid)
+{
+	return 0;
+}
+
+static inline int bla_is_backbone_gw(struct sk_buff *skb,
+				     struct orig_node *orig_node,
+				     int hdr_size)
+{
+	return 0;
+}
+
+static inline int bla_claim_table_seq_print_text(struct seq_file *seq,
+						 void *offset)
+{
+	return 0;
+}
+
+static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv,
+					  uint8_t *orig)
+{
+	return 0;
+}
+
+static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+					  struct bcast_packet *bcast_packet,
+					  int hdr_size)
+{
+	return 0;
+}
+
+static inline void bla_update_orig_address(struct bat_priv *bat_priv,
+					   struct hard_iface *primary_if,
+					   struct hard_iface *oldif)
+{
+}
+
+static inline int bla_init(struct bat_priv *bat_priv)
+{
+	return 1;
+}
+
+static inline void bla_free(struct bat_priv *bat_priv)
+{
+}
+
+#endif /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 3778977..47c79d7 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,6 +28,7 @@
 #include "bat_sysfs.h"
 #include "originator.h"
 #include "hash.h"
+#include "bridge_loop_avoidance.h"
 
 #include <linux/if_arp.h>
 
@@ -107,7 +108,8 @@
 	return hard_iface;
 }
 
-static void primary_if_update_addr(struct bat_priv *bat_priv)
+static void primary_if_update_addr(struct bat_priv *bat_priv,
+				   struct hard_iface *oldif)
 {
 	struct vis_packet *vis_packet;
 	struct hard_iface *primary_if;
@@ -122,6 +124,7 @@
 	memcpy(vis_packet->sender_orig,
 	       primary_if->net_dev->dev_addr, ETH_ALEN);
 
+	bla_update_orig_address(bat_priv, primary_if, oldif);
 out:
 	if (primary_if)
 		hardif_free_ref(primary_if);
@@ -140,14 +143,15 @@
 	curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
 	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
 
+	if (!new_hard_iface)
+		goto out;
+
+	bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
+	primary_if_update_addr(bat_priv, curr_hard_iface);
+
+out:
 	if (curr_hard_iface)
 		hardif_free_ref(curr_hard_iface);
-
-	if (!new_hard_iface)
-		return;
-
-	bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface);
-	primary_if_update_addr(bat_priv);
 }
 
 static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
@@ -300,22 +304,17 @@
 	if (!softif_is_valid(soft_iface)) {
 		pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
 		       soft_iface->name);
-		dev_put(soft_iface);
 		ret = -EINVAL;
-		goto err;
+		goto err_dev;
 	}
 
 	hard_iface->soft_iface = soft_iface;
 	bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	bat_priv->bat_algo_ops->bat_ogm_init(hard_iface);
-
-	if (!hard_iface->packet_buff) {
-		bat_err(hard_iface->soft_iface,
-			"Can't add interface packet (%s): out of memory\n",
-			hard_iface->net_dev->name);
+	ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
+	if (ret < 0) {
 		ret = -ENOMEM;
-		goto err;
+		goto err_dev;
 	}
 
 	hard_iface->if_num = bat_priv->num_ifaces;
@@ -328,7 +327,6 @@
 	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
 	dev_add_pack(&hard_iface->batman_adv_ptype);
 
-	atomic_set(&hard_iface->seqno, 1);
 	atomic_set(&hard_iface->frag_seqno, 1);
 	bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
 		 hard_iface->net_dev->name);
@@ -360,6 +358,8 @@
 out:
 	return 0;
 
+err_dev:
+	dev_put(soft_iface);
 err:
 	hardif_free_ref(hard_iface);
 	return ret;
@@ -394,8 +394,7 @@
 			hardif_free_ref(new_if);
 	}
 
-	kfree(hard_iface->packet_buff);
-	hard_iface->packet_buff = NULL;
+	bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
 	hard_iface->if_status = IF_NOT_IN_USE;
 
 	/* delete all references to this hard_iface */
@@ -447,6 +446,13 @@
 	check_known_mac_addr(hard_iface->net_dev);
 	list_add_tail_rcu(&hard_iface->list, &hardif_list);
 
+	/**
+	 * This can't be called via a bat_priv callback because
+	 * we have no bat_priv yet.
+	 */
+	atomic_set(&hard_iface->seqno, 1);
+	hard_iface->packet_buff = NULL;
+
 	return hard_iface;
 
 free_if:
@@ -531,7 +537,7 @@
 			goto hardif_put;
 
 		if (hard_iface == primary_if)
-			primary_if_update_addr(bat_priv);
+			primary_if_update_addr(bat_priv, NULL);
 		break;
 	default:
 		break;
@@ -568,8 +574,7 @@
 		goto err_free;
 
 	/* expect a valid ethernet header here. */
-	if (unlikely(skb->mac_len != sizeof(struct ethhdr) ||
-		     !skb_mac_header(skb)))
+	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
 		goto err_free;
 
 	if (!hard_iface->soft_iface)
@@ -598,7 +603,7 @@
 
 	switch (batman_ogm_packet->header.packet_type) {
 		/* batman originator packet */
-	case BAT_OGM:
+	case BAT_IV_OGM:
 		ret = recv_bat_ogm_packet(skb, hard_iface);
 		break;
 
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index b87518e..2e98a57 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -175,13 +175,13 @@
 	if (len >= sizeof(struct icmp_packet_rr))
 		packet_len = sizeof(struct icmp_packet_rr);
 
-	skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr));
+	skb = dev_alloc_skb(packet_len + ETH_HLEN);
 	if (!skb) {
 		len = -ENOMEM;
 		goto out;
 	}
 
-	skb_reserve(skb, sizeof(struct ethhdr));
+	skb_reserve(skb, ETH_HLEN);
 	icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
 
 	if (copy_from_user(icmp_packet, buff, packet_len)) {
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 6d51caa..7913272 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -30,6 +30,7 @@
 #include "translation-table.h"
 #include "hard-interface.h"
 #include "gateway_client.h"
+#include "bridge_loop_avoidance.h"
 #include "vis.h"
 #include "hash.h"
 #include "bat_algo.h"
@@ -96,13 +97,10 @@
 	spin_lock_init(&bat_priv->gw_list_lock);
 	spin_lock_init(&bat_priv->vis_hash_lock);
 	spin_lock_init(&bat_priv->vis_list_lock);
-	spin_lock_init(&bat_priv->softif_neigh_lock);
-	spin_lock_init(&bat_priv->softif_neigh_vid_lock);
 
 	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
 	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
 	INIT_HLIST_HEAD(&bat_priv->gw_list);
-	INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
 	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
 	INIT_LIST_HEAD(&bat_priv->tt_req_list);
 	INIT_LIST_HEAD(&bat_priv->tt_roam_list);
@@ -118,6 +116,9 @@
 	if (vis_init(bat_priv) < 1)
 		goto err;
 
+	if (bla_init(bat_priv) < 1)
+		goto err;
+
 	atomic_set(&bat_priv->gw_reselect, 0);
 	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
 	goto end;
@@ -145,7 +146,7 @@
 
 	tt_free(bat_priv);
 
-	softif_neigh_purge(bat_priv);
+	bla_free(bat_priv);
 
 	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
 }
@@ -207,8 +208,9 @@
 	}
 
 	/* all algorithms must implement all ops (for now) */
-	if (!bat_algo_ops->bat_ogm_init ||
-	    !bat_algo_ops->bat_ogm_init_primary ||
+	if (!bat_algo_ops->bat_iface_enable ||
+	    !bat_algo_ops->bat_iface_disable ||
+	    !bat_algo_ops->bat_primary_iface_set ||
 	    !bat_algo_ops->bat_ogm_update_mac ||
 	    !bat_algo_ops->bat_ogm_schedule ||
 	    !bat_algo_ops->bat_ogm_emit ||
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 94fa1c2..d9832ac 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -65,7 +65,7 @@
 
 #define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
 
-#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
+#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE)
 
 #define LOG_BUF_LEN 8192	  /* has to be a power of 2 */
 
@@ -80,8 +80,12 @@
 #define MAX_AGGREGATION_BYTES 512
 #define MAX_AGGREGATION_MS 100
 
-#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */
+#define BLA_PERIOD_LENGTH	10000	/* 10 seconds */
+#define BLA_BACKBONE_TIMEOUT	(BLA_PERIOD_LENGTH * 3)
+#define BLA_CLAIM_TIMEOUT	(BLA_PERIOD_LENGTH * 10)
 
+#define DUPLIST_SIZE		16
+#define DUPLIST_TIMEOUT		500	/* 500 ms */
 /* don't reset again within 30 seconds */
 #define RESET_PROTECTION_MS 30000
 #define EXPECTED_SEQNO_RANGE	65536
@@ -119,7 +123,8 @@
 	DBG_BATMAN = 1 << 0,
 	DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
 	DBG_TT	   = 1 << 2, /* translation table operations */
-	DBG_ALL    = 7
+	DBG_BLA    = 1 << 3, /* bridge loop avoidance */
+	DBG_ALL    = 15
 };
 
 /* Kernel headers */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 43c0a4f..ce49698 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -28,6 +28,7 @@
 #include "hard-interface.h"
 #include "unicast.h"
 #include "soft-interface.h"
+#include "bridge_loop_avoidance.h"
 
 static void purge_orig(struct work_struct *work);
 
@@ -375,8 +376,6 @@
 
 	gw_node_purge(bat_priv);
 	gw_election(bat_priv);
-
-	softif_neigh_purge(bat_priv);
 }
 
 static void purge_orig(struct work_struct *work)
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 441f3db..f54969c 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -25,7 +25,7 @@
 #define ETH_P_BATMAN  0x4305	/* unofficial/not registered Ethertype */
 
 enum bat_packettype {
-	BAT_OGM		 = 0x01,
+	BAT_IV_OGM	 = 0x01,
 	BAT_ICMP	 = 0x02,
 	BAT_UNICAST	 = 0x03,
 	BAT_BCAST	 = 0x04,
@@ -38,7 +38,7 @@
 /* this file is included by batctl which needs these defines */
 #define COMPAT_VERSION 14
 
-enum batman_flags {
+enum batman_iv_flags {
 	PRIMARIES_FIRST_HOP = 1 << 4,
 	VIS_SERVER	    = 1 << 5,
 	DIRECTLINK	    = 1 << 6
@@ -90,6 +90,23 @@
 	TT_CLIENT_PENDING = 1 << 10
 };
 
+/* claim frame types for the bridge loop avoidance */
+enum bla_claimframe {
+	CLAIM_TYPE_ADD		= 0x00,
+	CLAIM_TYPE_DEL		= 0x01,
+	CLAIM_TYPE_ANNOUNCE	= 0x02,
+	CLAIM_TYPE_REQUEST	= 0x03
+};
+
+/* the destination hardware field in the ARP frame is used to
+ * transport the claim type and the group id
+ */
+struct bla_claim_dst {
+	uint8_t magic[3];	/* FF:43:05 */
+	uint8_t type;		/* bla_claimframe */
+	uint16_t group;		/* group id */
+} __packed;
+
 struct batman_header {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
@@ -100,8 +117,8 @@
 	struct batman_header header;
 	uint8_t  flags;    /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
 	uint32_t seqno;
-	uint8_t  orig[6];
-	uint8_t  prev_sender[6];
+	uint8_t  orig[ETH_ALEN];
+	uint8_t  prev_sender[ETH_ALEN];
 	uint8_t  gw_flags;  /* flags related to gateway class */
 	uint8_t  tq;
 	uint8_t  tt_num_changes;
@@ -109,13 +126,13 @@
 	uint16_t tt_crc;
 } __packed;
 
-#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet)
+#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet)
 
 struct icmp_packet {
 	struct batman_header header;
 	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[6];
-	uint8_t  orig[6];
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  orig[ETH_ALEN];
 	uint16_t seqno;
 	uint8_t  uid;
 	uint8_t  reserved;
@@ -128,8 +145,8 @@
 struct icmp_packet_rr {
 	struct batman_header header;
 	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[6];
-	uint8_t  orig[6];
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  orig[ETH_ALEN];
 	uint16_t seqno;
 	uint8_t  uid;
 	uint8_t  rr_cur;
@@ -139,16 +156,16 @@
 struct unicast_packet {
 	struct batman_header header;
 	uint8_t  ttvn; /* destination translation table version number */
-	uint8_t  dest[6];
+	uint8_t  dest[ETH_ALEN];
 } __packed;
 
 struct unicast_frag_packet {
 	struct batman_header header;
 	uint8_t  ttvn; /* destination translation table version number */
-	uint8_t  dest[6];
+	uint8_t  dest[ETH_ALEN];
 	uint8_t  flags;
 	uint8_t  align;
-	uint8_t  orig[6];
+	uint8_t  orig[ETH_ALEN];
 	uint16_t seqno;
 } __packed;
 
@@ -156,7 +173,7 @@
 	struct batman_header header;
 	uint8_t  reserved;
 	uint32_t seqno;
-	uint8_t  orig[6];
+	uint8_t  orig[ETH_ALEN];
 } __packed;
 
 struct vis_packet {
@@ -165,9 +182,9 @@
 	uint32_t seqno;		 /* sequence number */
 	uint8_t  entries;	 /* number of entries behind this struct */
 	uint8_t  reserved;
-	uint8_t  vis_orig[6];	 /* originator that announces its neighbors */
-	uint8_t  target_orig[6]; /* who should receive this packet */
-	uint8_t  sender_orig[6]; /* who sent or rebroadcasted this packet */
+	uint8_t  vis_orig[ETH_ALEN];	/* originator reporting its neighbors */
+	uint8_t  target_orig[ETH_ALEN]; /* who should receive this packet */
+	uint8_t  sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
 } __packed;
 
 struct tt_query_packet {
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 7f8e158..ff56086 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -29,6 +29,10 @@
 #include "originator.h"
 #include "vis.h"
 #include "unicast.h"
+#include "bridge_loop_avoidance.h"
+
+static int route_unicast_packet(struct sk_buff *skb,
+				struct hard_iface *recv_if);
 
 void slide_own_bcast_window(struct hard_iface *hard_iface)
 {
@@ -52,7 +56,7 @@
 
 			bit_get_packet(bat_priv, word, 1, 0);
 			orig_node->bcast_own_sum[hard_iface->if_num] =
-				bit_packet_count(word);
+				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
 			spin_unlock_bh(&orig_node->ogm_cnt_lock);
 		}
 		rcu_read_unlock();
@@ -250,7 +254,7 @@
 	struct ethhdr *ethhdr;
 
 	/* drop packet if it has not necessary minimum size */
-	if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN)))
+	if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_HLEN)))
 		return NET_RX_DROP;
 
 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -309,7 +313,7 @@
 		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
 
 	icmp_packet = (struct icmp_packet_rr *)skb->data;
@@ -364,7 +368,7 @@
 		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
 
 	icmp_packet = (struct icmp_packet *)skb->data;
@@ -450,7 +454,7 @@
 		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
 
 	icmp_packet = (struct icmp_packet_rr *)skb->data;
@@ -669,6 +673,13 @@
 	if (!is_my_mac(roam_adv_packet->dst))
 		return route_unicast_packet(skb, recv_if);
 
+	/* check if it is a backbone gateway. we don't accept
+	 * roaming advertisement from it, as it has the same
+	 * entries as we have.
+	 */
+	if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
+		goto out;
+
 	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
 	if (!orig_node)
 		goto out;
@@ -798,7 +809,7 @@
 	return 0;
 }
 
-int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
 	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct orig_node *orig_node = NULL;
@@ -830,7 +841,7 @@
 		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
 
 	unicast_packet = (struct unicast_packet *)skb->data;
@@ -1047,8 +1058,8 @@
 	spin_lock_bh(&orig_node->bcast_seqno_lock);
 
 	/* check whether the packet is a duplicate */
-	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
-			   ntohl(bcast_packet->seqno)))
+	if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
+			 ntohl(bcast_packet->seqno)))
 		goto spin_unlock;
 
 	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
@@ -1065,9 +1076,19 @@
 
 	spin_unlock_bh(&orig_node->bcast_seqno_lock);
 
+	/* check whether this has been sent by another originator before */
+	if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+		goto out;
+
 	/* rebroadcast packet */
 	add_bcast_packet_to_list(bat_priv, skb, 1);
 
+	/* don't hand the broadcast up if it is from an originator
+	 * from the same backbone.
+	 */
+	if (bla_is_backbone_gw(skb, orig_node, hdr_size))
+		goto out;
+
 	/* broadcast for me */
 	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
 	ret = NET_RX_SUCCESS;
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 92ac100..3d729cb 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -25,7 +25,6 @@
 void slide_own_bcast_window(struct hard_iface *hard_iface);
 void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
 		  struct neigh_node *neigh_node);
-int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index af7a674..7c66b61 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -51,7 +51,7 @@
 	}
 
 	/* push to the ethernet header. */
-	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
+	if (my_skb_head_push(skb, ETH_HLEN) < 0)
 		goto send_skb_err;
 
 	skb_reset_mac_header(skb);
@@ -87,7 +87,7 @@
 	/* keep old buffer if kmalloc should fail */
 	if (new_buff) {
 		memcpy(new_buff, hard_iface->packet_buff,
-		       BATMAN_OGM_LEN);
+		       BATMAN_OGM_HLEN);
 
 		kfree(hard_iface->packet_buff);
 		hard_iface->packet_buff = new_buff;
@@ -101,13 +101,13 @@
 {
 	int new_len;
 
-	new_len = BATMAN_OGM_LEN +
+	new_len = BATMAN_OGM_HLEN +
 		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
 
 	/* if we have too many changes for one packet don't send any
 	 * and wait for the tt table request which will be fragmented */
 	if (new_len > hard_iface->soft_iface->mtu)
-		new_len = BATMAN_OGM_LEN;
+		new_len = BATMAN_OGM_HLEN;
 
 	realloc_packet_buffer(hard_iface, new_len);
 
@@ -117,14 +117,14 @@
 	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
 
 	return tt_changes_fill_buffer(bat_priv,
-				      hard_iface->packet_buff + BATMAN_OGM_LEN,
-				      hard_iface->packet_len - BATMAN_OGM_LEN);
+				      hard_iface->packet_buff + BATMAN_OGM_HLEN,
+				      hard_iface->packet_len - BATMAN_OGM_HLEN);
 }
 
 static int reset_packet_buffer(struct bat_priv *bat_priv,
 				struct hard_iface *hard_iface)
 {
-	realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
+	realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
 	return 0;
 }
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index a5590f4..6e2530b 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -36,6 +36,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include "unicast.h"
+#include "bridge_loop_avoidance.h"
 
 
 static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -73,439 +74,6 @@
 	return 0;
 }
 
-static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
-{
-	if (atomic_dec_and_test(&softif_neigh->refcount))
-		kfree_rcu(softif_neigh, rcu);
-}
-
-static void softif_neigh_vid_free_rcu(struct rcu_head *rcu)
-{
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct softif_neigh *softif_neigh;
-	struct hlist_node *node, *node_tmp;
-	struct bat_priv *bat_priv;
-
-	softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu);
-	bat_priv = softif_neigh_vid->bat_priv;
-
-	spin_lock_bh(&bat_priv->softif_neigh_lock);
-	hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
-				  &softif_neigh_vid->softif_neigh_list, list) {
-		hlist_del_rcu(&softif_neigh->list);
-		softif_neigh_free_ref(softif_neigh);
-	}
-	spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-	kfree(softif_neigh_vid);
-}
-
-static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid)
-{
-	if (atomic_dec_and_test(&softif_neigh_vid->refcount))
-		call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu);
-}
-
-static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
-						     short vid)
-{
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct hlist_node *node;
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(softif_neigh_vid, node,
-				 &bat_priv->softif_neigh_vids, list) {
-		if (softif_neigh_vid->vid != vid)
-			continue;
-
-		if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
-			continue;
-
-		goto out;
-	}
-
-	softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC);
-	if (!softif_neigh_vid)
-		goto out;
-
-	softif_neigh_vid->vid = vid;
-	softif_neigh_vid->bat_priv = bat_priv;
-
-	/* initialize with 2 - caller decrements counter by one */
-	atomic_set(&softif_neigh_vid->refcount, 2);
-	INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list);
-	INIT_HLIST_NODE(&softif_neigh_vid->list);
-	spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
-	hlist_add_head_rcu(&softif_neigh_vid->list,
-			   &bat_priv->softif_neigh_vids);
-	spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
-
-out:
-	rcu_read_unlock();
-	return softif_neigh_vid;
-}
-
-static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
-					     const uint8_t *addr, short vid)
-{
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct softif_neigh *softif_neigh = NULL;
-	struct hlist_node *node;
-
-	softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
-	if (!softif_neigh_vid)
-		goto out;
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(softif_neigh, node,
-				 &softif_neigh_vid->softif_neigh_list,
-				 list) {
-		if (!compare_eth(softif_neigh->addr, addr))
-			continue;
-
-		if (!atomic_inc_not_zero(&softif_neigh->refcount))
-			continue;
-
-		softif_neigh->last_seen = jiffies;
-		goto unlock;
-	}
-
-	softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC);
-	if (!softif_neigh)
-		goto unlock;
-
-	memcpy(softif_neigh->addr, addr, ETH_ALEN);
-	softif_neigh->last_seen = jiffies;
-	/* initialize with 2 - caller decrements counter by one */
-	atomic_set(&softif_neigh->refcount, 2);
-
-	INIT_HLIST_NODE(&softif_neigh->list);
-	spin_lock_bh(&bat_priv->softif_neigh_lock);
-	hlist_add_head_rcu(&softif_neigh->list,
-			   &softif_neigh_vid->softif_neigh_list);
-	spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-unlock:
-	rcu_read_unlock();
-out:
-	if (softif_neigh_vid)
-		softif_neigh_vid_free_ref(softif_neigh_vid);
-	return softif_neigh;
-}
-
-static struct softif_neigh *softif_neigh_get_selected(
-				struct softif_neigh_vid *softif_neigh_vid)
-{
-	struct softif_neigh *softif_neigh;
-
-	rcu_read_lock();
-	softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
-
-	if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount))
-		softif_neigh = NULL;
-
-	rcu_read_unlock();
-	return softif_neigh;
-}
-
-static struct softif_neigh *softif_neigh_vid_get_selected(
-						struct bat_priv *bat_priv,
-						short vid)
-{
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct softif_neigh *softif_neigh = NULL;
-
-	softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
-	if (!softif_neigh_vid)
-		goto out;
-
-	softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-out:
-	if (softif_neigh_vid)
-		softif_neigh_vid_free_ref(softif_neigh_vid);
-	return softif_neigh;
-}
-
-static void softif_neigh_vid_select(struct bat_priv *bat_priv,
-				    struct softif_neigh *new_neigh,
-				    short vid)
-{
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct softif_neigh *curr_neigh;
-
-	softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
-	if (!softif_neigh_vid)
-		goto out;
-
-	spin_lock_bh(&bat_priv->softif_neigh_lock);
-
-	if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
-		new_neigh = NULL;
-
-	curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh,
-					       1);
-	rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
-
-	if ((curr_neigh) && (!new_neigh))
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Removing mesh exit point on vid: %d (prev: %pM).\n",
-			vid, curr_neigh->addr);
-	else if ((curr_neigh) && (new_neigh))
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Changing mesh exit point on vid: %d from %pM to %pM.\n",
-			vid, curr_neigh->addr, new_neigh->addr);
-	else if ((!curr_neigh) && (new_neigh))
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Setting mesh exit point on vid: %d to %pM.\n",
-			vid, new_neigh->addr);
-
-	if (curr_neigh)
-		softif_neigh_free_ref(curr_neigh);
-
-	spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-out:
-	if (softif_neigh_vid)
-		softif_neigh_vid_free_ref(softif_neigh_vid);
-}
-
-static void softif_neigh_vid_deselect(struct bat_priv *bat_priv,
-				      struct softif_neigh_vid *softif_neigh_vid)
-{
-	struct softif_neigh *curr_neigh;
-	struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp;
-	struct hard_iface *primary_if = NULL;
-	struct hlist_node *node;
-
-	primary_if = primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
-
-	/* find new softif_neigh immediately to avoid temporary loops */
-	rcu_read_lock();
-	curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
-
-	hlist_for_each_entry_rcu(softif_neigh_tmp, node,
-				 &softif_neigh_vid->softif_neigh_list,
-				 list) {
-		if (softif_neigh_tmp == curr_neigh)
-			continue;
-
-		/* we got a neighbor but its mac is 'bigger' than ours  */
-		if (memcmp(primary_if->net_dev->dev_addr,
-			   softif_neigh_tmp->addr, ETH_ALEN) < 0)
-			continue;
-
-		if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount))
-			continue;
-
-		softif_neigh = softif_neigh_tmp;
-		goto unlock;
-	}
-
-unlock:
-	rcu_read_unlock();
-out:
-	softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid);
-
-	if (primary_if)
-		hardif_free_ref(primary_if);
-	if (softif_neigh)
-		softif_neigh_free_ref(softif_neigh);
-}
-
-int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
-{
-	struct net_device *net_dev = (struct net_device *)seq->private;
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct softif_neigh *softif_neigh;
-	struct hard_iface *primary_if;
-	struct hlist_node *node, *node_tmp;
-	struct softif_neigh *curr_softif_neigh;
-	int ret = 0, last_seen_secs, last_seen_msecs;
-
-	primary_if = primary_if_get_selected(bat_priv);
-	if (!primary_if) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-				 net_dev->name);
-		goto out;
-	}
-
-	if (primary_if->if_status != IF_ACTIVE) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - primary interface not active\n",
-				 net_dev->name);
-		goto out;
-	}
-
-	seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(softif_neigh_vid, node,
-				 &bat_priv->softif_neigh_vids, list) {
-		seq_printf(seq, "     %-15s %s on vid: %d\n",
-			   "Originator", "last-seen", softif_neigh_vid->vid);
-
-		curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-
-		hlist_for_each_entry_rcu(softif_neigh, node_tmp,
-					 &softif_neigh_vid->softif_neigh_list,
-					 list) {
-			last_seen_secs = jiffies_to_msecs(jiffies -
-						softif_neigh->last_seen) / 1000;
-			last_seen_msecs = jiffies_to_msecs(jiffies -
-						softif_neigh->last_seen) % 1000;
-			seq_printf(seq, "%s %pM  %3i.%03is\n",
-				   curr_softif_neigh == softif_neigh
-				   ? "=>" : "  ", softif_neigh->addr,
-				   last_seen_secs, last_seen_msecs);
-		}
-
-		if (curr_softif_neigh)
-			softif_neigh_free_ref(curr_softif_neigh);
-
-		seq_printf(seq, "\n");
-	}
-	rcu_read_unlock();
-
-out:
-	if (primary_if)
-		hardif_free_ref(primary_if);
-	return ret;
-}
-
-void softif_neigh_purge(struct bat_priv *bat_priv)
-{
-	struct softif_neigh *softif_neigh, *curr_softif_neigh;
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct hlist_node *node, *node_tmp, *node_tmp2;
-	int do_deselect;
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(softif_neigh_vid, node,
-				 &bat_priv->softif_neigh_vids, list) {
-		if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
-			continue;
-
-		curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-		do_deselect = 0;
-
-		spin_lock_bh(&bat_priv->softif_neigh_lock);
-		hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
-					  &softif_neigh_vid->softif_neigh_list,
-					  list) {
-			if ((!has_timed_out(softif_neigh->last_seen,
-					    SOFTIF_NEIGH_TIMEOUT)) &&
-			    (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
-				continue;
-
-			if (curr_softif_neigh == softif_neigh) {
-				bat_dbg(DBG_ROUTES, bat_priv,
-					"Current mesh exit point on vid: %d '%pM' vanished.\n",
-					softif_neigh_vid->vid,
-					softif_neigh->addr);
-				do_deselect = 1;
-			}
-
-			hlist_del_rcu(&softif_neigh->list);
-			softif_neigh_free_ref(softif_neigh);
-		}
-		spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-		/* soft_neigh_vid_deselect() needs to acquire the
-		 * softif_neigh_lock */
-		if (do_deselect)
-			softif_neigh_vid_deselect(bat_priv, softif_neigh_vid);
-
-		if (curr_softif_neigh)
-			softif_neigh_free_ref(curr_softif_neigh);
-
-		softif_neigh_vid_free_ref(softif_neigh_vid);
-	}
-	rcu_read_unlock();
-
-	spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
-	hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp,
-				  &bat_priv->softif_neigh_vids, list) {
-		if (!hlist_empty(&softif_neigh_vid->softif_neigh_list))
-			continue;
-
-		hlist_del_rcu(&softif_neigh_vid->list);
-		softif_neigh_vid_free_ref(softif_neigh_vid);
-	}
-	spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
-
-}
-
-static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
-			       short vid)
-{
-	struct bat_priv *bat_priv = netdev_priv(dev);
-	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
-	struct batman_ogm_packet *batman_ogm_packet;
-	struct softif_neigh *softif_neigh = NULL;
-	struct hard_iface *primary_if = NULL;
-	struct softif_neigh *curr_softif_neigh = NULL;
-
-	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
-		batman_ogm_packet = (struct batman_ogm_packet *)
-					(skb->data + ETH_HLEN + VLAN_HLEN);
-	else
-		batman_ogm_packet = (struct batman_ogm_packet *)
-							(skb->data + ETH_HLEN);
-
-	if (batman_ogm_packet->header.version != COMPAT_VERSION)
-		goto out;
-
-	if (batman_ogm_packet->header.packet_type != BAT_OGM)
-		goto out;
-
-	if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
-		goto out;
-
-	if (is_my_mac(batman_ogm_packet->orig))
-		goto out;
-
-	softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
-	if (!softif_neigh)
-		goto out;
-
-	curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
-	if (curr_softif_neigh == softif_neigh)
-		goto out;
-
-	primary_if = primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
-
-	/* we got a neighbor but its mac is 'bigger' than ours  */
-	if (memcmp(primary_if->net_dev->dev_addr,
-		   softif_neigh->addr, ETH_ALEN) < 0)
-		goto out;
-
-	/* close own batX device and use softif_neigh as exit node */
-	if (!curr_softif_neigh) {
-		softif_neigh_vid_select(bat_priv, softif_neigh, vid);
-		goto out;
-	}
-
-	/* switch to new 'smallest neighbor' */
-	if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0)
-		softif_neigh_vid_select(bat_priv, softif_neigh, vid);
-
-out:
-	kfree_skb(skb);
-	if (softif_neigh)
-		softif_neigh_free_ref(softif_neigh);
-	if (curr_softif_neigh)
-		softif_neigh_free_ref(curr_softif_neigh);
-	if (primary_if)
-		hardif_free_ref(primary_if);
-	return;
-}
-
 static int interface_open(struct net_device *dev)
 {
 	netif_start_queue(dev);
@@ -562,10 +130,11 @@
 	struct hard_iface *primary_if = NULL;
 	struct bcast_packet *bcast_packet;
 	struct vlan_ethhdr *vhdr;
-	struct softif_neigh *curr_softif_neigh = NULL;
+	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
+						   0x00};
 	unsigned int header_len = 0;
 	int data_len = skb->len, ret;
-	short vid = -1;
+	short vid __maybe_unused = -1;
 	bool do_bcast = false;
 
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
@@ -583,21 +152,21 @@
 
 		/* fall through */
 	case ETH_P_BATMAN:
-		softif_batman_recv(skb, soft_iface, vid);
-		goto end;
+		goto dropped;
 	}
 
-	/**
-	 * if we have a another chosen mesh exit node in range
-	 * it will transport the packets to the mesh
-	 */
-	curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
-	if (curr_softif_neigh)
+	if (bla_tx(bat_priv, skb, vid))
 		goto dropped;
 
 	/* Register the client MAC in the transtable */
 	tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
 
+	/* don't accept stp packets. STP does not help in meshes.
+	 * better use the bridge loop avoidance ...
+	 */
+	if (compare_eth(ethhdr->h_dest, stp_addr))
+		goto dropped;
+
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
 		do_bcast = true;
 
@@ -675,8 +244,6 @@
 dropped_freed:
 	bat_priv->stats.tx_dropped++;
 end:
-	if (curr_softif_neigh)
-		softif_neigh_free_ref(curr_softif_neigh);
 	if (primary_if)
 		hardif_free_ref(primary_if);
 	return NETDEV_TX_OK;
@@ -687,12 +254,9 @@
 		  int hdr_size)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct unicast_packet *unicast_packet;
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
-	struct softif_neigh *curr_softif_neigh = NULL;
-	short vid = -1;
-	int ret;
+	short vid __maybe_unused = -1;
 
 	/* check if enough space is available for pulling, and pull */
 	if (!pskb_may_pull(skb, hdr_size))
@@ -716,30 +280,6 @@
 		goto dropped;
 	}
 
-	/**
-	 * if we have a another chosen mesh exit node in range
-	 * it will transport the packets to the non-mesh network
-	 */
-	curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
-	if (curr_softif_neigh) {
-		skb_push(skb, hdr_size);
-		unicast_packet = (struct unicast_packet *)skb->data;
-
-		if ((unicast_packet->header.packet_type != BAT_UNICAST) &&
-		    (unicast_packet->header.packet_type != BAT_UNICAST_FRAG))
-			goto dropped;
-
-		skb_reset_mac_header(skb);
-
-		memcpy(unicast_packet->dest,
-		       curr_softif_neigh->addr, ETH_ALEN);
-		ret = route_unicast_packet(skb, recv_if);
-		if (ret == NET_RX_DROP)
-			goto dropped;
-
-		goto out;
-	}
-
 	/* skb->dev & skb->pkt_type are set here */
 	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
 		goto dropped;
@@ -752,21 +292,25 @@
 /*	skb->ip_summed = CHECKSUM_UNNECESSARY;*/
 
 	bat_priv->stats.rx_packets++;
-	bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
+	bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
 
 	soft_iface->last_rx = jiffies;
 
 	if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
 		goto dropped;
 
+	/* Let the bridge loop avoidance check the packet. If it will
+	 * not handle it, we can safely push it up.
+	 */
+	if (bla_rx(bat_priv, skb, vid))
+		goto out;
+
 	netif_rx(skb);
 	goto out;
 
 dropped:
 	kfree_skb(skb);
 out:
-	if (curr_softif_neigh)
-		softif_neigh_free_ref(curr_softif_neigh);
 	return;
 }
 
@@ -828,13 +372,14 @@
 
 	atomic_set(&bat_priv->aggregated_ogms, 1);
 	atomic_set(&bat_priv->bonding, 0);
+	atomic_set(&bat_priv->bridge_loop_avoidance, 0);
 	atomic_set(&bat_priv->ap_isolation, 0);
 	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
 	atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
 	atomic_set(&bat_priv->gw_sel_class, 20);
 	atomic_set(&bat_priv->gw_bandwidth, 41);
 	atomic_set(&bat_priv->orig_interval, 1000);
-	atomic_set(&bat_priv->hop_penalty, 10);
+	atomic_set(&bat_priv->hop_penalty, 30);
 	atomic_set(&bat_priv->log_level, 0);
 	atomic_set(&bat_priv->fragmentation, 1);
 	atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
@@ -845,6 +390,7 @@
 	atomic_set(&bat_priv->ttvn, 0);
 	atomic_set(&bat_priv->tt_local_changes, 0);
 	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+	atomic_set(&bat_priv->bla_num_requests, 0);
 
 	bat_priv->tt_buff = NULL;
 	bat_priv->tt_buff_len = 0;
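
Illustrative sketch, not part of the patch: the STP drop added to the transmit path above keys on the IEEE 802.1D bridge group address 01:80:C2:00:00:00, and compare_eth() is simply a byte-wise MAC comparison. A minimal user-space restatement of that check; the helper name is_stp_dest() is invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* IEEE 802.1D bridge group address used as destination by STP BPDUs */
static const uint8_t stp_addr[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x00 };

/* mirrors compare_eth(): byte-wise equality over one MAC address */
static bool is_stp_dest(const uint8_t *h_dest)
{
	return memcmp(h_dest, stp_addr, ETH_ALEN) == 0;
}

int main(void)
{
	const uint8_t bpdu_dst[ETH_ALEN]  = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x00 };
	const uint8_t bcast_dst[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("BPDU dest dropped: %s\n", is_stp_dest(bpdu_dst) ? "yes" : "no");
	printf("broadcast dropped: %s\n", is_stp_dest(bcast_dst) ? "yes" : "no");
	return 0;
}
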
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 756eab5..0203006 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -23,8 +23,6 @@
 #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 
 int my_skb_head_push(struct sk_buff *skb, unsigned int len);
-int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
-void softif_neigh_purge(struct bat_priv *bat_priv);
 void interface_rx(struct net_device *soft_iface,
 		  struct sk_buff *skb, struct hard_iface *recv_if,
 		  int hdr_size);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 1f86921..a38d315 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -27,13 +27,14 @@
 #include "hash.h"
 #include "originator.h"
 #include "routing.h"
+#include "bridge_loop_avoidance.h"
 
 #include <linux/crc16.h>
 
-static void _tt_global_del(struct bat_priv *bat_priv,
-			   struct tt_global_entry *tt_global_entry,
-			   const char *message);
+static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+			  struct orig_node *orig_node);
 static void tt_purge(struct work_struct *work);
+static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
 
 /* returns 1 if they are the same mac addr */
 static int compare_tt(const struct hlist_node *node, const void *data2)
@@ -123,17 +124,31 @@
 	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
 				       common);
 
-	if (tt_global_entry->orig_node)
-		orig_node_free_ref(tt_global_entry->orig_node);
-
 	kfree(tt_global_entry);
 }
 
 static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
 {
-	if (atomic_dec_and_test(&tt_global_entry->common.refcount))
+	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
+		tt_global_del_orig_list(tt_global_entry);
 		call_rcu(&tt_global_entry->common.rcu,
 			 tt_global_entry_free_rcu);
+	}
+}
+
+static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+{
+	struct tt_orig_list_entry *orig_entry;
+
+	orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
+	atomic_dec(&orig_entry->orig_node->tt_size);
+	orig_node_free_ref(orig_entry->orig_node);
+	kfree(orig_entry);
+}
+
+static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
+{
+	call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
 }
 
 static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -182,6 +197,9 @@
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 	struct tt_local_entry *tt_local_entry = NULL;
 	struct tt_global_entry *tt_global_entry = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tt_orig_list_entry *orig_entry;
 	int hash_added;
 
 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
@@ -232,14 +250,21 @@
 
 	/* Check whether it is a roaming! */
 	if (tt_global_entry) {
-		/* This node is probably going to update its tt table */
-		tt_global_entry->orig_node->tt_poss_change = true;
-		/* The global entry has to be marked as ROAMING and has to be
-		 * kept for consistency purpose */
+		/* These nodes are probably going to update their tt table */
+		head = &tt_global_entry->orig_list;
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+			orig_entry->orig_node->tt_poss_change = true;
+
+			send_roam_adv(bat_priv, tt_global_entry->common.addr,
+				      orig_entry->orig_node);
+		}
+		rcu_read_unlock();
+		/* The global entry has to be marked as ROAMING and
+		 * has to be kept for consistency purposes
+		 */
 		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
 		tt_global_entry->roam_at = jiffies;
-		send_roam_adv(bat_priv, tt_global_entry->common.addr,
-			      tt_global_entry->orig_node);
 	}
 out:
 	if (tt_local_entry)
@@ -490,33 +515,76 @@
 	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
 }
 
+/* find out if an orig_node is already in the list of a tt_global_entry.
+ * returns true if found, false otherwise
+ */
+static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
+				     const struct orig_node *orig_node)
+{
+	struct tt_orig_list_entry *tmp_orig_entry;
+	const struct hlist_head *head;
+	struct hlist_node *node;
+	bool found = false;
+
+	rcu_read_lock();
+	head = &entry->orig_list;
+	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
+		if (tmp_orig_entry->orig_node == orig_node) {
+			found = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	return found;
+}
+
+static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
+				     struct orig_node *orig_node,
+				     int ttvn)
+{
+	struct tt_orig_list_entry *orig_entry;
+
+	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
+	if (!orig_entry)
+		return;
+
+	INIT_HLIST_NODE(&orig_entry->list);
+	atomic_inc(&orig_node->refcount);
+	atomic_inc(&orig_node->tt_size);
+	orig_entry->orig_node = orig_node;
+	orig_entry->ttvn = ttvn;
+
+	spin_lock_bh(&tt_global_entry->list_lock);
+	hlist_add_head_rcu(&orig_entry->list,
+			   &tt_global_entry->orig_list);
+	spin_unlock_bh(&tt_global_entry->list_lock);
+}
+
 /* caller must hold orig_node refcount */
 int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
 		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
 		  bool wifi)
 {
-	struct tt_global_entry *tt_global_entry;
-	struct orig_node *orig_node_tmp;
+	struct tt_global_entry *tt_global_entry = NULL;
 	int ret = 0;
 	int hash_added;
 
 	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
 
 	if (!tt_global_entry) {
-		tt_global_entry =
-			kmalloc(sizeof(*tt_global_entry),
-				GFP_ATOMIC);
+		tt_global_entry = kzalloc(sizeof(*tt_global_entry),
+					  GFP_ATOMIC);
 		if (!tt_global_entry)
 			goto out;
 
 		memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+
 		tt_global_entry->common.flags = NO_FLAGS;
-		atomic_set(&tt_global_entry->common.refcount, 2);
-		/* Assign the new orig_node */
-		atomic_inc(&orig_node->refcount);
-		tt_global_entry->orig_node = orig_node;
-		tt_global_entry->ttvn = ttvn;
 		tt_global_entry->roam_at = 0;
+		atomic_set(&tt_global_entry->common.refcount, 2);
+
+		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
+		spin_lock_init(&tt_global_entry->list_lock);
 
 		hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
 				 choose_orig, &tt_global_entry->common,
@@ -527,19 +595,27 @@
 			tt_global_entry_free_ref(tt_global_entry);
 			goto out_remove;
 		}
-		atomic_inc(&orig_node->tt_size);
+
+		tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
 	} else {
-		if (tt_global_entry->orig_node != orig_node) {
-			atomic_dec(&tt_global_entry->orig_node->tt_size);
-			orig_node_tmp = tt_global_entry->orig_node;
-			atomic_inc(&orig_node->refcount);
-			tt_global_entry->orig_node = orig_node;
-			orig_node_free_ref(orig_node_tmp);
-			atomic_inc(&orig_node->tt_size);
+		/* there is already a global entry, use this one. */
+
+		/* If there is the TT_CLIENT_ROAM flag set, there is only one
+		 * originator left in the list and we previously received a
+		 * delete + roaming change for this originator.
+		 *
+		 * We should first delete the old originator before adding the
+		 * new one.
+		 */
+		if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
+			tt_global_del_orig_list(tt_global_entry);
+			tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
+			tt_global_entry->roam_at = 0;
 		}
-		tt_global_entry->common.flags = NO_FLAGS;
-		tt_global_entry->ttvn = ttvn;
-		tt_global_entry->roam_at = 0;
+
+		if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
+			tt_global_add_orig_entry(tt_global_entry, orig_node,
+						 ttvn);
 	}
 
 	if (wifi)
@@ -560,6 +636,34 @@
 	return ret;
 }
 
+/* print all orig nodes that announce the address for this global entry.
+ * it is assumed that the caller holds rcu_read_lock();
+ */
+static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
+				  struct seq_file *seq)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tt_orig_list_entry *orig_entry;
+	struct tt_common_entry *tt_common_entry;
+	uint16_t flags;
+	uint8_t last_ttvn;
+
+	tt_common_entry = &tt_global_entry->common;
+
+	head = &tt_global_entry->orig_list;
+
+	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+		flags = tt_common_entry->flags;
+		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
+		seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
+			   tt_global_entry->common.addr, orig_entry->ttvn,
+			   orig_entry->orig_node->orig, last_ttvn,
+			   (flags & TT_CLIENT_ROAM ? 'R' : '.'),
+			   (flags & TT_CLIENT_WIFI ? 'W' : '.'));
+	}
+}
+
 int tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
@@ -603,18 +707,7 @@
 			tt_global_entry = container_of(tt_common_entry,
 						       struct tt_global_entry,
 						       common);
-			seq_printf(seq,
-				   " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
-				   tt_global_entry->common.addr,
-				   tt_global_entry->ttvn,
-				   tt_global_entry->orig_node->orig,
-				   (uint8_t) atomic_read(
-						&tt_global_entry->orig_node->
-						last_ttvn),
-				   (tt_global_entry->common.flags &
-				    TT_CLIENT_ROAM ? 'R' : '.'),
-				   (tt_global_entry->common.flags &
-				    TT_CLIENT_WIFI ? 'W' : '.'));
+			tt_global_print_entry(tt_global_entry, seq);
 		}
 		rcu_read_unlock();
 	}
@@ -624,59 +717,150 @@
 	return ret;
 }
 
-static void _tt_global_del(struct bat_priv *bat_priv,
-			   struct tt_global_entry *tt_global_entry,
-			   const char *message)
+/* deletes the orig list of a tt_global_entry */
+static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
 {
-	if (!tt_global_entry)
-		goto out;
+	struct hlist_head *head;
+	struct hlist_node *node, *safe;
+	struct tt_orig_list_entry *orig_entry;
 
+	spin_lock_bh(&tt_global_entry->list_lock);
+	head = &tt_global_entry->orig_list;
+	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+		hlist_del_rcu(node);
+		tt_orig_list_entry_free_ref(orig_entry);
+	}
+	spin_unlock_bh(&tt_global_entry->list_lock);
+
+}
+
+static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
+				     struct tt_global_entry *tt_global_entry,
+				     struct orig_node *orig_node,
+				     const char *message)
+{
+	struct hlist_head *head;
+	struct hlist_node *node, *safe;
+	struct tt_orig_list_entry *orig_entry;
+
+	spin_lock_bh(&tt_global_entry->list_lock);
+	head = &tt_global_entry->orig_list;
+	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+		if (orig_entry->orig_node == orig_node) {
+			bat_dbg(DBG_TT, bat_priv,
+				"Deleting %pM from global tt entry %pM: %s\n",
+				orig_node->orig, tt_global_entry->common.addr,
+				message);
+			hlist_del_rcu(node);
+			tt_orig_list_entry_free_ref(orig_entry);
+		}
+	}
+	spin_unlock_bh(&tt_global_entry->list_lock);
+}
+
+static void tt_global_del_struct(struct bat_priv *bat_priv,
+				 struct tt_global_entry *tt_global_entry,
+				 const char *message)
+{
 	bat_dbg(DBG_TT, bat_priv,
-		"Deleting global tt entry %pM (via %pM): %s\n",
-		tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
-		message);
-
-	atomic_dec(&tt_global_entry->orig_node->tt_size);
+		"Deleting global tt entry %pM: %s\n",
+		tt_global_entry->common.addr, message);
 
 	hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
 		    tt_global_entry->common.addr);
-out:
-	if (tt_global_entry)
-		tt_global_entry_free_ref(tt_global_entry);
+	tt_global_entry_free_ref(tt_global_entry);
+
 }
 
-void tt_global_del(struct bat_priv *bat_priv,
-		   struct orig_node *orig_node, const unsigned char *addr,
-		   const char *message, bool roaming)
+/* If the client is to be deleted, we check if it is the last originator entry
+ * within the tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer,
+ * otherwise we simply remove the originator scheduled for deletion.
+ */
+static void tt_global_del_roaming(struct bat_priv *bat_priv,
+				  struct tt_global_entry *tt_global_entry,
+				  struct orig_node *orig_node,
+				  const char *message)
+{
+	bool last_entry = true;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tt_orig_list_entry *orig_entry;
+
+	/* no local entry exists, case 1:
+	 * Check if this is the last one or if other entries exist.
+	 */
+
+	rcu_read_lock();
+	head = &tt_global_entry->orig_list;
+	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+		if (orig_entry->orig_node != orig_node) {
+			last_entry = false;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	if (last_entry) {
+		/* it's the last one, mark for roaming. */
+		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+		tt_global_entry->roam_at = jiffies;
+	} else
+		/* there is another entry, we can simply delete this
+		 * one and can still use the other one.
+		 */
+		tt_global_del_orig_entry(bat_priv, tt_global_entry,
+					 orig_node, message);
+}
+
+
+
+static void tt_global_del(struct bat_priv *bat_priv,
+			  struct orig_node *orig_node,
+			  const unsigned char *addr,
+			  const char *message, bool roaming)
 {
 	struct tt_global_entry *tt_global_entry = NULL;
 	struct tt_local_entry *tt_local_entry = NULL;
 
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
-	if (!tt_global_entry || tt_global_entry->orig_node != orig_node)
+	if (!tt_global_entry)
 		goto out;
 
-	if (!roaming)
-		goto out_del;
+	if (!roaming) {
+		tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
+					 message);
 
-	/* if we are deleting a global entry due to a roam
-	 * event, there are two possibilities:
-	 * 1) the client roamed from node A to node B => we mark
-	 *    it with TT_CLIENT_ROAM, we start a timer and we
-	 *    wait for node B to claim it. In case of timeout
-	 *    the entry is purged.
-	 * 2) the client roamed to us => we can directly delete
-	 *    the global entry, since it is useless now. */
-	tt_local_entry = tt_local_hash_find(bat_priv,
-					    tt_global_entry->common.addr);
-	if (!tt_local_entry) {
-		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
-		tt_global_entry->roam_at = jiffies;
+		if (hlist_empty(&tt_global_entry->orig_list))
+			tt_global_del_struct(bat_priv, tt_global_entry,
+					     message);
+
 		goto out;
 	}
 
-out_del:
-	_tt_global_del(bat_priv, tt_global_entry, message);
+	/* if we are deleting a global entry due to a roam
+	 * event, there are two possibilities:
+	 * 1) the client roamed from node A to node B => if there
+	 *    is only one originator left for this client, we mark
+	 *    it with TT_CLIENT_ROAM, we start a timer and we
+	 *    wait for node B to claim it. In case of timeout
+	 *    the entry is purged.
+	 *
+	 *    If there are other originators left, we directly delete
+	 *    the originator.
+	 * 2) the client roamed to us => we can directly delete
+	 *    the global entry, since it is useless now. */
+
+	tt_local_entry = tt_local_hash_find(bat_priv,
+					    tt_global_entry->common.addr);
+	if (tt_local_entry) {
+		/* local entry exists, case 2: client roamed to us. */
+		tt_global_del_orig_list(tt_global_entry);
+		tt_global_del_struct(bat_priv, tt_global_entry, message);
+	} else
+		/* no local entry exists, case 1: check for roaming */
+		tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
+				      message);
+
 
 out:
 	if (tt_global_entry)
@@ -709,11 +893,14 @@
 			tt_global_entry = container_of(tt_common_entry,
 						       struct tt_global_entry,
 						       common);
-			if (tt_global_entry->orig_node == orig_node) {
+
+			tt_global_del_orig_entry(bat_priv, tt_global_entry,
+						 orig_node, message);
+
+			if (hlist_empty(&tt_global_entry->orig_list)) {
 				bat_dbg(DBG_TT, bat_priv,
-					"Deleting global tt entry %pM (via %pM): %s\n",
+					"Deleting global tt entry %pM: %s\n",
 					tt_global_entry->common.addr,
-					tt_global_entry->orig_node->orig,
 					message);
 				hlist_del_rcu(node);
 				tt_global_entry_free_ref(tt_global_entry);
@@ -754,7 +941,7 @@
 			bat_dbg(DBG_TT, bat_priv,
 				"Deleting global tt entry (%pM): Roaming timeout\n",
 				tt_global_entry->common.addr);
-			atomic_dec(&tt_global_entry->orig_node->tt_size);
+
 			hlist_del_rcu(node);
 			tt_global_entry_free_ref(tt_global_entry);
 		}
@@ -817,6 +1004,11 @@
 	struct tt_local_entry *tt_local_entry = NULL;
 	struct tt_global_entry *tt_global_entry = NULL;
 	struct orig_node *orig_node = NULL;
+	struct neigh_node *router = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tt_orig_list_entry *orig_entry;
+	int best_tq;
 
 	if (src && atomic_read(&bat_priv->ap_isolation)) {
 		tt_local_entry = tt_local_hash_find(bat_priv, src);
@@ -833,11 +1025,25 @@
 	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
 		goto out;
 
-	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
-		goto out;
+	best_tq = 0;
 
-	orig_node = tt_global_entry->orig_node;
+	rcu_read_lock();
+	head = &tt_global_entry->orig_list;
+	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+		router = orig_node_get_router(orig_entry->orig_node);
+		if (!router)
+			continue;
 
+		if (router->tq_avg > best_tq) {
+			orig_node = orig_entry->orig_node;
+			best_tq = router->tq_avg;
+		}
+		neigh_node_free_ref(router);
+	}
+	/* found anything? */
+	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
+		orig_node = NULL;
+	rcu_read_unlock();
 out:
 	if (tt_global_entry)
 		tt_global_entry_free_ref(tt_global_entry);
@@ -848,7 +1054,8 @@
 }
 
 /* Calculates the checksum of the local table of a given orig_node */
-uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
+static uint16_t tt_global_crc(struct bat_priv *bat_priv,
+			      struct orig_node *orig_node)
 {
 	uint16_t total = 0, total_one;
 	struct hashtable_t *hash = bat_priv->tt_global_hash;
@@ -868,20 +1075,26 @@
 			tt_global_entry = container_of(tt_common_entry,
 						       struct tt_global_entry,
 						       common);
-			if (compare_eth(tt_global_entry->orig_node,
-					orig_node)) {
-				/* Roaming clients are in the global table for
-				 * consistency only. They don't have to be
-				 * taken into account while computing the
-				 * global crc */
-				if (tt_common_entry->flags & TT_CLIENT_ROAM)
-					continue;
-				total_one = 0;
-				for (j = 0; j < ETH_ALEN; j++)
-					total_one = crc16_byte(total_one,
-						tt_common_entry->addr[j]);
-				total ^= total_one;
-			}
+			/* Roaming clients are in the global table for
+			 * consistency only. They don't have to be
+			 * taken into account while computing the
+			 * global crc
+			 */
+			if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
+				continue;
+
+			/* find out if this global entry is announced by this
+			 * originator
+			 */
+			if (!tt_global_entry_has_orig(tt_global_entry,
+						      orig_node))
+				continue;
+
+			total_one = 0;
+			for (j = 0; j < ETH_ALEN; j++)
+				total_one = crc16_byte(total_one,
+					tt_global_entry->common.addr[j]);
+			total ^= total_one;
 		}
 		rcu_read_unlock();
 	}
@@ -936,8 +1149,10 @@
 	spin_unlock_bh(&bat_priv->tt_req_list_lock);
 }
 
-void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
-			 const unsigned char *tt_buff, uint8_t tt_num_changes)
+static void tt_save_orig_buffer(struct bat_priv *bat_priv,
+				struct orig_node *orig_node,
+				const unsigned char *tt_buff,
+				uint8_t tt_num_changes)
 {
 	uint16_t tt_buff_len = tt_len(tt_num_changes);
 
@@ -1020,7 +1235,7 @@
 	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
 				       common);
 
-	return (tt_global_entry->orig_node == orig_node);
+	return tt_global_entry_has_orig(tt_global_entry, orig_node);
 }
 
 static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
@@ -1124,7 +1339,7 @@
 	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
 	tt_request->header.ttl = TTL;
 	tt_request->ttvn = ttvn;
-	tt_request->tt_data = tt_crc;
+	tt_request->tt_data = htons(tt_crc);
 	tt_request->flags = TT_REQUEST;
 
 	if (full_table)
@@ -1401,10 +1616,15 @@
 bool send_tt_response(struct bat_priv *bat_priv,
 		      struct tt_query_packet *tt_request)
 {
-	if (is_my_mac(tt_request->dst))
+	if (is_my_mac(tt_request->dst)) {
+		/* don't answer backbone gws! */
+		if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
+			return true;
+
 		return send_my_tt_response(bat_priv, tt_request);
-	else
+	} else {
 		return send_other_tt_response(bat_priv, tt_request);
+	}
 }
 
 static void _tt_update_changes(struct bat_priv *bat_priv,
@@ -1508,6 +1728,10 @@
 		tt_response->src, tt_response->ttvn, tt_response->tt_data,
 		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
 
+	/* we should never have asked a backbone gw */
+	if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
+		goto out;
+
 	orig_node = orig_hash_find(bat_priv, tt_response->src);
 	if (!orig_node)
 		goto out;
@@ -1627,8 +1851,8 @@
 	return ret;
 }
 
-void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
-		   struct orig_node *orig_node)
+static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+			  struct orig_node *orig_node)
 {
 	struct neigh_node *neigh_node = NULL;
 	struct sk_buff *skb = NULL;
@@ -1796,6 +2020,8 @@
 
 	/* Increment the TTVN only once per OGM interval */
 	atomic_inc(&bat_priv->ttvn);
+	bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
+		(uint8_t)atomic_read(&bat_priv->ttvn));
 	bat_priv->tt_poss_change = false;
 }
 
@@ -1836,6 +2062,10 @@
 	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
 	bool full_table = true;
 
+	/* don't care about a backbone gateway's updates. */
+	if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
+		return;
+
 	/* orig table not initialised AND first diff is in the OGM OR the ttvn
 	 * increased by one -> we can apply the attached changes */
 	if ((!orig_node->tt_initialised && ttvn == 1) ||
@@ -1873,6 +2103,7 @@
 	} else {
 		/* if we missed more than one change or our tables are not
 		 * in sync anymore -> request fresh tt data */
+
 		if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
 		    orig_node->tt_crc != tt_crc) {
 request_table:
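
For reference, the reworked tt_global_crc() above hashes each client MAC announced by the originator with CRC-16 and XOR-folds the per-client values, so the result does not depend on iteration order. Below is a user-space sketch of that folding, not part of the patch; the bitwise crc16_byte() is assumed to match the kernel's table-driven lib/crc16.c helper (CRC-16/ARC, reflected polynomial 0xA001), and the client addresses are made up.

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* bitwise stand-in for the kernel's crc16_byte() (assumed CRC-16/ARC) */
static uint16_t crc16_byte(uint16_t crc, uint8_t data)
{
	int i;

	crc ^= data;
	for (i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	return crc;
}

/* XOR-fold one CRC per announced client, as tt_global_crc() now does */
static uint16_t tt_crc(const uint8_t clients[][ETH_ALEN], int num)
{
	uint16_t total = 0, total_one;
	int i, j;

	for (i = 0; i < num; i++) {
		total_one = 0;
		for (j = 0; j < ETH_ALEN; j++)
			total_one = crc16_byte(total_one, clients[i][j]);
		total ^= total_one;
	}
	return total;
}

int main(void)
{
	/* example client MACs, purely illustrative */
	const uint8_t clients[2][ETH_ALEN] = {
		{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 },
		{ 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee },
	};

	printf("tt crc: 0x%04x\n", tt_crc(clients, 2));
	return 0;
}
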
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index c753633..bfebe26 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -39,23 +39,15 @@
 int tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void tt_global_del_orig(struct bat_priv *bat_priv,
 			struct orig_node *orig_node, const char *message);
-void tt_global_del(struct bat_priv *bat_priv,
-		   struct orig_node *orig_node, const unsigned char *addr,
-		   const char *message, bool roaming);
 struct orig_node *transtable_search(struct bat_priv *bat_priv,
 				    const uint8_t *src, const uint8_t *addr);
-void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
-			 const unsigned char *tt_buff, uint8_t tt_num_changes);
 uint16_t tt_local_crc(struct bat_priv *bat_priv);
-uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void tt_free(struct bat_priv *bat_priv);
 bool send_tt_response(struct bat_priv *bat_priv,
 		      struct tt_query_packet *tt_request);
 bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
 void handle_tt_response(struct bat_priv *bat_priv,
 			struct tt_query_packet *tt_response);
-void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
-		   struct orig_node *orig_node);
 void tt_commit_changes(struct bat_priv *bat_priv);
 bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
 void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 302efb5..2f4848b 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -27,7 +27,7 @@
 #include "packet.h"
 #include "bitarray.h"
 
-#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \
+#define BAT_HEADER_LEN (ETH_HLEN + \
 	((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
 	 sizeof(struct unicast_packet) : \
 	 sizeof(struct bcast_packet))))
@@ -90,7 +90,7 @@
 	bool tt_poss_change;
 	uint32_t last_real_seqno;
 	uint8_t last_ttl;
-	unsigned long bcast_bits[NUM_WORDS];
+	DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE);
 	uint32_t last_bcast_seqno;
 	struct hlist_head neigh_list;
 	struct list_head frag_list;
@@ -132,7 +132,7 @@
 	uint8_t last_ttl;
 	struct list_head bonding_list;
 	unsigned long last_valid;
-	unsigned long real_bits[NUM_WORDS];
+	DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
 	atomic_t refcount;
 	struct rcu_head rcu;
 	struct orig_node *orig_node;
@@ -140,6 +140,13 @@
 	spinlock_t tq_lock;	/* protects: tq_recv, tq_index */
 };
 
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct bcast_duplist_entry {
+	uint8_t orig[ETH_ALEN];
+	uint16_t crc;
+	unsigned long entrytime;
+};
+#endif
 
 struct bat_priv {
 	atomic_t mesh_state;
@@ -148,6 +155,7 @@
 	atomic_t bonding;		/* boolean */
 	atomic_t fragmentation;		/* boolean */
 	atomic_t ap_isolation;		/* boolean */
+	atomic_t bridge_loop_avoidance;	/* boolean */
 	atomic_t vis_mode;		/* VIS_TYPE_* */
 	atomic_t gw_mode;		/* GW_MODE_* */
 	atomic_t gw_sel_class;		/* uint */
@@ -161,6 +169,7 @@
 	atomic_t ttvn; /* translation table version number */
 	atomic_t tt_ogm_append_cnt;
 	atomic_t tt_local_changes; /* changes registered in a OGM interval */
+	atomic_t bla_num_requests; /* number of bla requests in flight */
 	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
 	 * If true, then I received a Roaming_adv and I have to inspect every
 	 * packet directed to me to check whether I am still the true
@@ -174,15 +183,23 @@
 	struct hlist_head forw_bat_list;
 	struct hlist_head forw_bcast_list;
 	struct hlist_head gw_list;
-	struct hlist_head softif_neigh_vids;
 	struct list_head tt_changes_list; /* tracks changes in a OGM int */
 	struct list_head vis_send_list;
 	struct hashtable_t *orig_hash;
 	struct hashtable_t *tt_local_hash;
 	struct hashtable_t *tt_global_hash;
+#ifdef CONFIG_BATMAN_ADV_BLA
+	struct hashtable_t *claim_hash;
+	struct hashtable_t *backbone_hash;
+#endif
 	struct list_head tt_req_list; /* list of pending tt_requests */
 	struct list_head tt_roam_list;
 	struct hashtable_t *vis_hash;
+#ifdef CONFIG_BATMAN_ADV_BLA
+	struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE];
+	int bcast_duplist_curr;
+	struct bla_claim_dst claim_dest;
+#endif
 	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 	spinlock_t forw_bcast_list_lock; /* protects  */
 	spinlock_t tt_changes_list_lock; /* protects tt_changes */
@@ -191,8 +208,6 @@
 	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
 	spinlock_t vis_hash_lock; /* protects vis_hash */
 	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
-	spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
-	spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
 	atomic_t num_local_tt;
 	/* Checksum of the local table, recomputed before sending a new OGM */
 	atomic_t tt_crc;
@@ -202,6 +217,7 @@
 	struct delayed_work tt_work;
 	struct delayed_work orig_work;
 	struct delayed_work vis_work;
+	struct delayed_work bla_work;
 	struct gw_node __rcu *curr_gw;  /* rcu protected pointer */
 	atomic_t gw_reselect;
 	struct hard_iface __rcu *primary_if;  /* rcu protected pointer */
@@ -239,11 +255,42 @@
 
 struct tt_global_entry {
 	struct tt_common_entry common;
-	struct orig_node *orig_node;
-	uint8_t ttvn;
+	struct hlist_head orig_list;
+	spinlock_t list_lock;	/* protects the list */
 	unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
 };
 
+struct tt_orig_list_entry {
+	struct orig_node *orig_node;
+	uint8_t ttvn;
+	struct rcu_head rcu;
+	struct hlist_node list;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct backbone_gw {
+	uint8_t orig[ETH_ALEN];
+	short vid;		/* used VLAN ID */
+	struct hlist_node hash_entry;
+	struct bat_priv *bat_priv;
+	unsigned long lasttime;	/* last time we heard of this backbone gw */
+	atomic_t request_sent;
+	atomic_t refcount;
+	struct rcu_head rcu;
+	uint16_t crc;		/* crc checksum over all claims */
+};
+
+struct claim {
+	uint8_t addr[ETH_ALEN];
+	short vid;
+	struct backbone_gw *backbone_gw;
+	unsigned long lasttime;	/* last time we heard of claim (locals only) */
+	struct rcu_head rcu;
+	atomic_t refcount;
+	struct hlist_node hash_entry;
+};
+#endif
+
 struct tt_change_node {
 	struct list_head list;
 	struct tt_change change;
@@ -327,31 +374,15 @@
 	uint8_t mac[ETH_ALEN];
 };
 
-struct softif_neigh_vid {
-	struct hlist_node list;
-	struct bat_priv *bat_priv;
-	short vid;
-	atomic_t refcount;
-	struct softif_neigh __rcu *softif_neigh;
-	struct rcu_head rcu;
-	struct hlist_head softif_neigh_list;
-};
-
-struct softif_neigh {
-	struct hlist_node list;
-	uint8_t addr[ETH_ALEN];
-	unsigned long last_seen;
-	atomic_t refcount;
-	struct rcu_head rcu;
-};
-
 struct bat_algo_ops {
 	struct hlist_node list;
 	char *name;
-	/* init OGM when hard-interface is enabled */
-	void (*bat_ogm_init)(struct hard_iface *hard_iface);
-	/* init primary OGM when primary interface is selected */
-	void (*bat_ogm_init_primary)(struct hard_iface *hard_iface);
+	/* init routing info when hard-interface is enabled */
+	int (*bat_iface_enable)(struct hard_iface *hard_iface);
+	/* de-init routing info when hard-interface is disabled */
+	void (*bat_iface_disable)(struct hard_iface *hard_iface);
+	/* called when primary interface is selected / changed */
+	void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
 	/* init mac addresses of the OGM belonging to this hard-interface */
 	void (*bat_ogm_update_mac)(struct hard_iface *hard_iface);
 	/* prepare a new outgoing OGM for the send queue */
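
The bcast_bits/real_bits change above replaces a hand-sized unsigned long array with DECLARE_BITMAP(), which derives the array length from the number of bits. A simplified user-space restatement of what the macro amounts to, not part of the patch; the helper functions and the TQ_LOCAL_WINDOW_SIZE value are assumptions for the sketch, and the kernel's real set_bit()/test_bit() are atomic bitops from <linux/bitops.h>.

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(nbits)	(((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define TQ_LOCAL_WINDOW_SIZE	64	/* value assumed for the example */

static void set_bit_simple(unsigned long *map, unsigned int nr)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static bool test_bit_simple(const unsigned long *map, unsigned int nr)
{
	return map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

int main(void)
{
	DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE) = { 0 };

	set_bit_simple(bcast_bits, 42);
	printf("bit 42: %d, bit 7: %d\n",
	       test_bit_simple(bcast_bits, 42), test_bit_simple(bcast_bits, 7));
	return 0;
}
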
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index c4a5b8c..cec216f 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -434,12 +434,12 @@
 		return NULL;
 
 	info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
-					 sizeof(struct ethhdr));
+					 ETH_HLEN);
 	if (!info->skb_packet) {
 		kfree(info);
 		return NULL;
 	}
-	skb_reserve(info->skb_packet, sizeof(struct ethhdr));
+	skb_reserve(info->skb_packet, ETH_HLEN);
 	packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
 					      + vis_info_len);
 
@@ -894,11 +894,11 @@
 
 	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
 							  MAX_VIS_PACKET_SIZE +
-							 sizeof(struct ethhdr));
+							  ETH_HLEN);
 	if (!bat_priv->my_vis_info->skb_packet)
 		goto free_info;
 
-	skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
+	skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
 	packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
 					      sizeof(*packet));
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ba829de..d6e5929 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -317,6 +317,9 @@
 	.ndo_add_slave		 = br_add_slave,
 	.ndo_del_slave		 = br_del_slave,
 	.ndo_fix_features        = br_fix_features,
+	.ndo_fdb_add		 = br_fdb_add,
+	.ndo_fdb_del		 = br_fdb_delete,
+	.ndo_fdb_dump		 = br_fdb_dump,
 };
 
 static void br_dev_free(struct net_device *dev)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 5ba0c84..5945c54 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -487,14 +487,14 @@
 	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
 	ndm->ndm_state   = fdb_to_nud(fdb);
 
-	NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
-
+	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
+		goto nla_put_failure;
 	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
 	ci.ndm_confirmed = 0;
 	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
 	ci.ndm_refcnt	 = 0;
-	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
-
+	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -535,44 +535,38 @@
 }
 
 /* Dump information about entries, in response to GETNEIGH */
-int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int br_fdb_dump(struct sk_buff *skb,
+		struct netlink_callback *cb,
+		struct net_device *dev,
+		int idx)
 {
-	struct net *net = sock_net(skb->sk);
-	struct net_device *dev;
-	int idx = 0;
+	struct net_bridge *br = netdev_priv(dev);
+	int i;
 
-	rcu_read_lock();
-	for_each_netdev_rcu(net, dev) {
-		struct net_bridge *br = netdev_priv(dev);
-		int i;
+	if (!(dev->priv_flags & IFF_EBRIDGE))
+		goto out;
 
-		if (!(dev->priv_flags & IFF_EBRIDGE))
-			continue;
+	for (i = 0; i < BR_HASH_SIZE; i++) {
+		struct hlist_node *h;
+		struct net_bridge_fdb_entry *f;
 
-		for (i = 0; i < BR_HASH_SIZE; i++) {
-			struct hlist_node *h;
-			struct net_bridge_fdb_entry *f;
+		hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+			if (idx < cb->args[0])
+				goto skip;
 
-			hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
-				if (idx < cb->args[0])
-					goto skip;
-
-				if (fdb_fill_info(skb, br, f,
-						  NETLINK_CB(cb->skb).pid,
-						  cb->nlh->nlmsg_seq,
-						  RTM_NEWNEIGH,
-						  NLM_F_MULTI) < 0)
-					break;
+			if (fdb_fill_info(skb, br, f,
+					  NETLINK_CB(cb->skb).pid,
+					  cb->nlh->nlmsg_seq,
+					  RTM_NEWNEIGH,
+					  NLM_F_MULTI) < 0)
+				break;
 skip:
-				++idx;
-			}
+			++idx;
 		}
 	}
-	rcu_read_unlock();
 
-	cb->args[0] = idx;
-
-	return skb->len;
+out:
+	return idx;
 }
 
 /* Update (create or replace) forwarding database entry */
@@ -614,43 +608,11 @@
 }
 
 /* Add new permanent fdb entry with RTM_NEWNEIGH */
-int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+int br_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+	       unsigned char *addr, u16 nlh_flags)
 {
-	struct net *net = sock_net(skb->sk);
-	struct ndmsg *ndm;
-	struct nlattr *tb[NDA_MAX+1];
-	struct net_device *dev;
 	struct net_bridge_port *p;
-	const __u8 *addr;
-	int err;
-
-	ASSERT_RTNL();
-	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
-	if (err < 0)
-		return err;
-
-	ndm = nlmsg_data(nlh);
-	if (ndm->ndm_ifindex == 0) {
-		pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n");
-		return -EINVAL;
-	}
-
-	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
-	if (dev == NULL) {
-		pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n");
-		return -ENODEV;
-	}
-
-	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
-		pr_info("bridge: RTM_NEWNEIGH with invalid address\n");
-		return -EINVAL;
-	}
-
-	addr = nla_data(tb[NDA_LLADDR]);
-	if (!is_valid_ether_addr(addr)) {
-		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
-		return -EINVAL;
-	}
+	int err = 0;
 
 	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
 		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
@@ -670,14 +632,14 @@
 		rcu_read_unlock();
 	} else {
 		spin_lock_bh(&p->br->hash_lock);
-		err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
+		err = fdb_add_entry(p, addr, ndm->ndm_state, nlh_flags);
 		spin_unlock_bh(&p->br->hash_lock);
 	}
 
 	return err;
 }
 
-static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
+static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr)
 {
 	struct net_bridge *br = p->br;
 	struct hlist_head *head = &br->hash[br_mac_hash(addr)];
@@ -692,40 +654,12 @@
 }
 
 /* Remove neighbor entry with RTM_DELNEIGH */
-int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+		  unsigned char *addr)
 {
-	struct net *net = sock_net(skb->sk);
-	struct ndmsg *ndm;
 	struct net_bridge_port *p;
-	struct nlattr *llattr;
-	const __u8 *addr;
-	struct net_device *dev;
 	int err;
 
-	ASSERT_RTNL();
-	if (nlmsg_len(nlh) < sizeof(*ndm))
-		return -EINVAL;
-
-	ndm = nlmsg_data(nlh);
-	if (ndm->ndm_ifindex == 0) {
-		pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n");
-		return -EINVAL;
-	}
-
-	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
-	if (dev == NULL) {
-		pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n");
-		return -ENODEV;
-	}
-
-	llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
-	if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
-		pr_info("bridge: RTM_DELNEIGH with invalid address\n");
-		return -EINVAL;
-	}
-
-	addr = nla_data(llattr);
-
 	p = br_port_get_rtnl(dev);
 	if (p == NULL) {
 		pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
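
The br_fdb_dump() rework above follows the usual netlink dump shape: the caller hands in a running index, entries below the saved cb->args[0] are skipped because an earlier message already carried them, and the function returns the new index so the next dump call resumes there. A user-space sketch of that skip/resume pattern, not part of the patch; fill_one(), dump_chunk() and the budget counter are invented for illustration.

#include <stdio.h>

/* stand-in for fdb_fill_info(): fails once the message buffer is full */
static int fill_one(int budget_left)
{
	return budget_left > 0 ? 0 : -1;
}

/* walk all entries, skip those sent in earlier chunks, return the resume index */
static int dump_chunk(int num_entries, int resume_from, int budget)
{
	int idx = 0;
	int entry;

	for (entry = 0; entry < num_entries; entry++) {
		if (idx < resume_from)
			goto skip;

		if (fill_one(budget--) < 0)
			break;	/* buffer full: idx marks where to resume */
skip:
		idx++;
	}
	return idx;
}

int main(void)
{
	int resume = 0;

	resume = dump_chunk(10, resume, 6);	/* first message holds 6 entries */
	printf("resume index after chunk 1: %d\n", resume);
	resume = dump_chunk(10, resume, 6);	/* second message picks up here */
	printf("resume index after chunk 2: %d\n", resume);
	return 0;
}
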
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 61f6534..aab8470 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -34,7 +34,7 @@
 		p->state == BR_STATE_FORWARDING);
 }
 
-static inline unsigned packet_length(const struct sk_buff *skb)
+static inline unsigned int packet_length(const struct sk_buff *skb)
 {
 	return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
 }
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 702a1ae..708e84f 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -36,6 +36,8 @@
 #define mlock_dereference(X, br) \
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
+static void br_multicast_start_querier(struct net_bridge *br);
+
 #if IS_ENABLED(CONFIG_IPV6)
 static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
@@ -241,7 +243,6 @@
 	hlist_del_rcu(&mp->hlist[mdb->ver]);
 	mdb->size--;
 
-	del_timer(&mp->query_timer);
 	call_rcu_bh(&mp->rcu, br_multicast_free_group);
 
 out:
@@ -271,7 +272,6 @@
 		rcu_assign_pointer(*pp, p->next);
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
-		del_timer(&p->query_timer);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
 		if (!mp->ports && !mp->mglist &&
@@ -507,74 +507,6 @@
 	return NULL;
 }
 
-static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
-{
-	struct net_bridge *br = mp->br;
-	struct sk_buff *skb;
-
-	skb = br_multicast_alloc_query(br, &mp->addr);
-	if (!skb)
-		goto timer;
-
-	netif_rx(skb);
-
-timer:
-	if (++mp->queries_sent < br->multicast_last_member_count)
-		mod_timer(&mp->query_timer,
-			  jiffies + br->multicast_last_member_interval);
-}
-
-static void br_multicast_group_query_expired(unsigned long data)
-{
-	struct net_bridge_mdb_entry *mp = (void *)data;
-	struct net_bridge *br = mp->br;
-
-	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || !mp->mglist ||
-	    mp->queries_sent >= br->multicast_last_member_count)
-		goto out;
-
-	br_multicast_send_group_query(mp);
-
-out:
-	spin_unlock(&br->multicast_lock);
-}
-
-static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
-{
-	struct net_bridge_port *port = pg->port;
-	struct net_bridge *br = port->br;
-	struct sk_buff *skb;
-
-	skb = br_multicast_alloc_query(br, &pg->addr);
-	if (!skb)
-		goto timer;
-
-	br_deliver(port, skb);
-
-timer:
-	if (++pg->queries_sent < br->multicast_last_member_count)
-		mod_timer(&pg->query_timer,
-			  jiffies + br->multicast_last_member_interval);
-}
-
-static void br_multicast_port_group_query_expired(unsigned long data)
-{
-	struct net_bridge_port_group *pg = (void *)data;
-	struct net_bridge_port *port = pg->port;
-	struct net_bridge *br = port->br;
-
-	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
-	    pg->queries_sent >= br->multicast_last_member_count)
-		goto out;
-
-	br_multicast_send_port_group_query(pg);
-
-out:
-	spin_unlock(&br->multicast_lock);
-}
-
 static struct net_bridge_mdb_entry *br_multicast_get_group(
 	struct net_bridge *br, struct net_bridge_port *port,
 	struct br_ip *group, int hash)
@@ -582,8 +514,8 @@
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
 	struct hlist_node *p;
-	unsigned count = 0;
-	unsigned max;
+	unsigned int count = 0;
+	unsigned int max;
 	int elasticity;
 	int err;
 
@@ -690,8 +622,6 @@
 	mp->addr = *group;
 	setup_timer(&mp->timer, br_multicast_group_expired,
 		    (unsigned long)mp);
-	setup_timer(&mp->query_timer, br_multicast_group_query_expired,
-		    (unsigned long)mp);
 
 	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
 	mdb->size++;
@@ -746,8 +676,6 @@
 	hlist_add_head(&p->mglist, &port->mglist);
 	setup_timer(&p->timer, br_multicast_port_group_expired,
 		    (unsigned long)p);
-	setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
-		    (unsigned long)p);
 
 	rcu_assign_pointer(*pp, p);
 
@@ -814,6 +742,21 @@
 {
 }
 
+static void br_multicast_querier_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+	struct net_bridge *br = port->br;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) || br->multicast_disabled)
+		goto out;
+
+	br_multicast_start_querier(br);
+
+out:
+	spin_unlock(&br->multicast_lock);
+}
+
 static void __br_multicast_send_query(struct net_bridge *br,
 				      struct net_bridge_port *port,
 				      struct br_ip *ip)
@@ -840,6 +783,7 @@
 	struct br_ip br_group;
 
 	if (!netif_running(br->dev) || br->multicast_disabled ||
+	    !br->multicast_querier ||
 	    timer_pending(&br->multicast_querier_timer))
 		return;
 
@@ -1291,9 +1235,6 @@
 		     time_after(mp->timer.expires, time) :
 		     try_to_del_timer_sync(&mp->timer) >= 0)) {
 			mod_timer(&mp->timer, time);
-
-			mp->queries_sent = 0;
-			mod_timer(&mp->query_timer, now);
 		}
 
 		goto out;
@@ -1310,9 +1251,6 @@
 		     time_after(p->timer.expires, time) :
 		     try_to_del_timer_sync(&p->timer) >= 0)) {
 			mod_timer(&p->timer, time);
-
-			p->queries_sent = 0;
-			mod_timer(&p->query_timer, now);
 		}
 
 		break;
@@ -1361,8 +1299,8 @@
 	struct sk_buff *skb2 = skb;
 	const struct iphdr *iph;
 	struct igmphdr *ih;
-	unsigned len;
-	unsigned offset;
+	unsigned int len;
+	unsigned int offset;
 	int err;
 
 	/* We treat OOM as packet loss for now. */
@@ -1462,7 +1400,7 @@
 	u8 icmp6_type;
 	u8 nexthdr;
 	__be16 frag_off;
-	unsigned len;
+	unsigned int len;
 	int offset;
 	int err;
 
@@ -1628,6 +1566,7 @@
 	br->hash_max = 512;
 
 	br->multicast_router = 1;
+	br->multicast_querier = 0;
 	br->multicast_last_member_count = 2;
 	br->multicast_startup_query_count = 2;
 
@@ -1642,7 +1581,7 @@
 	setup_timer(&br->multicast_router_timer,
 		    br_multicast_local_router_expired, 0);
 	setup_timer(&br->multicast_querier_timer,
-		    br_multicast_local_router_expired, 0);
+		    br_multicast_querier_expired, 0);
 	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
 		    (unsigned long)br);
 }
@@ -1681,7 +1620,6 @@
 		hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
 					  hlist[ver]) {
 			del_timer(&mp->timer);
-			del_timer(&mp->query_timer);
 			call_rcu_bh(&mp->rcu, br_multicast_free_group);
 		}
 	}
@@ -1770,9 +1708,23 @@
 	return err;
 }
 
-int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+static void br_multicast_start_querier(struct net_bridge *br)
 {
 	struct net_bridge_port *port;
+
+	br_multicast_open(br);
+
+	list_for_each_entry(port, &br->port_list, list) {
+		if (port->state == BR_STATE_DISABLED ||
+		    port->state == BR_STATE_BLOCKING)
+			continue;
+
+		__br_multicast_enable_port(port);
+	}
+}
+
+int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+{
 	int err = 0;
 	struct net_bridge_mdb_htable *mdb;
 
@@ -1802,14 +1754,7 @@
 			goto rollback;
 	}
 
-	br_multicast_open(br);
-	list_for_each_entry(port, &br->port_list, list) {
-		if (port->state == BR_STATE_DISABLED ||
-		    port->state == BR_STATE_BLOCKING)
-			continue;
-
-		__br_multicast_enable_port(port);
-	}
+	br_multicast_start_querier(br);
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
@@ -1817,6 +1762,24 @@
 	return err;
 }
 
+int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
+{
+	val = !!val;
+
+	spin_lock_bh(&br->multicast_lock);
+	if (br->multicast_querier == val)
+		goto unlock;
+
+	br->multicast_querier = val;
+	if (val)
+		br_multicast_start_querier(br);
+
+unlock:
+	spin_unlock_bh(&br->multicast_lock);
+
+	return 0;
+}
+
 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
 {
 	int err = -ENOENT;
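
br_multicast_set_querier() added above is an edge-triggered toggle: normalize the value to 0/1, return early under the lock when nothing changes, and only start the querier on the off-to-on transition. A user-space analogue of that shape, not part of the patch; the pthread mutex stands in for multicast_lock and start_querier() is a made-up stub.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char querier_enabled;	/* mirrors br->multicast_querier:1 */

static void start_querier(void)
{
	printf("querier (re)started\n");
}

static int set_querier(unsigned long val)
{
	val = !!val;			/* accept any non-zero value as "on" */

	pthread_mutex_lock(&lock);
	if (querier_enabled == val)
		goto unlock;		/* no change, nothing to do */

	querier_enabled = val;
	if (val)
		start_querier();	/* only act on the 0 -> 1 transition */

unlock:
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	set_querier(5);		/* enables and starts the querier */
	set_querier(1);		/* already on: no-op */
	set_querier(0);		/* disables quietly */
	return 0;
}
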
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index dec4f38..9d4f09c 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -1008,12 +1008,6 @@
 	},
 	{ }
 };
-
-static struct ctl_path brnf_path[] = {
-	{ .procname = "net", },
-	{ .procname = "bridge", },
-	{ }
-};
 #endif
 
 int __init br_netfilter_init(void)
@@ -1030,7 +1024,7 @@
 		return ret;
 	}
 #ifdef CONFIG_SYSCTL
-	brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
+	brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
 	if (brnf_sysctl_header == NULL) {
 		printk(KERN_WARNING
 		       "br_netfilter: can't register to sysctl.\n");
@@ -1047,7 +1041,7 @@
 {
 	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
 #ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(brnf_sysctl_header);
+	unregister_net_sysctl_table(brnf_sysctl_header);
 #endif
 	dst_entries_destroy(&fake_dst_ops);
 }
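
Editor's note: the two br_netfilter.c hunks above are part of a tree-wide move away from ctl_path arrays; the path is now a plain string handed to register_net_sysctl(), with unregister_net_sysctl_table() as the matching teardown. Below is a minimal sketch of the resulting registration pattern, assuming a hypothetical "net/foo" directory and knob; only the two register/unregister calls mirror the real conversion.

/* Hypothetical sketch of the register_net_sysctl() pattern used above.
 * The "net/foo" path and the example_knob entry are made up; only the
 * registration and teardown calls reflect the real API.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int example_knob;

static struct ctl_table foo_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *foo_sysctl_header;

static int __init foo_sysctl_init(void)
{
	foo_sysctl_header = register_net_sysctl(&init_net, "net/foo", foo_table);
	return foo_sysctl_header ? 0 : -ENOMEM;
}

static void foo_sysctl_exit(void)
{
	unregister_net_sysctl_table(foo_sysctl_header);
}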
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a1daf82..2080485 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -60,20 +60,17 @@
 	hdr->ifi_flags = dev_get_flags(dev);
 	hdr->ifi_change = 0;
 
-	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
-	NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex);
-	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
-	NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate);
-
-	if (dev->addr_len)
-		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-
-	if (dev->ifindex != dev->iflink)
-		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
-	if (event == RTM_NEWLINK)
-		NLA_PUT_U8(skb, IFLA_PROTINFO, port->state);
-
+	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
+	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
+	    (dev->addr_len &&
+	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
+	    (dev->ifindex != dev->iflink &&
+	     nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+	    (event == RTM_NEWLINK &&
+	     nla_put_u8(skb, IFLA_PROTINFO, port->state)))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -91,7 +88,7 @@
 	int err = -ENOBUFS;
 
 	br_debug(port->br, "port %u(%s) event %d\n",
-		 (unsigned)port->port_no, port->dev->name, event);
+		 (unsigned int)port->port_no, port->dev->name, event);
 
 	skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
 	if (skb == NULL)
@@ -235,18 +232,6 @@
 			      br_rtm_setlink, NULL, NULL);
 	if (err)
 		goto err3;
-	err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH,
-			      br_fdb_add, NULL, NULL);
-	if (err)
-		goto err3;
-	err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH,
-			      br_fdb_delete, NULL, NULL);
-	if (err)
-		goto err3;
-	err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH,
-			      NULL, br_fdb_dump, NULL);
-	if (err)
-		goto err3;
 
 	return 0;
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0b67a63..1a8ad4f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,9 +82,7 @@
 	struct hlist_node		mglist;
 	struct rcu_head			rcu;
 	struct timer_list		timer;
-	struct timer_list		query_timer;
 	struct br_ip			addr;
-	u32				queries_sent;
 };
 
 struct net_bridge_mdb_entry
@@ -94,10 +92,8 @@
 	struct net_bridge_port_group __rcu *ports;
 	struct rcu_head			rcu;
 	struct timer_list		timer;
-	struct timer_list		query_timer;
 	struct br_ip			addr;
 	bool				mglist;
-	u32				queries_sent;
 };
 
 struct net_bridge_mdb_htable
@@ -228,6 +224,7 @@
 	unsigned char			multicast_router;
 
 	u8				multicast_disabled:1;
+	u8				multicast_querier:1;
 
 	u32				hash_elasticity;
 	u32				hash_max;
@@ -363,9 +360,18 @@
 extern void br_fdb_update(struct net_bridge *br,
 			  struct net_bridge_port *source,
 			  const unsigned char *addr);
-extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
-extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+
+extern int br_fdb_delete(struct ndmsg *ndm,
+			 struct net_device *dev,
+			 unsigned char *addr);
+extern int br_fdb_add(struct ndmsg *nlh,
+		      struct net_device *dev,
+		      unsigned char *addr,
+		      u16 nlh_flags);
+extern int br_fdb_dump(struct sk_buff *skb,
+		       struct netlink_callback *cb,
+		       struct net_device *dev,
+		       int idx);
 
 /* br_forward.c */
 extern void br_deliver(const struct net_bridge_port *to,
@@ -421,6 +427,7 @@
 extern int br_multicast_set_port_router(struct net_bridge_port *p,
 					unsigned long val);
 extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
 extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
 
 static inline bool br_multicast_is_router(struct net_bridge *br)
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 05ed9bc..0c0fe36 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -29,10 +29,9 @@
 #define BR_MIN_PATH_COST	1
 #define BR_MAX_PATH_COST	65535
 
-struct br_config_bpdu
-{
-	unsigned	topology_change:1;
-	unsigned	topology_change_ack:1;
+struct br_config_bpdu {
+	unsigned int	topology_change:1;
+	unsigned int	topology_change_ack:1;
 	bridge_id	root;
 	int		root_path_cost;
 	bridge_id	bridge_id;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 8c836d9..af9a120 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -32,7 +32,7 @@
 void br_log_state(const struct net_bridge_port *p)
 {
 	br_info(p->br, "port %u(%s) entered %s state\n",
-		(unsigned) p->port_no, p->dev->name,
+		(unsigned int) p->port_no, p->dev->name,
 		br_port_state_names[p->state]);
 }
 
@@ -478,7 +478,7 @@
 {
 	if (br_is_designated_port(p)) {
 		br_info(p->br, "port %u(%s) received tcn bpdu\n",
-			(unsigned) p->port_no, p->dev->name);
+			(unsigned int) p->port_no, p->dev->name);
 
 		br_topology_change_detection(p->br);
 		br_topology_change_acknowledge(p);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 58de2a0..a6747e6 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -56,7 +56,7 @@
 		return;
 
 	br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n",
-		(unsigned) p->port_no, p->dev->name,
+		(unsigned int) p->port_no, p->dev->name,
 		id->prio[0], id->prio[1], &id->addr);
 
 	/*
@@ -84,7 +84,7 @@
 	struct net_bridge *br = p->br;
 
 	br_debug(br, "port %u(%s) forward delay timer\n",
-		 (unsigned) p->port_no, p->dev->name);
+		 (unsigned int) p->port_no, p->dev->name);
 	spin_lock(&br->lock);
 	if (p->state == BR_STATE_LISTENING) {
 		p->state = BR_STATE_LEARNING;
@@ -131,7 +131,7 @@
 	struct net_bridge_port *p = (struct net_bridge_port *) arg;
 
 	br_debug(p->br, "port %u(%s) hold timer expired\n",
-		 (unsigned) p->port_no, p->dev->name);
+		 (unsigned int) p->port_no, p->dev->name);
 
 	spin_lock(&p->br->lock);
 	if (p->config_pending)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c236c0e..c5c0593 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -297,7 +297,7 @@
 				const char *buf, size_t len)
 {
 	struct net_bridge *br = to_bridge(d);
-	unsigned new_addr[6];
+	unsigned int new_addr[6];
 	int i;
 
 	if (!capable(CAP_NET_ADMIN))
@@ -379,6 +379,23 @@
 static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
 		   show_multicast_snooping, store_multicast_snooping);
 
+static ssize_t show_multicast_querier(struct device *d,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%d\n", br->multicast_querier);
+}
+
+static ssize_t store_multicast_querier(struct device *d,
+				       struct device_attribute *attr,
+				       const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, br_multicast_set_querier);
+}
+static DEVICE_ATTR(multicast_querier, S_IRUGO | S_IWUSR,
+		   show_multicast_querier, store_multicast_querier);
+
 static ssize_t show_hash_elasticity(struct device *d,
 				    struct device_attribute *attr, char *buf)
 {
@@ -702,6 +719,7 @@
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	&dev_attr_multicast_router.attr,
 	&dev_attr_multicast_snooping.attr,
+	&dev_attr_multicast_querier.attr,
 	&dev_attr_hash_elasticity.attr,
 	&dev_attr_hash_max.attr,
 	&dev_attr_multicast_last_member_count.attr,
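
Editor's note: like the other IGMP snooping parameters, the new multicast_querier attribute shows up under the bridge's sysfs directory and takes 0 or 1. A small user-space sketch that enables it and reads it back, assuming a bridge named br0 and root privileges:

/* Toggle the new bridge multicast_querier attribute from user space.
 * The bridge name "br0" is an assumption for the example.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/net/br0/bridge/multicast_querier";
	char buf[16];
	FILE *f;

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);		/* enable the querier */
	fclose(f);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("multicast_querier = %s", buf);
	fclose(f);
	return 0;
}

Writing 1 starts the querier immediately via br_multicast_set_querier(), as the br_multicast.c hunk earlier in this patch shows.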
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 5016fa5..0dccdb3 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -19,7 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
 #include <linux/caif/caif_socket.h>
-#include <linux/atomic.h>
+#include <linux/pkt_sched.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 #include <net/caif/caif_layer.h>
@@ -130,7 +130,7 @@
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-		(unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
+		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
 		if (net_ratelimit())
 			pr_debug("sending flow OFF (queue len = %d %d)\n",
 					atomic_read(&cf_sk->sk.sk_rmem_alloc),
@@ -505,6 +505,7 @@
 
 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
 	memset(skb->cb, 0, sizeof(struct caif_payload_info));
+	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
 
 	if (cf_sk->layer.dn == NULL) {
 		kfree_skb(skb);
@@ -1062,6 +1063,18 @@
 	/* Store the protocol */
 	sk->sk_protocol = (unsigned char) protocol;
 
+	/* Initialize default priority for well-known cases */
+	switch (protocol) {
+	case CAIFPROTO_AT:
+		sk->sk_priority = TC_PRIO_CONTROL;
+		break;
+	case CAIFPROTO_RFM:
+		sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
+		break;
+	default:
+		sk->sk_priority = TC_PRIO_BESTEFFORT;
+	}
+
 	/*
 	 * Lock in order to try to stop someone from opening the socket
 	 * too early.
@@ -1081,7 +1094,6 @@
 	set_rx_flow_on(cf_sk);
 
 	/* Set default options on configuration */
-	cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
 	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
 	cf_sk->conn_req.protocol = protocol;
 	release_sock(&cf_sk->sk);
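
Editor's note: with the hunk above, the per-socket default priority depends on the CAIF protocol (control traffic gets TC_PRIO_CONTROL, RFM gets TC_PRIO_INTERACTIVE_BULK, everything else best effort) and is stored in sk->sk_priority, so an application can still override it with the standard SO_PRIORITY socket option. A user-space sketch of that override, shown on an ordinary UDP socket purely for brevity; the same setsockopt() call applies to an AF_CAIF socket:

/* Override the kernel's default sk_priority from user space with
 * SO_PRIORITY.  A UDP socket is used purely for illustration.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/pkt_sched.h>	/* TC_PRIO_* values */

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int prio = TC_PRIO_INTERACTIVE;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)) < 0) {
		perror("setsockopt(SO_PRIORITY)");
		return 1;
	}
	printf("sk_priority set to %d\n", prio);
	return 0;
}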
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 5cf5222..047cd0e 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -9,6 +9,7 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/pkt_sched.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfctrl.h>
@@ -189,6 +190,7 @@
 	cfctrl->serv.dev_info.id = physlinkid;
 	cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
 	cfpkt_addbdy(pkt, physlinkid);
+	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
 	dn->transmit(dn, pkt);
 }
 
@@ -281,6 +283,7 @@
 	 *	might arrive with the newly allocated channel ID.
 	 */
 	cfpkt_info(pkt)->dev_info->id = param->phyid;
+	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
 	ret =
 	    dn->transmit(dn, pkt);
 	if (ret < 0) {
@@ -314,6 +317,7 @@
 	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
 	cfpkt_addbdy(pkt, channelid);
 	init_info(cfpkt_info(pkt), cfctrl);
+	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
 	ret =
 	    dn->transmit(dn, pkt);
 #ifndef CAIF_NO_LOOP
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index e335ba8..863dedd 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -381,6 +381,7 @@
 	memcpy(skb2->data, split, len2nd);
 	skb2->tail += len2nd;
 	skb2->len += len2nd;
+	skb2->priority = skb->priority;
 	return skb_to_pkt(skb2);
 }
 
@@ -394,3 +395,9 @@
 	return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
 }
 EXPORT_SYMBOL(cfpkt_info);
+
+void cfpkt_set_prio(struct cfpkt *pkt, int prio)
+{
+	pkt_to_skb(pkt)->priority = prio;
+}
+EXPORT_SYMBOL(cfpkt_set_prio);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 4aa33d4..dd485f6 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -11,6 +11,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pkt_sched.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
@@ -120,6 +121,7 @@
 			info->channel_id = service->layer.id;
 			info->hdr_len = 1;
 			info->dev_info = &service->dev_info;
+			cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
 			return layr->dn->transmit(layr->dn, pkt);
 		}
 	case CAIF_MODEMCMD_FLOW_OFF_REQ:
@@ -140,6 +142,7 @@
 			info->channel_id = service->layer.id;
 			info->hdr_len = 1;
 			info->dev_info = &service->dev_info;
+			cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
 			return layr->dn->transmit(layr->dn, pkt);
 		}
 	default:
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 20618dd..69771c0 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -103,6 +103,7 @@
 		skb->protocol = htons(ETH_P_IPV6);
 		break;
 	default:
+		kfree_skb(skb);
 		priv->netdev->stats.rx_errors++;
 		return -EINVAL;
 	}
@@ -220,14 +221,16 @@
 
 	if (skb->len > priv->netdev->mtu) {
 		pr_warn("Size of skb exceeded MTU\n");
+		kfree_skb(skb);
 		dev->stats.tx_errors++;
-		return -ENOSPC;
+		return NETDEV_TX_OK;
 	}
 
 	if (!priv->flowenabled) {
 		pr_debug("dropping packets flow off\n");
+		kfree_skb(skb);
 		dev->stats.tx_dropped++;
-		return NETDEV_TX_BUSY;
+		return NETDEV_TX_OK;
 	}
 
 	if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
@@ -242,7 +245,7 @@
 	result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
 	if (result) {
 		dev->stats.tx_dropped++;
-		return result;
+		return NETDEV_TX_OK;
 	}
 
 	/* Update statistics. */
@@ -421,14 +424,14 @@
 	struct chnl_net *priv;
 	u8 loop;
 	priv = netdev_priv(dev);
-	NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
-		    priv->conn_req.sockaddr.u.dgm.connection_id);
-	NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
-		    priv->conn_req.sockaddr.u.dgm.connection_id);
+	if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID,
+			priv->conn_req.sockaddr.u.dgm.connection_id) ||
+	    nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID,
+			priv->conn_req.sockaddr.u.dgm.connection_id))
+		goto nla_put_failure;
 	loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
-	NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);
-
-
+	if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop))
+		goto nla_put_failure;
 	return 0;
 nla_put_failure:
 	return -EMSGSIZE;
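
Editor's note: the chnl_net.c transmit-path hunks above fix an ndo_start_xmit contract violation. Once the driver decides to drop a packet it owns the skb, so it must free it and still return NETDEV_TX_OK; returning an error code or NETDEV_TX_BUSY makes the core requeue or retry a buffer the driver may already have freed. A minimal, hypothetical transmit handler following that rule:

/* Hypothetical ndo_start_xmit sketch illustrating the drop pattern the
 * chnl_net.c fix above adopts: consume the skb and return NETDEV_TX_OK
 * on a drop, never an error code or NETDEV_TX_BUSY.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	unsigned int len = skb->len;

	if (len > dev->mtu) {
		/* drop: the skb is ours to free */
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* ... hand the skb to the lower layer here; it then owns the buffer ... */

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;
	return NETDEV_TX_OK;
}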
diff --git a/net/can/gw.c b/net/can/gw.c
index 3d79b12..b41acf2 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -66,7 +66,7 @@
 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
 MODULE_ALIAS("can-gw");
 
-HLIST_HEAD(cgw_list);
+static HLIST_HEAD(cgw_list);
 static struct notifier_block notifier;
 
 static struct kmem_cache *cgw_cache __read_mostly;
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index e02da7a..f459e93 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -13,7 +13,7 @@
  */
 struct ceph_x_ticket_handler {
 	struct rb_node node;
-	unsigned service;
+	unsigned int service;
 
 	struct ceph_crypto_key session_key;
 	struct ceph_timespec validity;
@@ -27,7 +27,7 @@
 
 struct ceph_x_authorizer {
 	struct ceph_buffer *buf;
-	unsigned service;
+	unsigned int service;
 	u64 nonce;
 	char reply_buf[128];  /* big enough for encrypted blob */
 };
@@ -38,7 +38,7 @@
 	bool starting;
 	u64 server_challenge;
 
-	unsigned have_keys;
+	unsigned int have_keys;
 	struct rb_root ticket_handlers;
 
 	struct ceph_x_authorizer auth_authorizer;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index cc91319..a776f75 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -441,8 +441,8 @@
  * create a fresh client instance
  */
 struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
-				       unsigned supported_features,
-				       unsigned required_features)
+				       unsigned int supported_features,
+				       unsigned int required_features)
 {
 	struct ceph_client *client;
 	struct ceph_entity_addr *myaddr = NULL;
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 0a1b53b..67bb1f1 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -20,7 +20,7 @@
 		c = c - a;  c = c - b;  c = c ^ (b >> 15);	\
 	} while (0)
 
-unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
+unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length)
 {
 	const unsigned char *k = (const unsigned char *)str;
 	__u32 a, b, c;  /* the internal state */
@@ -81,7 +81,7 @@
 /*
  * linux dcache hash
  */
-unsigned ceph_str_hash_linux(const char *str, unsigned length)
+unsigned int ceph_str_hash_linux(const char *str, unsigned int length)
 {
 	unsigned long hash = 0;
 	unsigned char c;
@@ -94,7 +94,7 @@
 }
 
 
-unsigned ceph_str_hash(int type, const char *s, unsigned len)
+unsigned int ceph_str_hash(int type, const char *s, unsigned int len)
 {
 	switch (type) {
 	case CEPH_STR_HASH_LINUX:
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index b79747c..854ac53 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -68,8 +68,8 @@
 static int bucket_perm_choose(struct crush_bucket *bucket,
 			      int x, int r)
 {
-	unsigned pr = r % bucket->size;
-	unsigned i, s;
+	unsigned int pr = r % bucket->size;
+	unsigned int i, s;
 
 	/* start a new permutation if @x has changed */
 	if (bucket->perm_x != x || bucket->perm_n == 0) {
@@ -100,13 +100,13 @@
 	for (i = 0; i < bucket->perm_n; i++)
 		dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]);
 	while (bucket->perm_n <= pr) {
-		unsigned p = bucket->perm_n;
+		unsigned int p = bucket->perm_n;
 		/* no point in swapping the final entry */
 		if (p < bucket->size - 1) {
 			i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
 				(bucket->size - p);
 			if (i) {
-				unsigned t = bucket->perm[p + i];
+				unsigned int t = bucket->perm[p + i];
 				bucket->perm[p + i] = bucket->perm[p];
 				bucket->perm[p] = t;
 			}
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 27d4ea31..54b531a 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -94,9 +94,9 @@
 	mutex_lock(&monc->mutex);
 
 	if (monc->have_mdsmap)
-		seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
+		seq_printf(s, "have mdsmap %u\n", (unsigned int)monc->have_mdsmap);
 	if (monc->have_osdmap)
-		seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
+		seq_printf(s, "have osdmap %u\n", (unsigned int)monc->have_osdmap);
 	if (monc->want_next_osdmap)
 		seq_printf(s, "want next osdmap\n");
 
@@ -146,7 +146,7 @@
 
 		if (req->r_reassert_version.epoch)
 			seq_printf(s, "\t%u'%llu",
-			   (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
+			   (unsigned int)le32_to_cpu(req->r_reassert_version.epoch),
 			   le64_to_cpu(req->r_reassert_version.version));
 		else
 			seq_printf(s, "\t");
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index f0993af..36fa6bf 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -699,7 +699,7 @@
 				 struct ceph_connection *con,
 				 int include_banner)
 {
-	unsigned global_seq = get_global_seq(con->msgr, 0);
+	unsigned int global_seq = get_global_seq(con->msgr, 0);
 	int proto;
 
 	switch (con->peer_name.type) {
@@ -816,7 +816,7 @@
 static int write_partial_msg_pages(struct ceph_connection *con)
 {
 	struct ceph_msg *msg = con->out_msg;
-	unsigned data_len = le32_to_cpu(msg->hdr.data_len);
+	unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
 	size_t len;
 	bool do_datacrc = !con->msgr->nocrc;
 	int ret;
@@ -1554,7 +1554,7 @@
 
 static int read_partial_message_pages(struct ceph_connection *con,
 				      struct page **pages,
-				      unsigned data_len, bool do_datacrc)
+				      unsigned int data_len, bool do_datacrc)
 {
 	void *p;
 	int ret;
@@ -1587,7 +1587,7 @@
 #ifdef CONFIG_BLOCK
 static int read_partial_message_bio(struct ceph_connection *con,
 				    struct bio **bio_iter, int *bio_seg,
-				    unsigned data_len, bool do_datacrc)
+				    unsigned int data_len, bool do_datacrc)
 {
 	struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
 	void *p;
@@ -1629,7 +1629,7 @@
 	struct ceph_msg *m = con->in_msg;
 	int ret;
 	int to, left;
-	unsigned front_len, middle_len, data_len;
+	unsigned int front_len, middle_len, data_len;
 	bool do_datacrc = !con->msgr->nocrc;
 	int skip;
 	u64 seq;
@@ -2345,9 +2345,9 @@
 {
 	mutex_lock(&con->mutex);
 	if (con->in_msg && con->in_msg == msg) {
-		unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
-		unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
-		unsigned data_len = le32_to_cpu(con->in_hdr.data_len);
+		unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
+		unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
+		unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
 
 		/* skip rest of message */
 		dout("con_revoke_pages %p msg %p revoked\n", con, msg);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 1845cde..10d6008 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -168,7 +168,7 @@
  */
 static void __schedule_delayed(struct ceph_mon_client *monc)
 {
-	unsigned delay;
+	unsigned int delay;
 
 	if (monc->cur_mon < 0 || __sub_expired(monc))
 		delay = 10 * HZ;
@@ -184,7 +184,7 @@
 static void __send_subscribe(struct ceph_mon_client *monc)
 {
 	dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
-	     (unsigned)monc->sub_sent, __sub_expired(monc),
+	     (unsigned int)monc->sub_sent, __sub_expired(monc),
 	     monc->want_next_osdmap);
 	if ((__sub_expired(monc) && !monc->sub_sent) ||
 	    monc->want_next_osdmap == 1) {
@@ -201,7 +201,7 @@
 
 		if (monc->want_next_osdmap) {
 			dout("__send_subscribe to 'osdmap' %u\n",
-			     (unsigned)monc->have_osdmap);
+			     (unsigned int)monc->have_osdmap);
 			ceph_encode_string(&p, end, "osdmap", 6);
 			i = p;
 			i->have = cpu_to_le64(monc->have_osdmap);
@@ -211,7 +211,7 @@
 		}
 		if (monc->want_mdsmap) {
 			dout("__send_subscribe to 'mdsmap' %u+\n",
-			     (unsigned)monc->have_mdsmap);
+			     (unsigned int)monc->have_mdsmap);
 			ceph_encode_string(&p, end, "mdsmap", 6);
 			i = p;
 			i->have = cpu_to_le64(monc->have_mdsmap);
@@ -236,7 +236,7 @@
 static void handle_subscribe_ack(struct ceph_mon_client *monc,
 				 struct ceph_msg *msg)
 {
-	unsigned seconds;
+	unsigned int seconds;
 	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;
 
 	if (msg->front.iov_len < sizeof(*h))
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 5e25405..1b0ef3c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1214,7 +1214,7 @@
 	}
 
 	if (!req->r_got_reply) {
-		unsigned bytes;
+		unsigned int bytes;
 
 		req->r_result = le32_to_cpu(rhead->result);
 		bytes = le32_to_cpu(msg->hdr.data_len);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 29ad46e..56e561a 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -38,7 +38,7 @@
 
 /* maps */
 
-static int calc_bits_of(unsigned t)
+static int calc_bits_of(unsigned int t)
 {
 	int b = 0;
 	while (t) {
@@ -154,7 +154,7 @@
 	magic = ceph_decode_32(p);
 	if (magic != CRUSH_MAGIC) {
 		pr_err("crush_decode magic %x != current %x\n",
-		       (unsigned)magic, (unsigned)CRUSH_MAGIC);
+		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
 		goto bad;
 	}
 	c->max_buckets = ceph_decode_32(p);
@@ -460,7 +460,7 @@
 
 static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
 {
-	unsigned n, m;
+	unsigned int n, m;
 
 	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
 	calc_pg_masks(pi);
@@ -970,7 +970,7 @@
 	objsetno = stripeno / su_per_object;
 
 	*ono = objsetno * sc + stripepos;
-	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);
+	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);
 
 	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
 	t = off;
@@ -998,12 +998,12 @@
 			    struct ceph_file_layout *fl,
 			    struct ceph_osdmap *osdmap)
 {
-	unsigned num, num_mask;
+	unsigned int num, num_mask;
 	struct ceph_pg pgid;
 	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
 	int poolid = le32_to_cpu(fl->fl_pg_pool);
 	struct ceph_pg_pool_info *pool;
-	unsigned ps;
+	unsigned int ps;
 
 	BUG_ON(!osdmap);
 
@@ -1045,7 +1045,7 @@
 	struct ceph_pg_mapping *pg;
 	struct ceph_pg_pool_info *pool;
 	int ruleno;
-	unsigned poolid, ps, pps, t;
+	unsigned int poolid, ps, pps, t;
 	int preferred;
 
 	poolid = le32_to_cpu(pgid.pool);
diff --git a/net/compat.c b/net/compat.c
index e055708..e240441 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -741,13 +741,13 @@
 };
 #undef AL
 
-asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
+asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
 {
 	return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
 asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
-				    unsigned vlen, unsigned int flags)
+				    unsigned int vlen, unsigned int flags)
 {
 	return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 			      flags | MSG_CMSG_COMPAT);
@@ -758,20 +758,20 @@
 	return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
-asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags)
+asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
 {
 	return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT);
 }
 
 asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
-				    unsigned flags, struct sockaddr __user *addr,
+				    unsigned int flags, struct sockaddr __user *addr,
 				    int __user *addrlen)
 {
 	return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen);
 }
 
 asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
-				    unsigned vlen, unsigned int flags,
+				    unsigned int vlen, unsigned int flags,
 				    struct compat_timespec __user *timeout)
 {
 	int datagrams;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e4fbfd6..ae6acf6 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -65,7 +65,7 @@
 	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
 }
 
-static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
 				  void *key)
 {
 	unsigned long bits = (unsigned long)key;
@@ -158,7 +158,7 @@
  *	quite explicitly by POSIX 1003.1g, don't change them without having
  *	the standard around please.
  */
-struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 				    int *peeked, int *off, int *err)
 {
 	struct sk_buff *skb;
@@ -216,7 +216,7 @@
 }
 EXPORT_SYMBOL(__skb_recv_datagram);
 
-struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
 				  int noblock, int *err)
 {
 	int peeked, off = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index c25d453..501f3cc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -208,7 +208,8 @@
 
 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
 {
-	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
+	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
+
 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
 }
 
@@ -1409,14 +1410,34 @@
  *	register_netdevice_notifier(). The notifier is unlinked into the
  *	kernel structures and may then be reused. A negative errno code
  *	is returned on a failure.
+ *
+ *	After unregistering, unregister and down device events are synthesized
+ *	for all devices on the device list and delivered to the removed
+ *	notifier, removing the need for special-case cleanup code.
  */
 
 int unregister_netdevice_notifier(struct notifier_block *nb)
 {
+	struct net_device *dev;
+	struct net *net;
 	int err;
 
 	rtnl_lock();
 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
+	if (err)
+		goto unlock;
+
+	for_each_net(net) {
+		for_each_netdev(net, dev) {
+			if (dev->flags & IFF_UP) {
+				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
+				nb->notifier_call(nb, NETDEV_DOWN, dev);
+			}
+			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
+		}
+	}
+unlock:
 	rtnl_unlock();
 	return err;
 }
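
Editor's note: with this change unregister_netdevice_notifier() replays GOING_DOWN/DOWN for running devices and UNREGISTER for every device before returning, so a notifier user no longer has to walk the device list itself at module exit. A hedged sketch of a notifier module relying on this; the callback body is hypothetical, and the event data pointer is the net_device itself in this kernel, as the loop above shows:

/* Hypothetical netdevice notifier: per-device cleanup lives only in the
 * NETDEV_DOWN/NETDEV_UNREGISTER cases, which the new
 * unregister_netdevice_notifier() replays for every device at teardown.
 */
#include <linux/module.h>
#include <linux/netdevice.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* event data is the device itself */

	switch (event) {
	case NETDEV_UP:
		pr_debug("example: %s up\n", dev->name);
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		pr_debug("example: %s going away, dropping state\n", dev->name);
		/* free any per-device state here */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
	return register_netdevice_notifier(&example_notifier);
}

static void __exit example_exit(void)
{
	/* Replays DOWN/UNREGISTER for all devices before returning. */
	unregister_netdevice_notifier(&example_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");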
@@ -3521,10 +3542,13 @@
 		break;
 
 	case GRO_DROP:
-	case GRO_MERGED_FREE:
 		kfree_skb(skb);
 		break;
 
+	case GRO_MERGED_FREE:
+		consume_skb(skb);
+		break;
+
 	case GRO_HELD:
 	case GRO_MERGED:
 		break;
@@ -4598,9 +4622,9 @@
  *
  *	Get the combination of flag bits exported through APIs to userspace.
  */
-unsigned dev_get_flags(const struct net_device *dev)
+unsigned int dev_get_flags(const struct net_device *dev)
 {
-	unsigned flags;
+	unsigned int flags;
 
 	flags = (dev->flags & ~(IFF_PROMISC |
 				IFF_ALLMULTI |
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 626698f..c4cc2bc 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -21,12 +21,35 @@
  * General list handling functions
  */
 
+static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
+			       unsigned char *addr, int addr_len,
+			       unsigned char addr_type, bool global)
+{
+	struct netdev_hw_addr *ha;
+	int alloc_size;
+
+	alloc_size = sizeof(*ha);
+	if (alloc_size < L1_CACHE_BYTES)
+		alloc_size = L1_CACHE_BYTES;
+	ha = kmalloc(alloc_size, GFP_ATOMIC);
+	if (!ha)
+		return -ENOMEM;
+	memcpy(ha->addr, addr, addr_len);
+	ha->type = addr_type;
+	ha->refcount = 1;
+	ha->global_use = global;
+	ha->synced = false;
+	list_add_tail_rcu(&ha->list, &list->list);
+	list->count++;
+
+	return 0;
+}
+
 static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
 			    unsigned char *addr, int addr_len,
 			    unsigned char addr_type, bool global)
 {
 	struct netdev_hw_addr *ha;
-	int alloc_size;
 
 	if (addr_len > MAX_ADDR_LEN)
 		return -EINVAL;
@@ -46,21 +69,7 @@
 		}
 	}
 
-
-	alloc_size = sizeof(*ha);
-	if (alloc_size < L1_CACHE_BYTES)
-		alloc_size = L1_CACHE_BYTES;
-	ha = kmalloc(alloc_size, GFP_ATOMIC);
-	if (!ha)
-		return -ENOMEM;
-	memcpy(ha->addr, addr, addr_len);
-	ha->type = addr_type;
-	ha->refcount = 1;
-	ha->global_use = global;
-	ha->synced = false;
-	list_add_tail_rcu(&ha->list, &list->list);
-	list->count++;
-	return 0;
+	return __hw_addr_create_ex(list, addr, addr_len, addr_type, global);
 }
 
 static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
@@ -377,6 +386,34 @@
  */
 
 /**
+ *	dev_uc_add_excl - Add a global secondary unicast address
+ *	@dev: device
+ *	@addr: address to add
+ */
+int dev_uc_add_excl(struct net_device *dev, unsigned char *addr)
+{
+	struct netdev_hw_addr *ha;
+	int err;
+
+	netif_addr_lock_bh(dev);
+	list_for_each_entry(ha, &dev->uc.list, list) {
+		if (!memcmp(ha->addr, addr, dev->addr_len) &&
+		    ha->type == NETDEV_HW_ADDR_T_UNICAST) {
+			err = -EEXIST;
+			goto out;
+		}
+	}
+	err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len,
+				  NETDEV_HW_ADDR_T_UNICAST, true);
+	if (!err)
+		__dev_set_rx_mode(dev);
+out:
+	netif_addr_unlock_bh(dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_uc_add_excl);
+
+/**
  *	dev_uc_add - Add a secondary unicast address
  *	@dev: device
  *	@addr: address to add
@@ -501,6 +538,34 @@
  * Multicast list handling functions
  */
 
+/**
+ *	dev_mc_add_excl - Add a global secondary multicast address
+ *	@dev: device
+ *	@addr: address to add
+ */
+int dev_mc_add_excl(struct net_device *dev, unsigned char *addr)
+{
+	struct netdev_hw_addr *ha;
+	int err;
+
+	netif_addr_lock_bh(dev);
+	list_for_each_entry(ha, &dev->mc.list, list) {
+		if (!memcmp(ha->addr, addr, dev->addr_len) &&
+		    ha->type == NETDEV_HW_ADDR_T_MULTICAST) {
+			err = -EEXIST;
+			goto out;
+		}
+	}
+	err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len,
+				  NETDEV_HW_ADDR_T_MULTICAST, true);
+	if (!err)
+		__dev_set_rx_mode(dev);
+out:
+	netif_addr_unlock_bh(dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_mc_add_excl);
+
 static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
 			bool global)
 {
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7f36b38..5c3c81a 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -150,6 +150,7 @@
 	for (i = 0; i < msg->entries; i++) {
 		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
 			msg->points[i].count++;
+			atomic_inc(&data->dm_hit_count);
 			goto out;
 		}
 	}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6d6d7d2..beacdd9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,8 @@
 #include <linux/errno.h>
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
 #include <linux/bitops.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
@@ -36,6 +38,17 @@
 }
 EXPORT_SYMBOL(ethtool_op_get_link);
 
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE;
+	info->phc_index = -1;
+	return 0;
+}
+EXPORT_SYMBOL(ethtool_op_get_ts_info);
+
 /* Handlers for each ethtool command */
 
 #define ETHTOOL_DEV_FEATURE_WORDS	((NETDEV_FEATURE_COUNT + 31) / 32)
@@ -1278,6 +1291,40 @@
 	return ret;
 }
 
+static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
+{
+	int err = 0;
+	struct ethtool_ts_info info;
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+	struct phy_device *phydev = dev->phydev;
+
+	memset(&info, 0, sizeof(info));
+	info.cmd = ETHTOOL_GET_TS_INFO;
+
+	if (phydev && phydev->drv && phydev->drv->ts_info) {
+
+		err = phydev->drv->ts_info(phydev, &info);
+
+	} else if (dev->ethtool_ops && dev->ethtool_ops->get_ts_info) {
+
+		err = ops->get_ts_info(dev, &info);
+
+	} else {
+		info.so_timestamping =
+			SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE;
+		info.phc_index = -1;
+	}
+
+	if (err)
+		return err;
+
+	if (copy_to_user(useraddr, &info, sizeof(info)))
+		err = -EFAULT;
+
+	return err;
+}
+
 /* The main entry point in this file.  Called from net/core/dev.c */
 
 int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1295,11 +1342,13 @@
 		return -EFAULT;
 
 	if (!dev->ethtool_ops) {
-		/* ETHTOOL_GDRVINFO does not require any driver support.
-		 * It is also unprivileged and does not change anything,
-		 * so we can take a shortcut to it. */
+		/* A few commands do not require any driver support,
+		 * are unprivileged, and do not change anything, so we
+		 * can take a shortcut to them. */
 		if (ethcmd == ETHTOOL_GDRVINFO)
 			return ethtool_get_drvinfo(dev, useraddr);
+		else if (ethcmd == ETHTOOL_GET_TS_INFO)
+			return ethtool_get_ts_info(dev, useraddr);
 		else
 			return -EOPNOTSUPP;
 	}
@@ -1330,6 +1379,7 @@
 	case ETHTOOL_GRXCLSRULE:
 	case ETHTOOL_GRXCLSRLALL:
 	case ETHTOOL_GFEATURES:
+	case ETHTOOL_GET_TS_INFO:
 		break;
 	default:
 		if (!capable(CAP_NET_ADMIN))
@@ -1496,6 +1546,9 @@
 	case ETHTOOL_GET_DUMP_DATA:
 		rc = ethtool_get_dump_data(dev, useraddr);
 		break;
+	case ETHTOOL_GET_TS_INFO:
+		rc = ethtool_get_ts_info(dev, useraddr);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
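
Editor's note: ETHTOOL_GET_TS_INFO is reachable from user space through the usual SIOCETHTOOL ioctl and, thanks to the shortcut added to dev_ethtool(), even for drivers with no ethtool_ops. A small user-space query sketch; building it needs uapi headers that already define struct ethtool_ts_info, and the interface name eth0 is an assumption:

/* Query timestamping capabilities via the new ETHTOOL_GET_TS_INFO command. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&info, 0, sizeof(info));
	info.cmd = ETHTOOL_GET_TS_INFO;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}

	printf("so_timestamping mask: 0x%x\n", info.so_timestamping);
	printf("PTP clock index:      %d\n", info.phc_index);
	close(fd);
	return 0;
}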
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index c02e63c..72cceb7 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -542,7 +542,8 @@
 	frh = nlmsg_data(nlh);
 	frh->family = ops->family;
 	frh->table = rule->table;
-	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
+	if (nla_put_u32(skb, FRA_TABLE, rule->table))
+		goto nla_put_failure;
 	frh->res1 = 0;
 	frh->res2 = 0;
 	frh->action = rule->action;
@@ -553,31 +554,28 @@
 		frh->flags |= FIB_RULE_UNRESOLVED;
 
 	if (rule->iifname[0]) {
-		NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);
-
+		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
+			goto nla_put_failure;
 		if (rule->iifindex == -1)
 			frh->flags |= FIB_RULE_IIF_DETACHED;
 	}
 
 	if (rule->oifname[0]) {
-		NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);
-
+		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
+			goto nla_put_failure;
 		if (rule->oifindex == -1)
 			frh->flags |= FIB_RULE_OIF_DETACHED;
 	}
 
-	if (rule->pref)
-		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);
-
-	if (rule->mark)
-		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);
-
-	if (rule->mark_mask || rule->mark)
-		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);
-
-	if (rule->target)
-		NLA_PUT_U32(skb, FRA_GOTO, rule->target);
-
+	if ((rule->pref &&
+	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
+	    (rule->mark &&
+	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
+	    ((rule->mark_mask || rule->mark) &&
+	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
+	    (rule->target &&
+	     nla_put_u32(skb, FRA_GOTO, rule->target)))
+		goto nla_put_failure;
 	if (ops->fill(rule, skb, frh) < 0)
 		goto nla_put_failure;
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 6f755cc..47a5f05 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -317,6 +317,9 @@
 		case BPF_S_ANC_CPU:
 			A = raw_smp_processor_id();
 			continue;
+		case BPF_S_ANC_ALU_XOR_X:
+			A ^= X;
+			continue;
 		case BPF_S_ANC_NLATTR: {
 			struct nlattr *nla;
 
@@ -528,7 +531,7 @@
 			 * Compare this with conditional jumps below,
 			 * where offsets are limited. --ANK (981016)
 			 */
-			if (ftest->k >= (unsigned)(flen-pc-1))
+			if (ftest->k >= (unsigned int)(flen-pc-1))
 				return -EINVAL;
 			break;
 		case BPF_S_JMP_JEQ_K:
@@ -561,6 +564,7 @@
 			ANCILLARY(HATYPE);
 			ANCILLARY(RXHASH);
 			ANCILLARY(CPU);
+			ANCILLARY(ALU_XOR_X);
 			}
 		}
 		ftest->code = code;
@@ -589,6 +593,67 @@
 }
 EXPORT_SYMBOL(sk_filter_release_rcu);
 
+static int __sk_prepare_filter(struct sk_filter *fp)
+{
+	int err;
+
+	fp->bpf_func = sk_run_filter;
+
+	err = sk_chk_filter(fp->insns, fp->len);
+	if (err)
+		return err;
+
+	bpf_jit_compile(fp);
+	return 0;
+}
+
+/**
+ *	sk_unattached_filter_create - create an unattached filter
+ *	@pfp: the unattached filter that is created
+ *	@fprog: the filter program
+ *
+ * Create a filter independent of any socket. We first run some
+ * sanity checks on it to make sure it does not explode on us later.
+ * If an error occurs or there is insufficient memory for the filter,
+ * a negative errno code is returned. On success the return is zero.
+ */
+int sk_unattached_filter_create(struct sk_filter **pfp,
+				struct sock_fprog *fprog)
+{
+	struct sk_filter *fp;
+	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+	int err;
+
+	/* Make sure new filter is there and in the right amounts. */
+	if (fprog->filter == NULL)
+		return -EINVAL;
+
+	fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+	if (!fp)
+		return -ENOMEM;
+	memcpy(fp->insns, fprog->filter, fsize);
+
+	atomic_set(&fp->refcnt, 1);
+	fp->len = fprog->len;
+
+	err = __sk_prepare_filter(fp);
+	if (err)
+		goto free_mem;
+
+	*pfp = fp;
+	return 0;
+free_mem:
+	kfree(fp);
+	return err;
+}
+EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
+
+void sk_unattached_filter_destroy(struct sk_filter *fp)
+{
+	sk_filter_release(fp);
+}
+EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
+
 /**
  *	sk_attach_filter - attach a socket filter
  *	@fprog: the filter program
@@ -619,16 +684,13 @@
 
 	atomic_set(&fp->refcnt, 1);
 	fp->len = fprog->len;
-	fp->bpf_func = sk_run_filter;
 
-	err = sk_chk_filter(fp->insns, fp->len);
+	err = __sk_prepare_filter(fp);
 	if (err) {
 		sk_filter_uncharge(sk, fp);
 		return err;
 	}
 
-	bpf_jit_compile(fp);
-
 	old_fp = rcu_dereference_protected(sk->sk_filter,
 					   sock_owned_by_user(sk));
 	rcu_assign_pointer(sk->sk_filter, fp);
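
Editor's note: sk_unattached_filter_create() makes the existing socket-filter machinery (sk_chk_filter() plus the optional BPF JIT, now factored into __sk_prepare_filter()) available to kernel code that has no socket to attach to; the resulting filter can be run against skbs with SK_RUN_FILTER(). A hedged in-kernel sketch using a trivial accept-everything program:

/* Hedged sketch: create a socket-independent classic BPF filter with the
 * new sk_unattached_filter_create()/sk_unattached_filter_destroy() pair.
 * The single-instruction program below simply accepts every packet.
 */
#include <linux/kernel.h>
#include <linux/filter.h>
#include <linux/skbuff.h>

static struct sock_filter accept_all_insns[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept up to 0xffff bytes */
};

static struct sk_filter *accept_all_filter;

static int example_filter_setup(void)
{
	struct sock_fprog fprog = {
		.len	= ARRAY_SIZE(accept_all_insns),
		.filter	= accept_all_insns,
	};

	return sk_unattached_filter_create(&accept_all_filter, &fprog);
}

static unsigned int example_filter_run(struct sk_buff *skb)
{
	/* Returns the number of bytes to keep, 0 to drop. */
	return SK_RUN_FILTER(accept_all_filter, skb);
}

static void example_filter_teardown(void)
{
	sk_unattached_filter_destroy(accept_all_filter);
}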
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 0452eb2..ddedf21 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -27,7 +27,8 @@
 static inline int
 gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
 {
-	NLA_PUT(d->skb, type, size, buf);
+	if (nla_put(d->skb, type, size, buf))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
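
Editor's note: the gen_stats.c hunk above is the simplest instance of a conversion repeated throughout this patch (br_netlink.c, fib_rules.c, neighbour.c, rtnetlink.c, ...): the NLA_PUT* macros hid a jump to a local nla_put_failure label, while the nla_put_*() functions return non-zero on failure and let the caller branch explicitly. Reduced to a hypothetical fill function, with placeholder attribute names and object layout, the idiom reads:

/* Hedged sketch of the nla_put_*() error-checking idiom used in the
 * conversions above.  EXAMPLE_ATTR_* and struct example_obj are made up;
 * only the control flow mirrors the real code.
 */
#include <net/netlink.h>

enum {
	EXAMPLE_ATTR_ID = 1,
	EXAMPLE_ATTR_NAME,
	EXAMPLE_ATTR_FLAGS,
};

struct example_obj {
	u32		id;
	u32		flags;
	const char	*name;
};

static int example_fill_info(struct sk_buff *skb, const struct example_obj *obj)
{
	if (nla_put_u32(skb, EXAMPLE_ATTR_ID, obj->id) ||
	    nla_put_string(skb, EXAMPLE_ATTR_NAME, obj->name) ||
	    (obj->flags &&			/* optional attribute */
	     nla_put_u32(skb, EXAMPLE_ATTR_FLAGS, obj->flags)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}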
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
deleted file mode 100644
index 52d0a44..0000000
--- a/net/core/kmap_skb.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <linux/highmem.h>
-
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
-{
-#ifdef CONFIG_HIGHMEM
-	BUG_ON(in_irq());
-
-	local_bh_disable();
-#endif
-	return kmap_atomic(skb_frag_page(frag));
-}
-
-static inline void kunmap_skb_frag(void *vaddr)
-{
-	kunmap_atomic(vaddr);
-#ifdef CONFIG_HIGHMEM
-	local_bh_enable();
-#endif
-}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0a68045..fadaa81 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -890,7 +890,7 @@
 {
 	unsigned long now, next;
 	struct neighbour *neigh = (struct neighbour *)arg;
-	unsigned state;
+	unsigned int state;
 	int notify = 0;
 
 	write_lock(&neigh->lock);
@@ -1500,7 +1500,7 @@
 
 static struct lock_class_key neigh_table_proxy_queue_class;
 
-void neigh_table_init_no_netlink(struct neigh_table *tbl)
+static void neigh_table_init_no_netlink(struct neigh_table *tbl)
 {
 	unsigned long now = jiffies;
 	unsigned long phsize;
@@ -1538,7 +1538,6 @@
 	tbl->last_flush = now;
 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
 }
-EXPORT_SYMBOL(neigh_table_init_no_netlink);
 
 void neigh_table_init(struct neigh_table *tbl)
 {
@@ -1768,29 +1767,29 @@
 	if (nest == NULL)
 		return -ENOBUFS;
 
-	if (parms->dev)
-		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
-
-	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
-	NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes);
-	/* approximative value for deprecated QUEUE_LEN (in packets) */
-	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN,
-		    DIV_ROUND_UP(parms->queue_len_bytes,
-				 SKB_TRUESIZE(ETH_FRAME_LEN)));
-	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
-	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
-	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
-	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
-	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
-	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
-		      parms->base_reachable_time);
-	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
-	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
-	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
-	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
-	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
-	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
-
+	if ((parms->dev &&
+	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
+	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
+	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
+	    /* approximative value for deprecated QUEUE_LEN (in packets) */
+	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
+			DIV_ROUND_UP(parms->queue_len_bytes,
+				     SKB_TRUESIZE(ETH_FRAME_LEN))) ||
+	    nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
+	    nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
+	    nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
+	    nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
+	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
+	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
+			  parms->base_reachable_time) ||
+	    nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
+	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
+			  parms->delay_probe_time) ||
+	    nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
+	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
+	    nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
+	    nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
+		goto nla_put_failure;
 	return nla_nest_end(skb, nest);
 
 nla_put_failure:
@@ -1815,12 +1814,12 @@
 	ndtmsg->ndtm_pad1   = 0;
 	ndtmsg->ndtm_pad2   = 0;
 
-	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
-	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
-	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
-	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
-	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
-
+	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
+	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
+	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
+	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
+	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+		goto nla_put_failure;
 	{
 		unsigned long now = jiffies;
 		unsigned int flush_delta = now - tbl->last_flush;
@@ -1841,7 +1840,8 @@
 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
 		rcu_read_unlock_bh();
 
-		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
+		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
+			goto nla_put_failure;
 	}
 
 	{
@@ -1866,7 +1866,8 @@
 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
 		}
 
-		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
+		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
+			goto nla_put_failure;
 	}
 
 	BUG_ON(tbl->parms.dev);
@@ -2137,7 +2138,8 @@
 	ndm->ndm_type	 = neigh->type;
 	ndm->ndm_ifindex = neigh->dev->ifindex;
 
-	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
+	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
+		goto nla_put_failure;
 
 	read_lock_bh(&neigh->lock);
 	ndm->ndm_state	 = neigh->nud_state;
@@ -2157,8 +2159,9 @@
 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
 	read_unlock_bh(&neigh->lock);
 
-	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
-	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
+	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
+	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -2187,7 +2190,8 @@
 	ndm->ndm_ifindex = pn->dev->ifindex;
 	ndm->ndm_state	 = NUD_NONE;
 
-	NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key);
+	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -2795,7 +2799,6 @@
 static struct neigh_sysctl_table {
 	struct ctl_table_header *sysctl_header;
 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
-	char *dev_name;
 } neigh_sysctl_template __read_mostly = {
 	.neigh_vars = {
 		[NEIGH_VAR_MCAST_PROBE] = {
@@ -2921,19 +2924,7 @@
 {
 	struct neigh_sysctl_table *t;
 	const char *dev_name_source = NULL;
-
-#define NEIGH_CTL_PATH_ROOT	0
-#define NEIGH_CTL_PATH_PROTO	1
-#define NEIGH_CTL_PATH_NEIGH	2
-#define NEIGH_CTL_PATH_DEV	3
-
-	struct ctl_path neigh_path[] = {
-		{ .procname = "net",	 },
-		{ .procname = "proto",	 },
-		{ .procname = "neigh",	 },
-		{ .procname = "default", },
-		{ },
-	};
+	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
 
 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
 	if (!t)
@@ -2961,7 +2952,7 @@
 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
 	} else {
-		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
+		dev_name_source = "default";
 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
@@ -2984,23 +2975,16 @@
 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
 	}
 
-	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
-	if (!t->dev_name)
-		goto free;
-
-	neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
-	neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
-
+	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
+		p_name, dev_name_source);
 	t->sysctl_header =
-		register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
+		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
 	if (!t->sysctl_header)
-		goto free_procname;
+		goto free;
 
 	p->sysctl_table = t;
 	return 0;
 
-free_procname:
-	kfree(t->dev_name);
 free:
 	kfree(t);
 err:
@@ -3013,8 +2997,7 @@
 	if (p->sysctl_table) {
 		struct neigh_sysctl_table *t = p->sysctl_table;
 		p->sysctl_table = NULL;
-		unregister_sysctl_table(t->sysctl_header);
-		kfree(t->dev_name);
+		unregister_net_sysctl_table(t->sysctl_header);
 		kfree(t);
 	}
 }
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4955862..fdf9e61 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -74,15 +74,14 @@
 			    int (*set)(struct net_device *, unsigned long))
 {
 	struct net_device *net = to_net_dev(dev);
-	char *endp;
 	unsigned long new;
 	int ret = -EINVAL;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	new = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
+	ret = kstrtoul(buf, 0, &new);
+	if (ret)
 		goto err;
 
 	if (!rtnl_trylock())
@@ -232,7 +231,7 @@
 
 static int change_flags(struct net_device *net, unsigned long new_flags)
 {
-	return dev_change_flags(net, (unsigned) new_flags);
+	return dev_change_flags(net, (unsigned int) new_flags);
 }
 
 static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
@@ -582,7 +581,7 @@
 		return err;
 	}
 
-	map = kzalloc(max_t(unsigned,
+	map = kzalloc(max_t(unsigned int,
 	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
 	    GFP_KERNEL);
 	if (!map) {
@@ -903,7 +902,7 @@
 				 const char *buf, size_t len)
 {
 	struct dql *dql = &queue->dql;
-	unsigned value;
+	unsigned int value;
 	int err;
 
 	err = kstrtouint(buf, 10, &value);
@@ -1107,7 +1106,7 @@
 		return err;
 	}
 
-	new_dev_maps = kzalloc(max_t(unsigned,
+	new_dev_maps = kzalloc(max_t(unsigned int,
 	    XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
 	if (!new_dev_maps) {
 		free_cpumask_var(mask);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 0e950fd..31a5ae5 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -83,21 +83,29 @@
 
 static int ops_init(const struct pernet_operations *ops, struct net *net)
 {
-	int err;
+	int err = -ENOMEM;
+	void *data = NULL;
+
 	if (ops->id && ops->size) {
-		void *data = kzalloc(ops->size, GFP_KERNEL);
+		data = kzalloc(ops->size, GFP_KERNEL);
 		if (!data)
-			return -ENOMEM;
+			goto out;
 
 		err = net_assign_generic(net, *ops->id, data);
-		if (err) {
-			kfree(data);
-			return err;
-		}
+		if (err)
+			goto cleanup;
 	}
+	err = 0;
 	if (ops->init)
-		return ops->init(net);
-	return 0;
+		err = ops->init(net);
+	if (!err)
+		return 0;
+
+cleanup:
+	kfree(data);
+
+out:
+	return err;
 }
 
 static void ops_free(const struct pernet_operations *ops, struct net *net)
@@ -448,12 +456,7 @@
 static int __register_pernet_operations(struct list_head *list,
 					struct pernet_operations *ops)
 {
-	int err = 0;
-	err = ops_init(ops, &init_net);
-	if (err)
-		ops_free(ops, &init_net);
-	return err;
-	
+	return ops_init(ops, &init_net);
 }
 
 static void __unregister_pernet_operations(struct pernet_operations *ops)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4d8ce93..ffb5d38 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -320,7 +320,7 @@
 				(see RFC 3260, sec. 4) */
 
 	/* MPLS */
-	unsigned nr_labels;	/* Depth of stack, 0 = no MPLS */
+	unsigned int nr_labels;	/* Depth of stack, 0 = no MPLS */
 	__be32 labels[MAX_MPLS_LABELS];
 
 	/* VLAN/SVLAN (802.1Q/Q-in-Q) */
@@ -373,10 +373,10 @@
 				  */
 	char odevname[32];
 	struct flow_state *flows;
-	unsigned cflows;	/* Concurrent flows (config) */
-	unsigned lflow;		/* Flow length  (config) */
-	unsigned nflows;	/* accumulated flows (stats) */
-	unsigned curfl;		/* current sequenced flow (state)*/
+	unsigned int cflows;	/* Concurrent flows (config) */
+	unsigned int lflow;		/* Flow length  (config) */
+	unsigned int nflows;	/* accumulated flows (stats) */
+	unsigned int curfl;		/* current sequenced flow (state)*/
 
 	u16 queue_map_min;
 	u16 queue_map_max;
@@ -592,7 +592,7 @@
 		   pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
 
 	if (pkt_dev->nr_labels) {
-		unsigned i;
+		unsigned int i;
 		seq_printf(seq, "     mpls: ");
 		for (i = 0; i < pkt_dev->nr_labels; i++)
 			seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
@@ -812,7 +812,7 @@
 
 static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
 {
-	unsigned n = 0;
+	unsigned int n = 0;
 	char c;
 	ssize_t i = 0;
 	int len;
@@ -1510,7 +1510,7 @@
 	}
 
 	if (!strcmp(name, "mpls")) {
-		unsigned n, cnt;
+		unsigned int n, cnt;
 
 		len = get_labels(&user_buffer[i], pkt_dev);
 		if (len < 0)
@@ -2324,7 +2324,7 @@
 	}
 
 	if (pkt_dev->flags & F_MPLS_RND) {
-		unsigned i;
+		unsigned int i;
 		for (i = 0; i < pkt_dev->nr_labels; i++)
 			if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
 				pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
@@ -2550,7 +2550,7 @@
 
 static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
 {
-	unsigned i;
+	unsigned int i;
 	for (i = 0; i < pkt_dev->nr_labels; i++)
 		*mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 90430b7..b442d35 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,7 +35,9 @@
 #include <linux/security.h>
 #include <linux/mutex.h>
 #include <linux/if_addr.h>
+#include <linux/if_bridge.h>
 #include <linux/pci.h>
+#include <linux/etherdevice.h>
 
 #include <asm/uaccess.h>
 
@@ -552,7 +554,7 @@
 }
 EXPORT_SYMBOL(__rta_fill);
 
-int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
+int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
 {
 	struct sock *rtnl = net->rtnl;
 	int err = 0;
@@ -607,7 +609,8 @@
 	for (i = 0; i < RTAX_MAX; i++) {
 		if (metrics[i]) {
 			valid++;
-			NLA_PUT_U32(skb, i+1, metrics[i]);
+			if (nla_put_u32(skb, i+1, metrics[i]))
+				goto nla_put_failure;
 		}
 	}
 
@@ -782,6 +785,7 @@
 	       + nla_total_size(4) /* IFLA_MTU */
 	       + nla_total_size(4) /* IFLA_LINK */
 	       + nla_total_size(4) /* IFLA_MASTER */
+	       + nla_total_size(4) /* IFLA_PROMISCUITY */
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(ext_filter_mask
@@ -807,7 +811,8 @@
 		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
 		if (!vf_port)
 			goto nla_put_failure;
-		NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
+		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
+			goto nla_put_failure;
 		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
 		if (err == -EMSGSIZE)
 			goto nla_put_failure;
@@ -891,25 +896,23 @@
 	ifm->ifi_flags = dev_get_flags(dev);
 	ifm->ifi_change = change;
 
-	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
-	NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len);
-	NLA_PUT_U8(skb, IFLA_OPERSTATE,
-		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
-	NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
-	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
-	NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
-
-	if (dev->ifindex != dev->iflink)
-		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
-	if (dev->master)
-		NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
-
-	if (dev->qdisc)
-		NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id);
-
-	if (dev->ifalias)
-		NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
+	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
+	    nla_put_u8(skb, IFLA_OPERSTATE,
+		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
+	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
+	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
+	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
+	    (dev->ifindex != dev->iflink &&
+	     nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+	    (dev->master &&
+	     nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
+	    (dev->qdisc &&
+	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
+	    (dev->ifalias &&
+	     nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
+		goto nla_put_failure;
 
 	if (1) {
 		struct rtnl_link_ifmap map = {
@@ -920,12 +923,14 @@
 			.dma         = dev->dma,
 			.port        = dev->if_port,
 		};
-		NLA_PUT(skb, IFLA_MAP, sizeof(map), &map);
+		if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+			goto nla_put_failure;
 	}
 
 	if (dev->addr_len) {
-		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-		NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
+		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
+		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
+			goto nla_put_failure;
 	}
 
 	attr = nla_reserve(skb, IFLA_STATS,
@@ -942,8 +947,9 @@
 		goto nla_put_failure;
 	copy_rtnl_link_stats64(nla_data(attr), stats);
 
-	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
-		NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
+	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
+	    nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
+		goto nla_put_failure;
 
 	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
 	    && (ext_filter_mask & RTEXT_FILTER_VF)) {
@@ -986,12 +992,13 @@
 				nla_nest_cancel(skb, vfinfo);
 				goto nla_put_failure;
 			}
-			NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
-			NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
-			NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
-				&vf_tx_rate);
-			NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
-				&vf_spoofchk);
+			if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
+			    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
+			    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
+				    &vf_tx_rate) ||
+			    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
+				    &vf_spoofchk))
+				goto nla_put_failure;
 			nla_nest_end(skb, vf);
 		}
 		nla_nest_end(skb, vfinfo);
@@ -1113,6 +1120,7 @@
 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
 	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
 	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
+	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -1634,14 +1642,14 @@
 	int err;
 	struct net_device *dev;
 	unsigned int num_queues = 1;
-	unsigned int real_num_queues = 1;
 
 	if (ops->get_tx_queues) {
-		err = ops->get_tx_queues(src_net, tb, &num_queues,
-					 &real_num_queues);
-		if (err)
+		err = ops->get_tx_queues(src_net, tb);
+		if (err < 0)
 			goto err;
+		num_queues = err;
 	}
+
 	err = -ENOMEM;
 	dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
 	if (!dev)
@@ -1947,7 +1955,7 @@
 	return skb->len;
 }
 
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
 {
 	struct net *net = dev_net(dev);
 	struct sk_buff *skb;
@@ -1972,6 +1980,267 @@
 		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
 }
 
+static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
+				   struct net_device *dev,
+				   u8 *addr, u32 pid, u32 seq,
+				   int type, unsigned int flags)
+{
+	struct nlmsghdr *nlh;
+	struct ndmsg *ndm;
+
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	ndm = nlmsg_data(nlh);
+	ndm->ndm_family  = AF_BRIDGE;
+	ndm->ndm_pad1	 = 0;
+	ndm->ndm_pad2    = 0;
+	ndm->ndm_flags	 = flags;
+	ndm->ndm_type	 = 0;
+	ndm->ndm_ifindex = dev->ifindex;
+	ndm->ndm_state   = NUD_PERMANENT;
+
+	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
+		goto nla_put_failure;
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static inline size_t rtnl_fdb_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
+}
+
+static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
+{
+	struct net *net = dev_net(dev);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
+	if (!skb)
+		goto errout;
+
+	err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto errout;
+	}
+
+	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+	return;
+errout:
+	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+	struct net *net = sock_net(skb->sk);
+	struct net_device *master = NULL;
+	struct ndmsg *ndm;
+	struct nlattr *tb[NDA_MAX+1];
+	struct net_device *dev;
+	u8 *addr;
+	int err;
+
+	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+	if (err < 0)
+		return err;
+
+	ndm = nlmsg_data(nlh);
+	if (ndm->ndm_ifindex == 0) {
+		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
+		return -EINVAL;
+	}
+
+	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
+	if (dev == NULL) {
+		pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
+		return -ENODEV;
+	}
+
+	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
+		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
+		return -EINVAL;
+	}
+
+	addr = nla_data(tb[NDA_LLADDR]);
+	if (!is_valid_ether_addr(addr)) {
+		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n");
+		return -EINVAL;
+	}
+
+	err = -EOPNOTSUPP;
+
+	/* Support fdb on the master device (the net/bridge default case) */
+	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
+	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
+		master = dev->master;
+		err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr,
+						      nlh->nlmsg_flags);
+		if (err)
+			goto out;
+		else
+			ndm->ndm_flags &= ~NTF_MASTER;
+	}
+
+	/* Embedded bridge, macvlan, and any other device support */
+	if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
+		err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr,
+						   nlh->nlmsg_flags);
+
+		if (!err) {
+			rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
+			ndm->ndm_flags &= ~NTF_SELF;
+		}
+	}
+out:
+	return err;
+}
+
+static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+	struct net *net = sock_net(skb->sk);
+	struct ndmsg *ndm;
+	struct nlattr *llattr;
+	struct net_device *dev;
+	int err = -EINVAL;
+	__u8 *addr;
+
+	if (nlmsg_len(nlh) < sizeof(*ndm))
+		return -EINVAL;
+
+	ndm = nlmsg_data(nlh);
+	if (ndm->ndm_ifindex == 0) {
+		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
+		return -EINVAL;
+	}
+
+	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
+	if (dev == NULL) {
+		pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
+		return -ENODEV;
+	}
+
+	llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
+	if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
+		pr_info("PF_BRIGDE: RTM_DELNEIGH with invalid address\n");
+		return -EINVAL;
+	}
+
+	addr = nla_data(llattr);
+	err = -EOPNOTSUPP;
+
+	/* Support fdb on the master device (the net/bridge default case) */
+	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
+	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
+		struct net_device *master = dev->master;
+
+		if (master->netdev_ops->ndo_fdb_del)
+			err = master->netdev_ops->ndo_fdb_del(ndm, dev, addr);
+
+		if (err)
+			goto out;
+		else
+			ndm->ndm_flags &= ~NTF_MASTER;
+	}
+
+	/* Embedded bridge, macvlan, and any other device support */
+	if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
+		err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr);
+
+		if (!err) {
+			rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
+			ndm->ndm_flags &= ~NTF_SELF;
+		}
+	}
+out:
+	return err;
+}
+
+static int nlmsg_populate_fdb(struct sk_buff *skb,
+			      struct netlink_callback *cb,
+			      struct net_device *dev,
+			      int *idx,
+			      struct netdev_hw_addr_list *list)
+{
+	struct netdev_hw_addr *ha;
+	int err;
+	u32 pid, seq;
+
+	pid = NETLINK_CB(cb->skb).pid;
+	seq = cb->nlh->nlmsg_seq;
+
+	list_for_each_entry(ha, &list->list, list) {
+		if (*idx < cb->args[0])
+			goto skip;
+
+		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
+					      pid, seq, 0, NTF_SELF);
+		if (err < 0)
+			return err;
+skip:
+		*idx += 1;
+	}
+	return 0;
+}
+
+/**
+ * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
+ * @skb: socket buffer in which the dump is built
+ * @cb: netlink callback state
+ * @dev: netdevice whose unicast and multicast address lists are dumped
+ * @idx: index at which to resume the dump
+ *
+ * Default netdevice operation to dump the existing device address lists.
+ * Returns the updated index.
+ */
+int ndo_dflt_fdb_dump(struct sk_buff *skb,
+		      struct netlink_callback *cb,
+		      struct net_device *dev,
+		      int idx)
+{
+	int err;
+
+	netif_addr_lock_bh(dev);
+	err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc);
+	if (err)
+		goto out;
+	nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
+out:
+	netif_addr_unlock_bh(dev);
+	return idx;
+}
+EXPORT_SYMBOL(ndo_dflt_fdb_dump);
+
+static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	int idx = 0;
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
+		if (dev->priv_flags & IFF_BRIDGE_PORT) {
+			struct net_device *master = dev->master;
+			const struct net_device_ops *ops = master->netdev_ops;
+
+			if (ops->ndo_fdb_dump)
+				idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
+		}
+
+		if (dev->netdev_ops->ndo_fdb_dump)
+			idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
+	}
+	rcu_read_unlock();
+
+	cb->args[0] = idx;
+	return skb->len;
+}
+
 /* Protected by RTNL semaphore.  */
 static struct rtattr **rta_buf;
 static int rtattr_max;
@@ -2042,7 +2311,7 @@
 		struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
 
 		while (RTA_OK(attr, attrlen)) {
-			unsigned flavor = attr->rta_type;
+			unsigned int flavor = attr->rta_type;
 			if (flavor) {
 				if (flavor > rta_max[sz_idx])
 					return -EINVAL;
@@ -2144,5 +2413,9 @@
 
 	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
 	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
+
+	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
+	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
+	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
 }
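
The three rtnl_register() calls above expose RTM_NEWNEIGH/RTM_DELNEIGH/RTM_GETNEIGH on
PF_BRIDGE and route them to the per-device ndo_fdb_add/ndo_fdb_del/ndo_fdb_dump
operations. A minimal sketch of how a driver might wire these up, assuming it simply
backs its forwarding database with the device's own unicast address list; the
prototypes are inferred from the callers above, and the foo_* names and the use of
dev_uc_add()/dev_uc_del() are illustrative only, not part of this patch:

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	/* Hypothetical driver glue for the new PF_BRIDGE FDB operations. */
	static int foo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
			       unsigned char *addr, u16 flags)
	{
		/* rtnl_fdb_add() has already validated the address */
		return dev_uc_add(dev, addr);
	}

	static int foo_fdb_del(struct ndmsg *ndm, struct net_device *dev,
			       unsigned char *addr)
	{
		return dev_uc_del(dev, addr);
	}

	static const struct net_device_ops foo_netdev_ops = {
		/* ... other ops ... */
		.ndo_fdb_add	= foo_fdb_add,
		.ndo_fdb_del	= foo_fdb_del,
		/* dump dev->uc and dev->mc via the helper exported above */
		.ndo_fdb_dump	= ndo_dflt_fdb_dump,
	};

Userspace then manages entries with RTM_NEWNEIGH/RTM_DELNEIGH requests carrying
NTF_SELF, which rtnl_fdb_add()/rtnl_fdb_del() above forward to these hooks.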
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index baf8d28..2342a72 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -67,8 +67,7 @@
 
 #include <asm/uaccess.h>
 #include <trace/events/skb.h>
-
-#include "kmap_skb.h"
+#include <linux/highmem.h>
 
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
@@ -707,10 +706,10 @@
 			}
 			return -ENOMEM;
 		}
-		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+		vaddr = kmap_atomic(skb_frag_page(f));
 		memcpy(page_address(page),
 		       vaddr + f->page_offset, skb_frag_size(f));
-		kunmap_skb_frag(vaddr);
+		kunmap_atomic(vaddr);
 		page->private = (unsigned long)head;
 		head = page;
 	}
@@ -952,9 +951,11 @@
 		goto adjust_others;
 	}
 
-	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+		       gfp_mask);
 	if (!data)
 		goto nodata;
+	size = SKB_WITH_OVERHEAD(ksize(data));
 
 	/* Copy only real data... and, alas, header. This should be
 	 * optimized for the cases when header is void.
@@ -1273,7 +1274,7 @@
 				return -ENOMEM;
 
 			nfrag->next = frag->next;
-			kfree_skb(frag);
+			consume_skb(frag);
 			frag = nfrag;
 			*fragp = frag;
 		}
@@ -1485,21 +1486,22 @@
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		end = start + skb_frag_size(f);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
 			if (copy > len)
 				copy = len;
 
-			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+			vaddr = kmap_atomic(skb_frag_page(f));
 			memcpy(to,
-			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
-			       offset - start, copy);
-			kunmap_skb_frag(vaddr);
+			       vaddr + f->page_offset + offset - start,
+			       copy);
+			kunmap_atomic(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
@@ -1545,9 +1547,9 @@
 	put_page(spd->pages[i]);
 }
 
-static inline struct page *linear_to_page(struct page *page, unsigned int *len,
-					  unsigned int *offset,
-					  struct sk_buff *skb, struct sock *sk)
+static struct page *linear_to_page(struct page *page, unsigned int *len,
+				   unsigned int *offset,
+				   struct sk_buff *skb, struct sock *sk)
 {
 	struct page *p = sk->sk_sndmsg_page;
 	unsigned int off;
@@ -1563,6 +1565,9 @@
 	} else {
 		unsigned int mlen;
 
+		/* If we are the only user of the page, we can reset offset */
+		if (page_count(p) == 1)
+			sk->sk_sndmsg_off = 0;
 		off = sk->sk_sndmsg_off;
 		mlen = PAGE_SIZE - off;
 		if (mlen < 64 && mlen < *len) {
@@ -1576,36 +1581,48 @@
 	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
 	sk->sk_sndmsg_off += *len;
 	*offset = off;
-	get_page(p);
 
 	return p;
 }
 
+static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
+			     struct page *page,
+			     unsigned int offset)
+{
+	return	spd->nr_pages &&
+		spd->pages[spd->nr_pages - 1] == page &&
+		(spd->partial[spd->nr_pages - 1].offset +
+		 spd->partial[spd->nr_pages - 1].len == offset);
+}
+
 /*
  * Fill page/offset/length into spd, if it can hold more pages.
  */
-static inline int spd_fill_page(struct splice_pipe_desc *spd,
-				struct pipe_inode_info *pipe, struct page *page,
-				unsigned int *len, unsigned int offset,
-				struct sk_buff *skb, int linear,
-				struct sock *sk)
+static bool spd_fill_page(struct splice_pipe_desc *spd,
+			  struct pipe_inode_info *pipe, struct page *page,
+			  unsigned int *len, unsigned int offset,
+			  struct sk_buff *skb, bool linear,
+			  struct sock *sk)
 {
-	if (unlikely(spd->nr_pages == pipe->buffers))
-		return 1;
+	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
+		return true;
 
 	if (linear) {
 		page = linear_to_page(page, len, &offset, skb, sk);
 		if (!page)
-			return 1;
-	} else
-		get_page(page);
-
+			return true;
+	}
+	if (spd_can_coalesce(spd, page, offset)) {
+		spd->partial[spd->nr_pages - 1].len += *len;
+		return false;
+	}
+	get_page(page);
 	spd->pages[spd->nr_pages] = page;
 	spd->partial[spd->nr_pages].len = *len;
 	spd->partial[spd->nr_pages].offset = offset;
 	spd->nr_pages++;
 
-	return 0;
+	return false;
 }
 
 static inline void __segment_seek(struct page **page, unsigned int *poff,
@@ -1622,20 +1639,20 @@
 	*plen -= off;
 }
 
-static inline int __splice_segment(struct page *page, unsigned int poff,
-				   unsigned int plen, unsigned int *off,
-				   unsigned int *len, struct sk_buff *skb,
-				   struct splice_pipe_desc *spd, int linear,
-				   struct sock *sk,
-				   struct pipe_inode_info *pipe)
+static bool __splice_segment(struct page *page, unsigned int poff,
+			     unsigned int plen, unsigned int *off,
+			     unsigned int *len, struct sk_buff *skb,
+			     struct splice_pipe_desc *spd, bool linear,
+			     struct sock *sk,
+			     struct pipe_inode_info *pipe)
 {
 	if (!*len)
-		return 1;
+		return true;
 
 	/* skip this segment if already processed */
 	if (*off >= plen) {
 		*off -= plen;
-		return 0;
+		return false;
 	}
 
 	/* ignore any bits we already processed */
@@ -1651,23 +1668,23 @@
 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
 		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
-			return 1;
+			return true;
 
 		__segment_seek(&page, &poff, &plen, flen);
 		*len -= flen;
 
 	} while (*len && plen);
 
-	return 0;
+	return false;
 }
 
 /*
- * Map linear and fragment data from the skb to spd. It reports failure if the
+ * Map linear and fragment data from the skb to spd. It reports true if the
  * pipe is full or if we already spliced the requested length.
  */
-static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
-			     unsigned int *offset, unsigned int *len,
-			     struct splice_pipe_desc *spd, struct sock *sk)
+static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
+			      unsigned int *offset, unsigned int *len,
+			      struct splice_pipe_desc *spd, struct sock *sk)
 {
 	int seg;
 
@@ -1677,8 +1694,8 @@
 	if (__splice_segment(virt_to_page(skb->data),
 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
 			     skb_headlen(skb),
-			     offset, len, skb, spd, 1, sk, pipe))
-		return 1;
+			     offset, len, skb, spd, true, sk, pipe))
+		return true;
 
 	/*
 	 * then map the fragments
@@ -1688,11 +1705,11 @@
 
 		if (__splice_segment(skb_frag_page(f),
 				     f->page_offset, skb_frag_size(f),
-				     offset, len, skb, spd, 0, sk, pipe))
-			return 1;
+				     offset, len, skb, spd, false, sk, pipe))
+			return true;
 	}
 
-	return 0;
+	return false;
 }
 
 /*
@@ -1705,8 +1722,8 @@
 		    struct pipe_inode_info *pipe, unsigned int tlen,
 		    unsigned int flags)
 {
-	struct partial_page partial[PIPE_DEF_BUFFERS];
-	struct page *pages[PIPE_DEF_BUFFERS];
+	struct partial_page partial[MAX_SKB_FRAGS];
+	struct page *pages[MAX_SKB_FRAGS];
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
@@ -1718,9 +1735,6 @@
 	struct sock *sk = skb->sk;
 	int ret = 0;
 
-	if (splice_grow_spd(pipe, &spd))
-		return -ENOMEM;
-
 	/*
 	 * __skb_splice_bits() only fails if the output has no room left,
 	 * so no point in going over the frag_list for the error case.
@@ -1756,7 +1770,6 @@
 		lock_sock(sk);
 	}
 
-	splice_shrink_spd(pipe, &spd);
 	return ret;
 }
 
@@ -1804,10 +1817,10 @@
 			if (copy > len)
 				copy = len;
 
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			memcpy(vaddr + frag->page_offset + offset - start,
 			       from, copy);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
@@ -1867,21 +1880,21 @@
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		end = start + skb_frag_size(frag);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			csum2 = csum_partial(vaddr + frag->page_offset +
 					     offset - start, copy, 0);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
@@ -1953,12 +1966,12 @@
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			csum2 = csum_partial_copy_nocheck(vaddr +
 							  frag->page_offset +
 							  offset - start, to,
 							  copy, 0);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
@@ -2478,7 +2491,7 @@
 
 		if (abs_offset < block_limit) {
 			if (!st->frag_data)
-				st->frag_data = kmap_skb_frag(frag);
+				st->frag_data = kmap_atomic(skb_frag_page(frag));
 
 			*data = (u8 *) st->frag_data + frag->page_offset +
 				(abs_offset - st->stepped_offset);
@@ -2487,7 +2500,7 @@
 		}
 
 		if (st->frag_data) {
-			kunmap_skb_frag(st->frag_data);
+			kunmap_atomic(st->frag_data);
 			st->frag_data = NULL;
 		}
 
@@ -2496,7 +2509,7 @@
 	}
 
 	if (st->frag_data) {
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 		st->frag_data = NULL;
 	}
 
@@ -2524,7 +2537,7 @@
 void skb_abort_seq_read(struct skb_seq_state *st)
 {
 	if (st->frag_data)
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 }
 EXPORT_SYMBOL(skb_abort_seq_read);
 
@@ -3164,7 +3177,7 @@
 	int len = skb->len;
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
+	    (unsigned int)sk->sk_rcvbuf)
 		return -ENOMEM;
 
 	skb_orphan(skb);
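
With kmap_skb_frag()/kunmap_skb_frag() gone, fragment pages are mapped with the plain
kmap_atomic()/kunmap_atomic() pair from <linux/highmem.h>. A minimal sketch of the
resulting access pattern; the helper name is hypothetical:

	#include <linux/skbuff.h>
	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Copy fragment i of an skb into "to" using the atomic kmap API
	 * that replaces kmap_skb_frag()/kunmap_skb_frag(). */
	static void copy_one_frag(struct sk_buff *skb, int i, void *to)
	{
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u8 *vaddr = kmap_atomic(skb_frag_page(frag));

		memcpy(to, vaddr + frag->page_offset, skb_frag_size(frag));
		kunmap_atomic(vaddr);
	}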
diff --git a/net/core/sock.c b/net/core/sock.c
index b2e14c0..0431aaf 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -389,7 +389,7 @@
 
 	skb->dev = NULL;
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
 	}
@@ -406,7 +406,7 @@
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else if (sk_add_backlog(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
@@ -561,7 +561,7 @@
 			sock_valbool_flag(sk, SOCK_DBG, valbool);
 		break;
 	case SO_REUSEADDR:
-		sk->sk_reuse = valbool;
+		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
 		break;
 	case SO_TYPE:
 	case SO_PROTOCOL:
@@ -1534,7 +1534,7 @@
  */
 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
 {
-	if ((unsigned)size <= sysctl_optmem_max &&
+	if ((unsigned int)size <= sysctl_optmem_max &&
 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
 		void *mem;
 		/* First do the add, to avoid the race if kmalloc
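
sk_rcvqueues_full() and sk_add_backlog() now take the limit to enforce as an explicit
argument instead of always reading sk->sk_rcvbuf internally. A rough sketch of a
protocol receive path using the new three-argument form; the function name and the
choice of sk->sk_rcvbuf as the limit simply mirror the generic code above:

	#include <net/sock.h>

	static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
	{
		int rc = 0;

		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk)) {
			rc = sk_backlog_rcv(sk, skb);
		} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
			/* backlog would exceed the given limit: drop */
			bh_unlock_sock(sk);
			atomic_inc(&sk->sk_drops);
			kfree_skb(skb);
			return -ENOBUFS;
		}
		bh_unlock_sock(sk);
		return rc;
	}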
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0c28508..a7c3684 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -14,6 +14,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/kmemleak.h>
 
 #include <net/ip.h>
 #include <net/sock.h>
@@ -202,12 +203,6 @@
 	{ }
 };
 
-__net_initdata struct ctl_path net_core_path[] = {
-	{ .procname = "net", },
-	{ .procname = "core", },
-	{ },
-};
-
 static __net_init int sysctl_core_net_init(struct net *net)
 {
 	struct ctl_table *tbl;
@@ -223,8 +218,7 @@
 		tbl[0].data = &net->core.sysctl_somaxconn;
 	}
 
-	net->core.sysctl_hdr = register_net_sysctl_table(net,
-			net_core_path, tbl);
+	net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
 	if (net->core.sysctl_hdr == NULL)
 		goto err_reg;
 
@@ -254,10 +248,7 @@
 
 static __init int sysctl_core_init(void)
 {
-	static struct ctl_table empty[1];
-
-	register_sysctl_paths(net_core_path, empty);
-	register_net_sysctl_rotable(net_core_path, net_core_table);
+	register_net_sysctl(&init_net, "net/core", net_core_table);
 	return register_pernet_subsys(&sysctl_core_ops);
 }
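
register_net_sysctl() takes the sysctl directory as a plain "net/..." string,
replacing the struct ctl_path arrays previously passed to register_sysctl_paths()
and register_net_sysctl_table(). A minimal per-namespace registration sketch; the
table name and contents are only an example:

	#include <linux/sysctl.h>
	#include <net/net_namespace.h>

	static int example_value;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example_value",
			.data		= &example_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static struct ctl_table_header *example_hdr;

	static int __net_init example_sysctl_init(struct net *net)
	{
		/* path string instead of a struct ctl_path array */
		example_hdr = register_net_sysctl(net, "net/example",
						  example_table);
		return example_hdr ? 0 : -ENOMEM;
	}

	static void __net_exit example_sysctl_exit(struct net *net)
	{
		unregister_net_sysctl_table(example_hdr);
	}

	/* passed to register_pernet_subsys() at module/init time */
	static struct pernet_operations example_net_ops = {
		.init = example_sysctl_init,
		.exit = example_sysctl_exit,
	};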
 
diff --git a/net/core/utils.c b/net/core/utils.c
index dc3c3fa..39895a6 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -58,14 +58,11 @@
 	int i;
 
 	l = 0;
-	for (i = 0; i < 4; i++)
-	{
+	for (i = 0; i < 4; i++)	{
 		l <<= 8;
-		if (*str != '\0')
-		{
+		if (*str != '\0') {
 			val = 0;
-			while (*str != '\0' && *str != '.' && *str != '\n')
-			{
+			while (*str != '\0' && *str != '.' && *str != '\n') {
 				val *= 10;
 				val += *str - '0';
 				str++;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d860530..8dfa1da 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -178,6 +178,7 @@
 	[DCB_ATTR_IEEE_ETS]	    = {.len = sizeof(struct ieee_ets)},
 	[DCB_ATTR_IEEE_PFC]	    = {.len = sizeof(struct ieee_pfc)},
 	[DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
+	[DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
 };
 
 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
@@ -1205,13 +1206,15 @@
 		if (!app)
 			goto nla_put_failure;
 
-		if (app_info_type)
-			NLA_PUT(skb, app_info_type, sizeof(info), &info);
+		if (app_info_type &&
+		    nla_put(skb, app_info_type, sizeof(info), &info))
+			goto nla_put_failure;
 
-		for (i = 0; i < app_count; i++)
-			NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
-				&table[i]);
-
+		for (i = 0; i < app_count; i++) {
+			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
+				    &table[i]))
+				goto nla_put_failure;
+		}
 		nla_nest_end(skb, app);
 	}
 	err = 0;
@@ -1230,8 +1233,8 @@
 	int dcbx;
 	int err = -EMSGSIZE;
 
-	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
+	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
+		goto nla_put_failure;
 	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
 	if (!ieee)
 		goto nla_put_failure;
@@ -1239,15 +1242,28 @@
 	if (ops->ieee_getets) {
 		struct ieee_ets ets;
 		err = ops->ieee_getets(netdev, &ets);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
+			goto nla_put_failure;
+	}
+
+	if (ops->ieee_getmaxrate) {
+		struct ieee_maxrate maxrate;
+		err = ops->ieee_getmaxrate(netdev, &maxrate);
+		if (!err) {
+			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
+				      sizeof(maxrate), &maxrate);
+			if (err)
+				goto nla_put_failure;
+		}
 	}
 
 	if (ops->ieee_getpfc) {
 		struct ieee_pfc pfc;
 		err = ops->ieee_getpfc(netdev, &pfc);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
+			goto nla_put_failure;
 	}
 
 	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
@@ -1278,15 +1294,17 @@
 	if (ops->ieee_peer_getets) {
 		struct ieee_ets ets;
 		err = ops->ieee_peer_getets(netdev, &ets);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
+			goto nla_put_failure;
 	}
 
 	if (ops->ieee_peer_getpfc) {
 		struct ieee_pfc pfc;
 		err = ops->ieee_peer_getpfc(netdev, &pfc);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
+			goto nla_put_failure;
 	}
 
 	if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1340,10 +1358,11 @@
 			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
 					  &prio, &pgid, &tc_pct, &up_map);
 
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
+		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
+		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
+		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
+		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
+			goto nla_put_failure;
 		nla_nest_end(skb, tc_nest);
 	}
 
@@ -1356,7 +1375,8 @@
 		else
 			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
 					   &tc_pct);
-		NLA_PUT_U8(skb, i, tc_pct);
+		if (nla_put_u8(skb, i, tc_pct))
+			goto nla_put_failure;
 	}
 	nla_nest_end(skb, pg);
 	return 0;
@@ -1373,8 +1393,8 @@
 	int dcbx, i, err = -EMSGSIZE;
 	u8 value;
 
-	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
+	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
+		goto nla_put_failure;
 	cee = nla_nest_start(skb, DCB_ATTR_CEE);
 	if (!cee)
 		goto nla_put_failure;
@@ -1401,7 +1421,8 @@
 
 		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
 			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
-			NLA_PUT_U8(skb, i, value);
+			if (nla_put_u8(skb, i, value))
+				goto nla_put_failure;
 		}
 		nla_nest_end(skb, pfc_nest);
 	}
@@ -1454,8 +1475,9 @@
 
 		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
 		     i++)
-			if (!ops->getfeatcfg(netdev, i, &value))
-				NLA_PUT_U8(skb, i, value);
+			if (!ops->getfeatcfg(netdev, i, &value) &&
+			    nla_put_u8(skb, i, value))
+				goto nla_put_failure;
 
 		nla_nest_end(skb, feat);
 	}
@@ -1464,15 +1486,17 @@
 	if (ops->cee_peer_getpg) {
 		struct cee_pg pg;
 		err = ops->cee_peer_getpg(netdev, &pg);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
+			goto nla_put_failure;
 	}
 
 	if (ops->cee_peer_getpfc) {
 		struct cee_pfc pfc;
 		err = ops->cee_peer_getpfc(netdev, &pfc);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
+			goto nla_put_failure;
 	}
 
 	if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1589,6 +1613,14 @@
 			goto err;
 	}
 
+	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
+		struct ieee_maxrate *maxrate =
+			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
+		err = ops->ieee_setmaxrate(netdev, maxrate);
+		if (err)
+			goto err;
+	}
+
 	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
 		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
 		err = ops->ieee_setpfc(netdev, pfc);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 70bfaf2..8c67bed 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -100,7 +100,7 @@
 
 	DCCP_BUG_ON(hc->tx_t_ipi == 0);
 	ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
-		       hc->tx_s, (unsigned)(hc->tx_x >> 6));
+		       hc->tx_s, (unsigned int)(hc->tx_x >> 6));
 }
 
 static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
@@ -153,9 +153,9 @@
 
 	if (hc->tx_x != old_x) {
 		ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, "
-			       "X_recv=%u\n", (unsigned)(old_x >> 6),
-			       (unsigned)(hc->tx_x >> 6), hc->tx_x_calc,
-			       (unsigned)(hc->tx_x_recv >> 6));
+			       "X_recv=%u\n", (unsigned int)(old_x >> 6),
+			       (unsigned int)(hc->tx_x >> 6), hc->tx_x_calc,
+			       (unsigned int)(hc->tx_x_recv >> 6));
 
 		ccid3_update_send_interval(hc);
 	}
@@ -425,8 +425,8 @@
 			       "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
 			       dccp_role(sk), sk, hc->tx_rtt, r_sample,
 			       hc->tx_s, hc->tx_p, hc->tx_x_calc,
-			       (unsigned)(hc->tx_x_recv >> 6),
-			       (unsigned)(hc->tx_x >> 6));
+			       (unsigned int)(hc->tx_x_recv >> 6),
+			       (unsigned int)(hc->tx_x >> 6));
 
 	/* unschedule no feedback timer */
 	sk_stop_timer(sk, &hc->tx_no_feedback_timer);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 29d6bb6..9040be0 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -75,7 +75,7 @@
 				     * state, about 60 seconds */
 
 /* RFC 1122, 4.2.3.1 initial RTO value */
-#define DCCP_TIMEOUT_INIT ((unsigned)(3 * HZ))
+#define DCCP_TIMEOUT_INIT ((unsigned int)(3 * HZ))
 
 /*
  * The maximum back-off value for retransmissions. This is needed for
@@ -84,7 +84,7 @@
  *  - feature-negotiation retransmission (sec. 6.6.3),
  *  - Acks in client-PARTOPEN state (sec. 8.1.5).
  */
-#define DCCP_RTO_MAX ((unsigned)(64 * HZ))
+#define DCCP_RTO_MAX ((unsigned int)(64 * HZ))
 
 /*
  * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4
@@ -287,9 +287,9 @@
 extern int dccp_child_process(struct sock *parent, struct sock *child,
 			      struct sk_buff *skb);
 extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-				  struct dccp_hdr *dh, unsigned len);
+				  struct dccp_hdr *dh, unsigned int len);
 extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
-				const struct dccp_hdr *dh, const unsigned len);
+				const struct dccp_hdr *dh, const unsigned int len);
 
 extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
 extern void dccp_destroy_sock(struct sock *sk);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 51d5fe5..bc93a33 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -285,7 +285,7 @@
 }
 
 static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
-				  const struct dccp_hdr *dh, const unsigned len)
+				  const struct dccp_hdr *dh, const unsigned int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 
@@ -366,7 +366,7 @@
 }
 
 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
-			 const struct dccp_hdr *dh, const unsigned len)
+			 const struct dccp_hdr *dh, const unsigned int len)
 {
 	if (dccp_check_seqno(sk, skb))
 		goto discard;
@@ -388,7 +388,7 @@
 static int dccp_rcv_request_sent_state_process(struct sock *sk,
 					       struct sk_buff *skb,
 					       const struct dccp_hdr *dh,
-					       const unsigned len)
+					       const unsigned int len)
 {
 	/*
 	 *  Step 4: Prepare sequence numbers in REQUEST
@@ -521,7 +521,7 @@
 static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
 						   struct sk_buff *skb,
 						   const struct dccp_hdr *dh,
-						   const unsigned len)
+						   const unsigned int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
@@ -572,7 +572,7 @@
 }
 
 int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-			   struct dccp_hdr *dh, unsigned len)
+			   struct dccp_hdr *dh, unsigned int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index caf6e17..07f5579 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -574,6 +574,11 @@
 	kfree(inet_rsk(req)->opt);
 }
 
+void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
+{
+}
+EXPORT_SYMBOL(dccp_syn_ack_timeout);
+
 static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
 	.family		= PF_INET,
 	.obj_size	= sizeof(struct dccp_request_sock),
@@ -581,6 +586,7 @@
 	.send_ack	= dccp_reqsk_send_ack,
 	.destructor	= dccp_v4_reqsk_destructor,
 	.send_reset	= dccp_v4_ctl_send_reset,
+	.syn_ack_timeout = dccp_syn_ack_timeout,
 };
 
 int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4dc588f..fa9512d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -343,6 +343,7 @@
 	.send_ack	= dccp_reqsk_send_ack,
 	.destructor	= dccp_v6_reqsk_destructor,
 	.send_reset	= dccp_v6_ctl_send_reset,
+	.syn_ack_timeout = dccp_syn_ack_timeout,
 };
 
 static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
@@ -579,7 +580,7 @@
 	newnp->pktoptions = NULL;
 	if (ireq6->pktopts != NULL) {
 		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
-		kfree_skb(ireq6->pktopts);
+		consume_skb(ireq6->pktopts);
 		ireq6->pktopts = NULL;
 		if (newnp->pktoptions)
 			skb_set_owner_r(newnp->pktoptions, newsk);
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 4234882..607ab71 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -98,18 +98,11 @@
 	{ }
 };
 
-static struct ctl_path dccp_path[] = {
-	{ .procname = "net", },
-	{ .procname = "dccp", },
-	{ .procname = "default", },
-	{ }
-};
-
 static struct ctl_table_header *dccp_table_header;
 
 int __init dccp_sysctl_init(void)
 {
-	dccp_table_header = register_sysctl_paths(dccp_path,
+	dccp_table_header = register_net_sysctl(&init_net, "net/dccp/default",
 			dccp_default_table);
 
 	return dccp_table_header != NULL ? 0 : -ENOMEM;
@@ -118,7 +111,7 @@
 void dccp_sysctl_exit(void)
 {
 	if (dccp_table_header != NULL) {
-		unregister_sysctl_table(dccp_table_header);
+		unregister_net_sysctl_table(dccp_table_header);
 		dccp_table_header = NULL;
 	}
 }
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 4136987..2ba1a28 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -250,7 +250,7 @@
 static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
 {
 	int i;
-	unsigned hash = addr->sdn_objnum;
+	unsigned int hash = addr->sdn_objnum;
 
 	if (hash == 0) {
 		hash = addr->sdn_objnamel;
@@ -1844,9 +1844,9 @@
  * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
  * make much practical difference.
  */
-unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
+unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
 {
-	unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
+	unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
 	if (dev) {
 		struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 		mtu -= LL_RESERVED_SPACE(dev);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index c00e307..f3924ab 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -209,15 +209,7 @@
 	struct dn_dev_sysctl_table *t;
 	int i;
 
-#define DN_CTL_PATH_DEV	3
-
-	struct ctl_path dn_ctl_path[] = {
-		{ .procname = "net",  },
-		{ .procname = "decnet",  },
-		{ .procname = "conf",  },
-		{ /* to be set */ },
-		{ },
-	};
+	char path[sizeof("net/decnet/conf/") + IFNAMSIZ];
 
 	t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
 	if (t == NULL)
@@ -228,15 +220,12 @@
 		t->dn_dev_vars[i].data = ((char *)parms) + offset;
 	}
 
-	if (dev) {
-		dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name;
-	} else {
-		dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name;
-	}
+	snprintf(path, sizeof(path), "net/decnet/conf/%s",
+		dev? dev->name : parms->name);
 
 	t->dn_dev_vars[0].extra1 = (void *)dev;
 
-	t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars);
+	t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars);
 	if (t->sysctl_header == NULL)
 		kfree(t);
 	else
@@ -248,7 +237,7 @@
 	if (parms->sysctl) {
 		struct dn_dev_sysctl_table *t = parms->sysctl;
 		parms->sysctl = NULL;
-		unregister_sysctl_table(t->sysctl_header);
+		unregister_net_sysctl_table(t->sysctl_header);
 		kfree(t);
 	}
 }
@@ -694,13 +683,13 @@
 	ifm->ifa_scope = ifa->ifa_scope;
 	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
 
-	if (ifa->ifa_address)
-		NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address);
-	if (ifa->ifa_local)
-		NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local);
-	if (ifa->ifa_label[0])
-		NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
-
+	if ((ifa->ifa_address &&
+	     nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) ||
+	    (ifa->ifa_local &&
+	     nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) ||
+	    (ifa->ifa_label[0] &&
+	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 9e885f1..65a8cd7 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -302,11 +302,12 @@
 		struct rtattr *attr = RTA_DATA(rta->rta_mx);
 
 		while(RTA_OK(attr, attrlen)) {
-			unsigned flavour = attr->rta_type;
+			unsigned int flavour = attr->rta_type;
+
 			if (flavour) {
 				if (flavour > RTAX_MAX)
 					goto err_inval;
-				fi->fib_metrics[flavour-1] = *(unsigned*)RTA_DATA(attr);
+				fi->fib_metrics[flavour-1] = *(unsigned int *)RTA_DATA(attr);
 			}
 			attr = RTA_NEXT(attr, attrlen);
 		}
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index f6544b2..58084f3 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -588,7 +588,7 @@
 	   number of warnings when compiling with -W --ANK
 	 */
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
+	    (unsigned int)sk->sk_rcvbuf) {
 		err = -ENOMEM;
 		goto out;
 	}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index e446e85..b952f88 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -209,7 +209,7 @@
  *
  * Returns: The number of times the packet has been sent previously
  */
-static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb,
+static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb,
 					     gfp_t gfp)
 {
 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -240,7 +240,7 @@
 {
 	struct dn_scp *scp = DN_SK(sk);
 	struct sk_buff *skb;
-	unsigned reduce_win = 0;
+	unsigned int reduce_win = 0;
 
 	/*
 	 * First we check for otherdata/linkservice messages
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 80a3de4..7e1f878 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -122,7 +122,7 @@
 static void dn_run_flush(unsigned long dummy);
 
 static struct dn_rt_hash_bucket *dn_rt_hash_table;
-static unsigned dn_rt_hash_mask;
+static unsigned int dn_rt_hash_mask;
 
 static struct timer_list dn_route_timer;
 static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
@@ -149,13 +149,13 @@
 	dst_destroy_metrics_generic(dst);
 }
 
-static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
+static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
 {
 	__u16 tmp = (__u16 __force)(src ^ dst);
 	tmp ^= (tmp >> 3);
 	tmp ^= (tmp >> 5);
 	tmp ^= (tmp >> 10);
-	return dn_rt_hash_mask & (unsigned)tmp;
+	return dn_rt_hash_mask & (unsigned int)tmp;
 }
 
 static inline void dnrt_free(struct dn_route *rt)
@@ -297,7 +297,7 @@
 		(fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
 }
 
-static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
+static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
 {
 	struct dn_route *rth;
 	struct dn_route __rcu **rthp;
@@ -934,8 +934,8 @@
 	struct dn_route *rt = NULL;
 	struct net_device *dev_out = NULL, *dev;
 	struct neighbour *neigh = NULL;
-	unsigned hash;
-	unsigned flags = 0;
+	unsigned int hash;
+	unsigned int flags = 0;
 	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
 	int err;
 	int free_res = 0;
@@ -1209,7 +1209,7 @@
  */
 static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
 {
-	unsigned hash = dn_hash(flp->saddr, flp->daddr);
+	unsigned int hash = dn_hash(flp->saddr, flp->daddr);
 	struct dn_route *rt = NULL;
 
 	if (!(flags & MSG_TRYHARD)) {
@@ -1275,7 +1275,7 @@
 	struct net_device *out_dev = NULL;
 	struct dn_dev *dn_db;
 	struct neighbour *neigh = NULL;
-	unsigned hash;
+	unsigned int hash;
 	int flags = 0;
 	__le16 gateway = 0;
 	__le16 local_src = 0;
@@ -1490,7 +1490,7 @@
 {
 	struct dn_route *rt;
 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
-	unsigned hash = dn_hash(cb->src, cb->dst);
+	unsigned int hash = dn_hash(cb->src, cb->dst);
 
 	if (skb_dst(skb))
 		return 0;
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index f65c9dd..e65f2c8 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -177,11 +177,11 @@
 	return 1;
 }
 
-unsigned dnet_addr_type(__le16 addr)
+unsigned int dnet_addr_type(__le16 addr)
 {
 	struct flowidn fld = { .daddr = addr };
 	struct dn_fib_res res;
-	unsigned ret = RTN_UNICAST;
+	unsigned int ret = RTN_UNICAST;
 	struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
 
 	res.r = NULL;
@@ -204,11 +204,11 @@
 	frh->src_len = r->src_len;
 	frh->tos = 0;
 
-	if (r->dst_len)
-		NLA_PUT_LE16(skb, FRA_DST, r->dst);
-	if (r->src_len)
-		NLA_PUT_LE16(skb, FRA_SRC, r->src);
-
+	if ((r->dst_len &&
+	     nla_put_le16(skb, FRA_DST, r->dst)) ||
+	    (r->src_len &&
+	     nla_put_le16(skb, FRA_SRC, r->src)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 02e75d1..a55eecca 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -351,20 +351,14 @@
 	{ }
 };
 
-static struct ctl_path dn_path[] = {
-	{ .procname = "net", },
-	{ .procname = "decnet", },
-	{ }
-};
-
 void dn_register_sysctl(void)
 {
-	dn_table_header = register_sysctl_paths(dn_path, dn_table);
+	dn_table_header = register_net_sysctl(&init_net, "net/decnet", dn_table);
 }
 
 void dn_unregister_sysctl(void)
 {
-	unregister_sysctl_table(dn_table_header);
+	unregister_net_sysctl_table(dn_table_header);
 }
 
 #else  /* CONFIG_SYSCTL */
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index c73bba3..6f70ea9 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -38,7 +38,7 @@
 MODULE_AUTHOR("Wang Lei");
 MODULE_LICENSE("GPL");
 
-unsigned dns_resolver_debug;
+unsigned int dns_resolver_debug;
 module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
 
diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h
index 189ca9e..17c7886 100644
--- a/net/dns_resolver/internal.h
+++ b/net/dns_resolver/internal.h
@@ -31,7 +31,7 @@
 /*
  * debug tracing
  */
-extern unsigned dns_resolver_debug;
+extern unsigned int dns_resolver_debug;
 
 #define	kdebug(FMT, ...)				\
 do {							\
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 71b5edc..fa14ca7 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -617,7 +617,7 @@
 	if (sk == NULL)
 		goto out;
 
-	sk->sk_reuse = 1;
+	sk->sk_reuse = SK_CAN_REUSE;
 	sock->ops = &econet_ops;
 	sock_init_data(sock, sk);
 
@@ -1012,7 +1012,7 @@
 		return error;
 	}
 
-	udpsock->sk->sk_reuse = 1;
+	udpsock->sk->sk_reuse = SK_CAN_REUSE;
 	udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
 						    from interrupts */
 
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index bf10a31..5889a6c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -77,7 +77,7 @@
  */
 int eth_header(struct sk_buff *skb, struct net_device *dev,
 	       unsigned short type,
-	       const void *daddr, const void *saddr, unsigned len)
+	       const void *daddr, const void *saddr, unsigned int len)
 {
 	struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
 
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 3685158..58c8895 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -371,7 +371,7 @@
 static int lowpan_header_create(struct sk_buff *skb,
 			   struct net_device *dev,
 			   unsigned short type, const void *_daddr,
-			   const void *_saddr, unsigned len)
+			   const void *_saddr, unsigned int len)
 {
 	u8 tmp, iphc0, iphc1, *hc06_ptr;
 	struct ipv6hdr *hdr;
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 1b09eaab..6fbb2ad 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -44,8 +44,8 @@
 	struct ieee802154_addr src_addr;
 	struct ieee802154_addr dst_addr;
 
-	unsigned bound:1;
-	unsigned want_ack:1;
+	unsigned int bound:1;
+	unsigned int want_ack:1;
 };
 
 static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -206,7 +206,7 @@
 		struct msghdr *msg, size_t size)
 {
 	struct net_device *dev;
-	unsigned mtu;
+	unsigned int mtu;
 	struct sk_buff *skb;
 	struct dgram_sock *ro = dgram_sk(sk);
 	int hlen, tlen;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index adaf462..ca92587 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -63,15 +63,14 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
-			addr->hwaddr);
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
+		    addr->hwaddr) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap))
+		goto nla_put_failure;
 
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
@@ -92,14 +91,13 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
-	NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -119,20 +117,22 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	if (addr->addr_type == IEEE802154_ADDR_LONG)
-		NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
-				addr->hwaddr);
-	else
-		NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
-				addr->short_addr);
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr))
+		goto nla_put_failure;
+	if (addr->addr_type == IEEE802154_ADDR_LONG) {
+		if (nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
+			    addr->hwaddr))
+			goto nla_put_failure;
+	} else {
+		if (nla_put_u16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
+				addr->short_addr))
+			goto nla_put_failure;
+	}
+	if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -151,13 +151,12 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -177,13 +176,13 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-	NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr);
-	NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) ||
+	    nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -204,19 +203,17 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-	NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
-	NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);
-
-	if (edl)
-		NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) ||
+	    (edl &&
+	     nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl)))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -235,13 +232,12 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -266,16 +262,16 @@
 	phy = ieee802154_mlme_ops(dev)->get_phy(dev);
 	BUG_ON(!phy);
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-		dev->dev_addr);
-	NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
-		ieee802154_mlme_ops(dev)->get_short_addr(dev));
-	NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
-		ieee802154_mlme_ops(dev)->get_pan_id(dev));
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR,
+			ieee802154_mlme_ops(dev)->get_short_addr(dev)) ||
+	    nla_put_u16(msg, IEEE802154_ATTR_PAN_ID,
+			ieee802154_mlme_ops(dev)->get_pan_id(dev)))
+		goto nla_put_failure;
 	wpan_phy_put(phy);
 	return genlmsg_end(msg, hdr);
 
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index c64a38d..3bdc430 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -53,18 +53,18 @@
 		goto out;
 
 	mutex_lock(&phy->pib_lock);
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page);
-	NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel);
+	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
+		goto nla_put_failure;
 	for (i = 0; i < 32; i++) {
 		if (phy->channels_supported[i])
 			buf[pages++] = phy->channels_supported[i] | (i << 27);
 	}
-	if (pages)
-		NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
-				pages * sizeof(uint32_t), buf);
-
+	if (pages &&
+	    nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
+		    pages * sizeof(uint32_t), buf))
+		goto nla_put_failure;
 	mutex_unlock(&phy->pib_lock);
 	kfree(buf);
 	return genlmsg_end(msg, hdr);
@@ -245,9 +245,9 @@
 			goto dev_unregister;
 	}
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+		goto nla_put_failure;
 	dev_put(dev);
 
 	wpan_phy_put(phy);
@@ -333,10 +333,9 @@
 
 	rtnl_unlock();
 
-
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name))
+		goto nla_put_failure;
 	wpan_phy_put(phy);
 
 	return ieee802154_nl_reply(msg, info);
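
The conversion pattern used throughout this series: the NLA_PUT*() macros, which
jumped to a local nla_put_failure label from inside the macro, are replaced by the
nla_put*() functions whose return values are checked explicitly and usually OR-ed
into a single test. A minimal sketch of the resulting fill-function shape, using
attributes that appear in the hunks above; the function name is hypothetical:

	#include <net/netlink.h>
	#include <linux/netdevice.h>
	#include <linux/if_link.h>

	static int example_fill_info(struct sk_buff *msg, struct net_device *dev)
	{
		/* unconditional attributes: any failure falls to the label */
		if (nla_put_string(msg, IFLA_IFNAME, dev->name) ||
		    nla_put_u32(msg, IFLA_MTU, dev->mtu))
			goto nla_put_failure;

		/* optional attribute: the presence test joins the same chain */
		if (dev->ifalias &&
		    nla_put_string(msg, IFLA_IFALIAS, dev->ifalias))
			goto nla_put_failure;

		return 0;

	nla_put_failure:
		return -EMSGSIZE;
	}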
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index f96bae8..50e8239 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -106,7 +106,7 @@
 		       size_t size)
 {
 	struct net_device *dev;
-	unsigned mtu;
+	unsigned int mtu;
 	struct sk_buff *skb;
 	int hlen, tlen;
 	int err;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 10e3751..c8f7aee 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -350,7 +350,7 @@
 	err = 0;
 	sk->sk_no_check = answer_no_check;
 	if (INET_PROTOSW_REUSE & answer_flags)
-		sk->sk_reuse = 1;
+		sk->sk_reuse = SK_CAN_REUSE;
 
 	inet = inet_sk(sk);
 	inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
@@ -541,7 +541,7 @@
 }
 EXPORT_SYMBOL(inet_bind);
 
-int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
+int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
 		       int addr_len, int flags)
 {
 	struct sock *sk = sock->sk;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index fd508b5..3a28075 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -77,7 +77,7 @@
 
 static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
 {
-	unsigned char * optptr = (unsigned char*)(iph+1);
+	unsigned char *optptr = (unsigned char *)(iph+1);
 	int  l = iph->ihl*4 - sizeof(struct iphdr);
 	int  optlen;
 
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 18d9b81..373b56b 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1059,7 +1059,7 @@
 	neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev);
 	err = PTR_ERR(neigh);
 	if (!IS_ERR(neigh)) {
-		unsigned state = NUD_STALE;
+		unsigned int state = NUD_STALE;
 		if (r->arp_flags & ATF_PERM)
 			state = NUD_PERMANENT;
 		err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
@@ -1071,7 +1071,7 @@
 	return err;
 }
 
-static unsigned arp_state_to_flags(struct neighbour *neigh)
+static unsigned int arp_state_to_flags(struct neighbour *neigh)
 {
 	if (neigh->nud_state&NUD_PERMANENT)
 		return ATF_PERM | ATF_COM;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 6e447ff..88c9e3f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1125,7 +1125,7 @@
 	}
 }
 
-static inline bool inetdev_valid_mtu(unsigned mtu)
+static inline bool inetdev_valid_mtu(unsigned int mtu)
 {
 	return mtu >= 68;
 }
@@ -1266,17 +1266,15 @@
 	ifm->ifa_scope = ifa->ifa_scope;
 	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
 
-	if (ifa->ifa_address)
-		NLA_PUT_BE32(skb, IFA_ADDRESS, ifa->ifa_address);
-
-	if (ifa->ifa_local)
-		NLA_PUT_BE32(skb, IFA_LOCAL, ifa->ifa_local);
-
-	if (ifa->ifa_broadcast)
-		NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast);
-
-	if (ifa->ifa_label[0])
-		NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
+	if ((ifa->ifa_address &&
+	     nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) ||
+	    (ifa->ifa_local &&
+	     nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) ||
+	    (ifa->ifa_broadcast &&
+	     nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
+	    (ifa->ifa_label[0] &&
+	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -1587,7 +1585,6 @@
 static struct devinet_sysctl_table {
 	struct ctl_table_header *sysctl_header;
 	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
-	char *dev_name;
 } devinet_sysctl = {
 	.devinet_vars = {
 		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
@@ -1629,16 +1626,7 @@
 {
 	int i;
 	struct devinet_sysctl_table *t;
-
-#define DEVINET_CTL_PATH_DEV	3
-
-	struct ctl_path devinet_ctl_path[] = {
-		{ .procname = "net",  },
-		{ .procname = "ipv4", },
-		{ .procname = "conf", },
-		{ /* to be set */ },
-		{ },
-	};
+	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
 
 	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
 	if (!t)
@@ -1650,27 +1638,15 @@
 		t->devinet_vars[i].extra2 = net;
 	}
 
-	/*
-	 * Make a copy of dev_name, because '.procname' is regarded as const
-	 * by sysctl and we wouldn't want anyone to change it under our feet
-	 * (see SIOCSIFNAME).
-	 */
-	t->dev_name = kstrdup(dev_name, GFP_KERNEL);
-	if (!t->dev_name)
-		goto free;
+	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
 
-	devinet_ctl_path[DEVINET_CTL_PATH_DEV].procname = t->dev_name;
-
-	t->sysctl_header = register_net_sysctl_table(net, devinet_ctl_path,
-			t->devinet_vars);
+	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
 	if (!t->sysctl_header)
-		goto free_procname;
+		goto free;
 
 	p->sysctl = t;
 	return 0;
 
-free_procname:
-	kfree(t->dev_name);
 free:
 	kfree(t);
 out:
@@ -1686,7 +1662,6 @@
 
 	cnf->sysctl = NULL;
 	unregister_net_sysctl_table(t->sysctl_header);
-	kfree(t->dev_name);
 	kfree(t);
 }
 
@@ -1716,12 +1691,6 @@
 	},
 	{ },
 };
-
-static __net_initdata struct ctl_path net_ipv4_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ },
-};
 #endif
 
 static __net_init int devinet_init_net(struct net *net)
@@ -1767,7 +1736,7 @@
 		goto err_reg_dflt;
 
 	err = -ENOMEM;
-	forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl);
+	forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
 	if (forw_hdr == NULL)
 		goto err_reg_ctl;
 	net->ipv4.forw_hdr = forw_hdr;
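
The devinet hunk above, like the fib and routing changes later in this series, converts the NLA_PUT_*() macros (which jump to nla_put_failure behind the caller's back) into explicit nla_put_*() calls whose non-zero return is checked at the call site. A minimal sketch of the resulting fill-function shape, with a hypothetical struct, attribute, and function name that are not part of this patch:

	/* Illustrative only: post-conversion netlink fill helper.
	 * struct foo and EXAMPLE_ATTR are placeholders.
	 */
	static int foo_fill_info(struct sk_buff *skb, const struct foo *f)
	{
		/* nla_put_u32() returns non-zero once skb tailroom is exhausted,
		 * so each put is checked instead of relying on a hidden goto.
		 */
		if (f->value &&
		    nla_put_u32(skb, EXAMPLE_ATTR, f->value))
			goto nla_put_failure;
		return 0;

	nla_put_failure:
		return -EMSGSIZE;
	}
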
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cbe3a68..3854411 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -136,13 +136,13 @@
  * Find address type as if only "dev" was present in the system. If
  * on_dev is NULL then all interfaces are taken into consideration.
  */
-static inline unsigned __inet_dev_addr_type(struct net *net,
-					    const struct net_device *dev,
-					    __be32 addr)
+static inline unsigned int __inet_dev_addr_type(struct net *net,
+						const struct net_device *dev,
+						__be32 addr)
 {
 	struct flowi4		fl4 = { .daddr = addr };
 	struct fib_result	res;
-	unsigned ret = RTN_BROADCAST;
+	unsigned int ret = RTN_BROADCAST;
 	struct fib_table *local_table;
 
 	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
@@ -740,7 +740,7 @@
 #define BRD_OK		2
 #define BRD0_OK		4
 #define BRD1_OK		8
-	unsigned ok = 0;
+	unsigned int ok = 0;
 	int subnet = 0;		/* Primary network */
 	int gone = 1;		/* Address is missing */
 	int same_prefsrc = 0;	/* Another primary with same IP */
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 799fc79..2d043f7 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -221,15 +221,15 @@
 	frh->src_len = rule4->src_len;
 	frh->tos = rule4->tos;
 
-	if (rule4->dst_len)
-		NLA_PUT_BE32(skb, FRA_DST, rule4->dst);
-
-	if (rule4->src_len)
-		NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
-
+	if ((rule4->dst_len &&
+	     nla_put_be32(skb, FRA_DST, rule4->dst)) ||
+	    (rule4->src_len &&
+	     nla_put_be32(skb, FRA_SRC, rule4->src)))
+		goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-	if (rule4->tclassid)
-		NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
+	if (rule4->tclassid &&
+	    nla_put_u32(skb, FRA_FLOW, rule4->tclassid))
+		goto nla_put_failure;
 #endif
 	return 0;
 
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5063fa3..a8bdf740 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -931,33 +931,36 @@
 		rtm->rtm_table = tb_id;
 	else
 		rtm->rtm_table = RT_TABLE_COMPAT;
-	NLA_PUT_U32(skb, RTA_TABLE, tb_id);
+	if (nla_put_u32(skb, RTA_TABLE, tb_id))
+		goto nla_put_failure;
 	rtm->rtm_type = type;
 	rtm->rtm_flags = fi->fib_flags;
 	rtm->rtm_scope = fi->fib_scope;
 	rtm->rtm_protocol = fi->fib_protocol;
 
-	if (rtm->rtm_dst_len)
-		NLA_PUT_BE32(skb, RTA_DST, dst);
-
-	if (fi->fib_priority)
-		NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);
-
+	if (rtm->rtm_dst_len &&
+	    nla_put_be32(skb, RTA_DST, dst))
+		goto nla_put_failure;
+	if (fi->fib_priority &&
+	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
+		goto nla_put_failure;
 	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
 		goto nla_put_failure;
 
-	if (fi->fib_prefsrc)
-		NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);
-
+	if (fi->fib_prefsrc &&
+	    nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
+		goto nla_put_failure;
 	if (fi->fib_nhs == 1) {
-		if (fi->fib_nh->nh_gw)
-			NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);
-
-		if (fi->fib_nh->nh_oif)
-			NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
+		if (fi->fib_nh->nh_gw &&
+		    nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
+			goto nla_put_failure;
+		if (fi->fib_nh->nh_oif &&
+		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
+			goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-		if (fi->fib_nh[0].nh_tclassid)
-			NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
+		if (fi->fib_nh[0].nh_tclassid &&
+		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
+			goto nla_put_failure;
 #endif
 	}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -978,11 +981,13 @@
 			rtnh->rtnh_hops = nh->nh_weight - 1;
 			rtnh->rtnh_ifindex = nh->nh_oif;
 
-			if (nh->nh_gw)
-				NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
+			if (nh->nh_gw &&
+			    nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
+				goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-			if (nh->nh_tclassid)
-				NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
+			if (nh->nh_tclassid &&
+			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
+				goto nla_put_failure;
 #endif
 			/* length of rtnetlink header + attributes */
 			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 5dfecfd..6699f23 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -344,10 +344,10 @@
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len  = 0;	/* filled in later */
 	ip_select_ident(pip, &rt->dst, NULL);
-	((u8*)&pip[1])[0] = IPOPT_RA;
-	((u8*)&pip[1])[1] = 4;
-	((u8*)&pip[1])[2] = 0;
-	((u8*)&pip[1])[3] = 0;
+	((u8 *)&pip[1])[0] = IPOPT_RA;
+	((u8 *)&pip[1])[1] = 4;
+	((u8 *)&pip[1])[2] = 0;
+	((u8 *)&pip[1])[3] = 0;
 
 	skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
 	skb_put(skb, sizeof(*pig));
@@ -688,10 +688,10 @@
 	iph->saddr    = fl4.saddr;
 	iph->protocol = IPPROTO_IGMP;
 	ip_select_ident(iph, &rt->dst, NULL);
-	((u8*)&iph[1])[0] = IPOPT_RA;
-	((u8*)&iph[1])[1] = 4;
-	((u8*)&iph[1])[2] = 0;
-	((u8*)&iph[1])[3] = 0;
+	((u8 *)&iph[1])[0] = IPOPT_RA;
+	((u8 *)&iph[1])[1] = 4;
+	((u8 *)&iph[1])[2] = 0;
+	((u8 *)&iph[1])[3] = 0;
 
 	ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
 	ih->type = type;
@@ -774,7 +774,7 @@
 			if (psf->sf_count[MCAST_INCLUDE] ||
 			    pmc->sfcount[MCAST_EXCLUDE] !=
 			    psf->sf_count[MCAST_EXCLUDE])
-				continue;
+				break;
 			if (srcs[i] == psf->sf_inaddr) {
 				scount++;
 				break;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 19d66ce..95e61596 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -42,7 +42,8 @@
 
 void inet_get_local_port_range(int *low, int *high)
 {
-	unsigned seq;
+	unsigned int seq;
+
 	do {
 		seq = read_seqbegin(&sysctl_local_ports.lock);
 
@@ -53,7 +54,7 @@
 EXPORT_SYMBOL(inet_get_local_port_range);
 
 int inet_csk_bind_conflict(const struct sock *sk,
-			   const struct inet_bind_bucket *tb)
+			   const struct inet_bind_bucket *tb, bool relax)
 {
 	struct sock *sk2;
 	struct hlist_node *node;
@@ -79,6 +80,14 @@
 				    sk2_rcv_saddr == sk_rcv_saddr(sk))
 					break;
 			}
+			if (!relax && reuse && sk2->sk_reuse &&
+			    sk2->sk_state != TCP_LISTEN) {
+				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
+
+				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
+				    sk2_rcv_saddr == sk_rcv_saddr(sk))
+					break;
+			}
 		}
 	}
 	return node != NULL;
@@ -122,12 +131,13 @@
 					    (tb->num_owners < smallest_size || smallest_size == -1)) {
 						smallest_size = tb->num_owners;
 						smallest_rover = rover;
-						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
 							snum = smallest_rover;
 							goto tb_found;
 						}
 					}
-					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
 						snum = rover;
 						goto tb_found;
 					}
@@ -172,18 +182,22 @@
 	goto tb_not_found;
 tb_found:
 	if (!hlist_empty(&tb->owners)) {
+		if (sk->sk_reuse == SK_FORCE_REUSE)
+			goto success;
+
 		if (tb->fastreuse > 0 &&
 		    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
 		    smallest_size == -1) {
 			goto success;
 		} else {
 			ret = 1;
-			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
 				if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
 				    smallest_size != -1 && --attempts >= 0) {
 					spin_unlock(&head->lock);
 					goto again;
 				}
+
 				goto fail_unlock;
 			}
 		}
@@ -514,7 +528,7 @@
 
 	/* Normally all the openreqs are young and become mature
 	 * (i.e. converted to established socket) for first timeout.
-	 * If synack was not acknowledged for 3 seconds, it means
+	 * If synack was not acknowledged for 1 second, it means
 	 * one of the following things: synack was lost, ack was lost,
 	 * rtt is high or nobody planned to ack (i.e. synflood).
 	 * When server is a bit loaded, queue is populated with old
@@ -555,8 +569,7 @@
 				syn_ack_recalc(req, thresh, max_retries,
 					       queue->rskq_defer_accept,
 					       &expire, &resend);
-				if (req->rsk_ops->syn_ack_timeout)
-					req->rsk_ops->syn_ack_timeout(parent, req);
+				req->rsk_ops->syn_ack_timeout(parent, req);
 				if (!expire &&
 				    (!resend ||
 				     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 984ec65..7880af9 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -217,7 +217,7 @@
 }
 EXPORT_SYMBOL_GPL(__inet_lookup_listener);
 
-struct sock * __inet_lookup_established(struct net *net,
+struct sock *__inet_lookup_established(struct net *net,
 				  struct inet_hashinfo *hashinfo,
 				  const __be32 saddr, const __be16 sport,
 				  const __be32 daddr, const u16 hnum,
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 89168c6..543ef62 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -263,7 +263,7 @@
 void inet_twdr_hangman(unsigned long data)
 {
 	struct inet_timewait_death_row *twdr;
-	int unsigned need_timer;
+	unsigned int need_timer;
 
 	twdr = (struct inet_timewait_death_row *)data;
 	spin_lock(&twdr->death_lock);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 29a07b6..e5c44fc 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -41,7 +41,7 @@
 
 static int ip_forward_finish(struct sk_buff *skb)
 {
-	struct ip_options * opt	= &(IPCB(skb)->opt);
+	struct ip_options *opt	= &(IPCB(skb)->opt);
 
 	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
 
@@ -55,7 +55,7 @@
 {
 	struct iphdr *iph;	/* Our header */
 	struct rtable *rt;	/* Route we use */
-	struct ip_options * opt	= &(IPCB(skb)->opt);
+	struct ip_options *opt	= &(IPCB(skb)->opt);
 
 	if (skb_warn_if_lro(skb))
 		goto drop;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 3727e23..71e5c32 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -569,7 +569,7 @@
 		skb_morph(head, qp->q.fragments);
 		head->next = qp->q.fragments->next;
 
-		kfree_skb(qp->q.fragments);
+		consume_skb(qp->q.fragments);
 		qp->q.fragments = head;
 	}
 
@@ -782,7 +782,7 @@
 		table[2].data = &net->ipv4.frags.timeout;
 	}
 
-	hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
+	hdr = register_net_sysctl(net, "net/ipv4", table);
 	if (hdr == NULL)
 		goto err_reg;
 
@@ -807,7 +807,7 @@
 
 static void ip4_frags_ctl_register(void)
 {
-	register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
+	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
 }
 #else
 static inline int ip4_frags_ns_ctl_register(struct net *net)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b57532d..f49047b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -169,37 +169,56 @@
 
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_tstats {
-	unsigned long	rx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_packets;
-	unsigned long	tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+	struct u64_stats_sync	syncp;
+};
 
-static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
+						   struct rtnl_link_stats64 *tot)
 {
-	struct pcpu_tstats sum = { 0 };
 	int i;
 
 	for_each_possible_cpu(i) {
 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+		unsigned int start;
 
-		sum.rx_packets += tstats->rx_packets;
-		sum.rx_bytes   += tstats->rx_bytes;
-		sum.tx_packets += tstats->tx_packets;
-		sum.tx_bytes   += tstats->tx_bytes;
+		do {
+			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			rx_packets = tstats->rx_packets;
+			tx_packets = tstats->tx_packets;
+			rx_bytes = tstats->rx_bytes;
+			tx_bytes = tstats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+		tot->rx_packets += rx_packets;
+		tot->tx_packets += tx_packets;
+		tot->rx_bytes   += rx_bytes;
+		tot->tx_bytes   += tx_bytes;
 	}
-	dev->stats.rx_packets = sum.rx_packets;
-	dev->stats.rx_bytes   = sum.rx_bytes;
-	dev->stats.tx_packets = sum.tx_packets;
-	dev->stats.tx_bytes   = sum.tx_bytes;
-	return &dev->stats;
+
+	tot->multicast = dev->stats.multicast;
+	tot->rx_crc_errors = dev->stats.rx_crc_errors;
+	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+	tot->rx_length_errors = dev->stats.rx_length_errors;
+	tot->rx_errors = dev->stats.rx_errors;
+	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+	tot->tx_dropped = dev->stats.tx_dropped;
+	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+	tot->tx_errors = dev->stats.tx_errors;
+
+	return tot;
 }
 
 /* Given src, dst and key, find appropriate for input tunnel. */
 
-static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
-					      __be32 remote, __be32 local,
-					      __be32 key, __be16 gre_proto)
+static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
+					     __be32 remote, __be32 local,
+					     __be32 key, __be16 gre_proto)
 {
 	struct net *net = dev_net(dev);
 	int link = dev->ifindex;
@@ -464,7 +483,7 @@
  */
 
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
-	__be16	     *p = (__be16*)(skb->data+(iph->ihl<<2));
+	__be16	     *p = (__be16 *)(skb->data+(iph->ihl<<2));
 	int grehlen = (iph->ihl<<2) + 4;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
@@ -574,7 +593,7 @@
 
 	iph = ip_hdr(skb);
 	h = skb->data;
-	flags = *(__be16*)h;
+	flags = *(__be16 *)h;
 
 	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
 		/* - Version must be 0.
@@ -598,11 +617,11 @@
 			offset += 4;
 		}
 		if (flags&GRE_KEY) {
-			key = *(__be32*)(h + offset);
+			key = *(__be32 *)(h + offset);
 			offset += 4;
 		}
 		if (flags&GRE_SEQ) {
-			seqno = ntohl(*(__be32*)(h + offset));
+			seqno = ntohl(*(__be32 *)(h + offset));
 			offset += 4;
 		}
 	}
@@ -672,8 +691,10 @@
 		}
 
 		tstats = this_cpu_ptr(tunnel->dev->tstats);
+		u64_stats_update_begin(&tstats->syncp);
 		tstats->rx_packets++;
 		tstats->rx_bytes += skb->len;
+		u64_stats_update_end(&tstats->syncp);
 
 		__skb_tunnel_rx(skb, tunnel->dev);
 
@@ -900,7 +921,7 @@
 				   htons(ETH_P_TEB) : skb->protocol;
 
 	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
-		__be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
+		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
 
 		if (tunnel->parms.o_flags&GRE_SEQ) {
 			++tunnel->o_seqno;
@@ -913,7 +934,7 @@
 		}
 		if (tunnel->parms.o_flags&GRE_CSUM) {
 			*ptr = 0;
-			*(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
+			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
 		}
 	}
 
@@ -1169,7 +1190,7 @@
 {
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
-	__be16 *p = (__be16*)(iph+1);
+	__be16 *p = (__be16 *)(iph+1);
 
 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 	p[0]		= t->parms.o_flags;
@@ -1253,7 +1274,7 @@
 	.ndo_start_xmit		= ipgre_tunnel_xmit,
 	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
 	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
-	.ndo_get_stats		= ipgre_get_stats,
+	.ndo_get_stats64	= ipgre_get_stats64,
 };
 
 static void ipgre_dev_free(struct net_device *dev)
@@ -1507,7 +1528,7 @@
 	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
-	.ndo_get_stats		= ipgre_get_stats,
+	.ndo_get_stats64	= ipgre_get_stats64,
 };
 
 static void ipgre_tap_setup(struct net_device *dev)
@@ -1654,17 +1675,18 @@
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct ip_tunnel_parm *p = &t->parms;
 
-	NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
-	NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
-	NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
-	NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
-	NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
-	NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
-	NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
-	NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
-	NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
-	NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));
-
+	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
+	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
+	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
+	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
+	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
+	    nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
+	    nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
+	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
+	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
+	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
+		       !!(p->iph.frag_off & htons(IP_DF))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index a0d0d9d..95722ed 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -210,10 +210,10 @@
  *	Simple and stupid 8), but the most efficient way.
  */
 
-void ip_options_fragment(struct sk_buff * skb)
+void ip_options_fragment(struct sk_buff *skb)
 {
 	unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr);
-	struct ip_options * opt = &(IPCB(skb)->opt);
+	struct ip_options *opt = &(IPCB(skb)->opt);
 	int  l = opt->optlen;
 	int  optlen;
 
@@ -248,13 +248,13 @@
  */
 
 int ip_options_compile(struct net *net,
-		       struct ip_options * opt, struct sk_buff * skb)
+		       struct ip_options *opt, struct sk_buff *skb)
 {
 	int l;
-	unsigned char * iph;
-	unsigned char * optptr;
+	unsigned char *iph;
+	unsigned char *optptr;
 	int optlen;
-	unsigned char * pp_ptr = NULL;
+	unsigned char *pp_ptr = NULL;
 	struct rtable *rt = NULL;
 
 	if (skb != NULL) {
@@ -413,7 +413,7 @@
 					opt->is_changed = 1;
 				}
 			} else {
-				unsigned overflow = optptr[3]>>4;
+				unsigned int overflow = optptr[3]>>4;
 				if (overflow == 15) {
 					pp_ptr = optptr + 3;
 					goto error;
@@ -473,20 +473,20 @@
  *	Undo all the changes done by ip_options_compile().
  */
 
-void ip_options_undo(struct ip_options * opt)
+void ip_options_undo(struct ip_options *opt)
 {
 	if (opt->srr) {
-		unsigned  char * optptr = opt->__data+opt->srr-sizeof(struct  iphdr);
+		unsigned  char *optptr = opt->__data+opt->srr-sizeof(struct  iphdr);
 		memmove(optptr+7, optptr+3, optptr[1]-7);
 		memcpy(optptr+3, &opt->faddr, 4);
 	}
 	if (opt->rr_needaddr) {
-		unsigned  char * optptr = opt->__data+opt->rr-sizeof(struct  iphdr);
+		unsigned  char *optptr = opt->__data+opt->rr-sizeof(struct  iphdr);
 		optptr[2] -= 4;
 		memset(&optptr[optptr[2]-1], 0, 4);
 	}
 	if (opt->ts) {
-		unsigned  char * optptr = opt->__data+opt->ts-sizeof(struct  iphdr);
+		unsigned  char *optptr = opt->__data+opt->ts-sizeof(struct  iphdr);
 		if (opt->ts_needtime) {
 			optptr[2] -= 4;
 			memset(&optptr[optptr[2]-1], 0, 4);
@@ -549,8 +549,8 @@
 
 void ip_forward_options(struct sk_buff *skb)
 {
-	struct   ip_options * opt	= &(IPCB(skb)->opt);
-	unsigned char * optptr;
+	struct   ip_options *opt	= &(IPCB(skb)->opt);
+	unsigned char *optptr;
 	struct rtable *rt = skb_rtable(skb);
 	unsigned char *raw = skb_network_header(skb);
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 2fd0fba..51c6c67 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -90,7 +90,7 @@
 static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
 {
 	unsigned char optbuf[sizeof(struct ip_options) + 40];
-	struct ip_options * opt = (struct ip_options *)optbuf;
+	struct ip_options *opt = (struct ip_options *)optbuf;
 
 	if (IPCB(skb)->opt.optlen == 0)
 		return;
@@ -147,7 +147,7 @@
 void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
 {
 	struct inet_sock *inet = inet_sk(skb->sk);
-	unsigned flags = inet->cmsg_flags;
+	unsigned int flags = inet->cmsg_flags;
 
 	/* Ordered by supposed usage frequency */
 	if (flags & 1)
@@ -1094,7 +1094,7 @@
  */
 
 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
-			    char __user *optval, int __user *optlen, unsigned flags)
+			    char __user *optval, int __user *optlen, unsigned int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	int val;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 92ac7e7..f267280 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1198,7 +1198,7 @@
 	d = ic_first_dev;
 	retries = CONF_SEND_RETRIES;
 	get_random_bytes(&timeout, sizeof(timeout));
-	timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM);
+	timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM);
 	for (;;) {
 		/* Track the device we are configuring */
 		ic_dev_xid = d->xid;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ae1413e..2d0f99b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -144,33 +144,48 @@
 
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_tstats {
-	unsigned long	rx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_packets;
-	unsigned long	tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+	struct u64_stats_sync	syncp;
+};
 
-static struct net_device_stats *ipip_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
+						  struct rtnl_link_stats64 *tot)
 {
-	struct pcpu_tstats sum = { 0 };
 	int i;
 
 	for_each_possible_cpu(i) {
 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+		unsigned int start;
 
-		sum.rx_packets += tstats->rx_packets;
-		sum.rx_bytes   += tstats->rx_bytes;
-		sum.tx_packets += tstats->tx_packets;
-		sum.tx_bytes   += tstats->tx_bytes;
+		do {
+			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			rx_packets = tstats->rx_packets;
+			tx_packets = tstats->tx_packets;
+			rx_bytes = tstats->rx_bytes;
+			tx_bytes = tstats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+		tot->rx_packets += rx_packets;
+		tot->tx_packets += tx_packets;
+		tot->rx_bytes   += rx_bytes;
+		tot->tx_bytes   += tx_bytes;
 	}
-	dev->stats.rx_packets = sum.rx_packets;
-	dev->stats.rx_bytes   = sum.rx_bytes;
-	dev->stats.tx_packets = sum.tx_packets;
-	dev->stats.tx_bytes   = sum.tx_bytes;
-	return &dev->stats;
+
+	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+	tot->tx_dropped = dev->stats.tx_dropped;
+	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+	tot->tx_errors = dev->stats.tx_errors;
+	tot->collisions = dev->stats.collisions;
+
+	return tot;
 }
 
-static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
+static struct ip_tunnel *ipip_tunnel_lookup(struct net *net,
 		__be32 remote, __be32 local)
 {
 	unsigned int h0 = HASH(remote);
@@ -245,7 +260,7 @@
 	rcu_assign_pointer(*tp, t);
 }
 
-static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
+static struct ip_tunnel *ipip_tunnel_locate(struct net *net,
 		struct ip_tunnel_parm *parms, int create)
 {
 	__be32 remote = parms->iph.daddr;
@@ -404,8 +419,10 @@
 		skb->pkt_type = PACKET_HOST;
 
 		tstats = this_cpu_ptr(tunnel->dev->tstats);
+		u64_stats_update_begin(&tstats->syncp);
 		tstats->rx_packets++;
 		tstats->rx_bytes += skb->len;
+		u64_stats_update_end(&tstats->syncp);
 
 		__skb_tunnel_rx(skb, tunnel->dev);
 
@@ -730,7 +747,7 @@
 	.ndo_start_xmit	= ipip_tunnel_xmit,
 	.ndo_do_ioctl	= ipip_tunnel_ioctl,
 	.ndo_change_mtu	= ipip_tunnel_change_mtu,
-	.ndo_get_stats  = ipip_get_stats,
+	.ndo_get_stats64 = ipip_get_stats64,
 };
 
 static void ipip_dev_free(struct net_device *dev)
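
Both tunnel drivers above move their hot counters into per-CPU u64 fields guarded by a u64_stats_sync and report them through .ndo_get_stats64. The hunks do not show where dev->tstats comes from; as a hedged sketch, it is allocated per device in the driver's init path and released in its destructor (the function names below are placeholders, not from this patch):

	/* Illustrative only: allocating and freeing the per-CPU counters that
	 * the new *_get_stats64() handlers aggregate.
	 */
	static int example_tunnel_init(struct net_device *dev)
	{
		dev->tstats = alloc_percpu(struct pcpu_tstats);
		if (!dev->tstats)
			return -ENOMEM;
		return 0;
	}

	static void example_tunnel_free(struct net_device *dev)
	{
		free_percpu(dev->tstats);
		free_netdev(dev);
	}
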
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 960fbfc3..5bef604 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2119,15 +2119,16 @@
 	rtm->rtm_src_len  = 32;
 	rtm->rtm_tos      = 0;
 	rtm->rtm_table    = mrt->id;
-	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
+		goto nla_put_failure;
 	rtm->rtm_type     = RTN_MULTICAST;
 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
 	rtm->rtm_protocol = RTPROT_UNSPEC;
 	rtm->rtm_flags    = 0;
 
-	NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
-	NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);
-
+	if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
+	    nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
+		goto nla_put_failure;
 	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
 		goto nla_put_failure;
 
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 4f47e06..ed1b367 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -12,7 +12,7 @@
 #include <net/netfilter/nf_queue.h>
 
 /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
-int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
+int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
 {
 	struct net *net = dev_net(skb_dst(skb)->dev);
 	const struct iphdr *iph = ip_hdr(skb);
@@ -237,13 +237,3 @@
 
 module_init(ipv4_netfilter_init);
 module_exit(ipv4_netfilter_fini);
-
-#ifdef CONFIG_SYSCTL
-struct ctl_path nf_net_ipv4_netfilter_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ .procname = "netfilter", },
-	{ }
-};
-EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path);
-#endif /* CONFIG_SYSCTL */
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index fd7a3f6..a393527 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -303,7 +303,7 @@
 			if (v < 0) {
 				/* Pop from stack? */
 				if (v != XT_RETURN) {
-					verdict = (unsigned)(-v) - 1;
+					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
 				e = back;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 94d45e1..09775a1 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -586,7 +586,7 @@
 #endif
 	register_netdevice_notifier(&ipq_dev_notifier);
 #ifdef CONFIG_SYSCTL
-	ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table);
+	ipq_sysctl_header = register_net_sysctl(&init_net, "net/ipv4", ipq_table);
 #endif
 	status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh);
 	if (status < 0) {
@@ -597,7 +597,7 @@
 
 cleanup_sysctl:
 #ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(ipq_sysctl_header);
+	unregister_net_sysctl_table(ipq_sysctl_header);
 #endif
 	unregister_netdevice_notifier(&ipq_dev_notifier);
 	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
@@ -618,7 +618,7 @@
 	ipq_flush(NULL, 0);
 
 #ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(ipq_sysctl_header);
+	unregister_net_sysctl_table(ipq_sysctl_header);
 #endif
 	unregister_netdevice_notifier(&ipq_dev_notifier);
 	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 24e556e..585b80f 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -377,7 +377,7 @@
 			if (v < 0) {
 				/* Pop from stack? */
 				if (v != XT_RETURN) {
-					verdict = (unsigned)(-v) - 1;
+					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
 				if (*stackptr <= origptr) {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index de9da21..91747d4 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -74,16 +74,24 @@
 
 	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
 	if (iph == NULL)
-		return -NF_DROP;
+		return -NF_ACCEPT;
 
 	/* Conntrack defragments packets, we might still see fragments
 	 * inside ICMP packets though. */
 	if (iph->frag_off & htons(IP_OFFSET))
-		return -NF_DROP;
+		return -NF_ACCEPT;
 
 	*dataoff = nhoff + (iph->ihl << 2);
 	*protonum = iph->protocol;
 
+	/* Check bogus IP headers */
+	if (*dataoff > skb->len) {
+		pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
+			 "nhoff %u, ihl %u, skblen %u\n",
+			 nhoff, iph->ihl << 2, skb->len);
+		return -NF_ACCEPT;
+	}
+
 	return NF_ACCEPT;
 }
 
@@ -303,8 +311,9 @@
 static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
 				const struct nf_conntrack_tuple *tuple)
 {
-	NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip);
-	NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip);
+	if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
+	    nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -356,7 +365,7 @@
 	.nla_policy	 = ipv4_nla_policy,
 #endif
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
-	.ctl_table_path  = nf_net_ipv4_netfilter_sysctl_path,
+	.ctl_table_path  = "net/ipv4/netfilter",
 	.ctl_table	 = ip_ct_sysctl_table,
 #endif
 	.me		 = THIS_MODULE,
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7cbe9cb..0847e37 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -228,10 +228,10 @@
 static int icmp_tuple_to_nlattr(struct sk_buff *skb,
 				const struct nf_conntrack_tuple *t)
 {
-	NLA_PUT_BE16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id);
-	NLA_PUT_U8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type);
-	NLA_PUT_U8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code);
-
+	if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) ||
+	    nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) ||
+	    nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -293,8 +293,8 @@
 {
 	const unsigned int *timeout = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ));
-
+	if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 57932c4..ea4a2381 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -283,7 +283,7 @@
 	__be32 newip;
 	u_int16_t port;
 	char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
-	unsigned buflen;
+	unsigned int buflen;
 
 	/* Connection will come from reply */
 	if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 50009c7..6e930c7 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -51,15 +51,16 @@
 
 static u16 ping_port_rover;
 
-static inline int ping_hashfn(struct net *net, unsigned num, unsigned mask)
+static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask)
 {
 	int res = (num + net_hash_mix(net)) & mask;
+
 	pr_debug("hash(%d) = %d\n", num, res);
 	return res;
 }
 
 static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
-					     struct net *net, unsigned num)
+					     struct net *net, unsigned int num)
 {
 	return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
 }
@@ -188,7 +189,8 @@
 					  gid_t *high)
 {
 	gid_t *data = net->ipv4.sysctl_ping_group_range;
-	unsigned seq;
+	unsigned int seq;
+
 	do {
 		seq = read_seqbegin(&sysctl_local_ports.lock);
 
@@ -410,7 +412,7 @@
 	__wsum wcheck;
 };
 
-static int ping_getfrag(void *from, char * to,
+static int ping_getfrag(void *from, char *to,
 			int offset, int fraglen, int odd, struct sk_buff *skb)
 {
 	struct pingfakehdr *pfh = (struct pingfakehdr *)from;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bbd604c..4032b81 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -288,7 +288,7 @@
 	read_unlock(&raw_v4_hashinfo.lock);
 }
 
-static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
+static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	/* Charge it to the socket. */
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 167ea10..5773f5d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -109,6 +109,7 @@
 #include <net/rtnetlink.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
+#include <linux/kmemleak.h>
 #endif
 #include <net/secure_seq.h>
 
@@ -229,7 +230,7 @@
 	TC_PRIO_INTERACTIVE_BULK,
 	ECN_OR_COST(INTERACTIVE_BULK)
 };
-
+EXPORT_SYMBOL(ip_tos2prio);
 
 /*
  * Route cache.
@@ -296,7 +297,7 @@
 #endif
 
 static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
-static unsigned			rt_hash_mask __read_mostly;
+static unsigned int		rt_hash_mask __read_mostly;
 static unsigned int		rt_hash_log  __read_mostly;
 
 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
@@ -1143,7 +1144,7 @@
 	return 0;
 }
 
-static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
+static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
 				     struct sk_buff *skb, int ifindex)
 {
 	struct rtable	*rth, *cand;
@@ -1384,7 +1385,7 @@
 }
 EXPORT_SYMBOL(__ip_select_ident);
 
-static void rt_del(unsigned hash, struct rtable *rt)
+static void rt_del(unsigned int hash, struct rtable *rt)
 {
 	struct rtable __rcu **rthp;
 	struct rtable *aux;
@@ -1538,7 +1539,7 @@
 			ip_rt_put(rt);
 			ret = NULL;
 		} else if (rt->rt_flags & RTCF_REDIRECTED) {
-			unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
+			unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
 						rt->rt_oif,
 						rt_genid(dev_net(dst->dev)));
 			rt_del(hash, rt);
@@ -2215,9 +2216,9 @@
 			    struct in_device *in_dev,
 			    __be32 daddr, __be32 saddr, u32 tos)
 {
-	struct rtable* rth = NULL;
+	struct rtable *rth = NULL;
 	int err;
-	unsigned hash;
+	unsigned int hash;
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	if (res->fi && res->fi->fib_nhs > 1)
@@ -2255,13 +2256,13 @@
 	struct fib_result res;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	struct flowi4	fl4;
-	unsigned	flags = 0;
+	unsigned int	flags = 0;
 	u32		itag = 0;
-	struct rtable * rth;
-	unsigned	hash;
+	struct rtable	*rth;
+	unsigned int	hash;
 	__be32		spec_dst;
 	int		err = -EINVAL;
-	struct net    * net = dev_net(dev);
+	struct net    *net = dev_net(dev);
 
 	/* IP on this device is disabled. */
 
@@ -2433,8 +2434,8 @@
 int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 			   u8 tos, struct net_device *dev, bool noref)
 {
-	struct rtable * rth;
-	unsigned	hash;
+	struct rtable	*rth;
+	unsigned int	hash;
 	int iif = dev->ifindex;
 	struct net *net;
 	int res;
@@ -2972,7 +2973,8 @@
 	r->rtm_src_len	= 0;
 	r->rtm_tos	= rt->rt_key_tos;
 	r->rtm_table	= RT_TABLE_MAIN;
-	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
+	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
+		goto nla_put_failure;
 	r->rtm_type	= rt->rt_type;
 	r->rtm_scope	= RT_SCOPE_UNIVERSE;
 	r->rtm_protocol = RTPROT_UNSPEC;
@@ -2980,31 +2982,38 @@
 	if (rt->rt_flags & RTCF_NOTIFY)
 		r->rtm_flags |= RTM_F_NOTIFY;
 
-	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
-
+	if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
+		goto nla_put_failure;
 	if (rt->rt_key_src) {
 		r->rtm_src_len = 32;
-		NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
+		if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
+			goto nla_put_failure;
 	}
-	if (rt->dst.dev)
-		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
+	if (rt->dst.dev &&
+	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
+		goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-	if (rt->dst.tclassid)
-		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
+	if (rt->dst.tclassid &&
+	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
+		goto nla_put_failure;
 #endif
-	if (rt_is_input_route(rt))
-		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
-	else if (rt->rt_src != rt->rt_key_src)
-		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
-
-	if (rt->rt_dst != rt->rt_gateway)
-		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
+	if (rt_is_input_route(rt)) {
+		if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst))
+			goto nla_put_failure;
+	} else if (rt->rt_src != rt->rt_key_src) {
+		if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
+			goto nla_put_failure;
+	}
+	if (rt->rt_dst != rt->rt_gateway &&
+	    nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
+		goto nla_put_failure;
 
 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
 		goto nla_put_failure;
 
-	if (rt->rt_mark)
-		NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
+	if (rt->rt_mark &&
+	    nla_put_be32(skb, RTA_MARK, rt->rt_mark))
+		goto nla_put_failure;
 
 	error = rt->dst.error;
 	if (peer) {
@@ -3045,7 +3054,8 @@
 			}
 		} else
 #endif
-			NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
+			if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
+				goto nla_put_failure;
 	}
 
 	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
@@ -3059,7 +3069,7 @@
 	return -EMSGSIZE;
 }
 
-static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
+static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
 {
 	struct net *net = sock_net(in_skb->sk);
 	struct rtmsg *rtm;
@@ -3334,23 +3344,6 @@
 	{ }
 };
 
-static struct ctl_table empty[1];
-
-static struct ctl_table ipv4_skeleton[] =
-{
-	{ .procname = "route", 
-	  .mode = 0555, .child = ipv4_route_table},
-	{ .procname = "neigh", 
-	  .mode = 0555, .child = empty},
-	{ }
-};
-
-static __net_initdata struct ctl_path ipv4_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ },
-};
-
 static struct ctl_table ipv4_route_flush_table[] = {
 	{
 		.procname	= "flush",
@@ -3361,13 +3354,6 @@
 	{ },
 };
 
-static __net_initdata struct ctl_path ipv4_route_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ .procname = "route", },
-	{ },
-};
-
 static __net_init int sysctl_route_net_init(struct net *net)
 {
 	struct ctl_table *tbl;
@@ -3380,8 +3366,7 @@
 	}
 	tbl[0].extra1 = net;
 
-	net->ipv4.route_hdr =
-		register_net_sysctl_table(net, ipv4_route_path, tbl);
+	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
 	if (net->ipv4.route_hdr == NULL)
 		goto err_reg;
 	return 0;
@@ -3505,6 +3490,6 @@
  */
 void __init ip_static_sysctl_init(void)
 {
-	register_sysctl_paths(ipv4_path, ipv4_skeleton);
+	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
 }
 #endif
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7a7724d..33417f8 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -78,7 +78,7 @@
 static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
 {
 	gid_t *data = table->data;
-	unsigned seq;
+	unsigned int seq;
 	do {
 		seq = read_seqbegin(&sysctl_local_ports.lock);
 
@@ -768,13 +768,6 @@
 	{ }
 };
 
-struct ctl_path net_ipv4_ctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ },
-};
-EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
-
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
 	struct ctl_table *table;
@@ -815,8 +808,7 @@
 
 	tcp_init_mem(net);
 
-	net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
-			net_ipv4_ctl_path, table);
+	net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
 	if (net->ipv4.ipv4_hdr == NULL)
 		goto err_reg;
 
@@ -857,12 +849,12 @@
 	if (!i->procname)
 		return -EINVAL;
 
-	hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table);
+	hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
 	if (hdr == NULL)
 		return -ENOMEM;
 
 	if (register_pernet_subsys(&ipv4_sysctl_ops)) {
-		unregister_sysctl_table(hdr);
+		unregister_net_sysctl_table(hdr);
 		return -ENOMEM;
 	}
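
With the net_ipv4_ctl_path array removed, callers hand register_net_sysctl() a plain "net/ipv4"-style path string and keep the returned ctl_table_header for unregister_net_sysctl_table() on teardown. A minimal sketch with a hypothetical table (not part of this patch):

	static int example_value;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example_value",
			.data		= &example_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	/* Registers /proc/sys/net/ipv4/example_value for init_net. */
	static struct ctl_table_header *example_hdr;

	static int __init example_sysctl_register(void)
	{
		example_hdr = register_net_sysctl(&init_net, "net/ipv4",
						  example_table);
		return example_hdr ? 0 : -ENOMEM;
	}
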
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5d54ed3..de6a238 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -363,6 +363,70 @@
 	return period;
 }
 
+/* Address-family independent initialization for a tcp_sock.
+ *
+ * NOTE: A lot of things set to zero explicitly by call to
+ *       sk_alloc() so need not be done here.
+ */
+void tcp_init_sock(struct sock *sk)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	skb_queue_head_init(&tp->out_of_order_queue);
+	tcp_init_xmit_timers(sk);
+	tcp_prequeue_init(tp);
+
+	icsk->icsk_rto = TCP_TIMEOUT_INIT;
+	tp->mdev = TCP_TIMEOUT_INIT;
+
+	/* So many TCP implementations out there (incorrectly) count the
+	 * initial SYN frame in their delayed-ACK and congestion control
+	 * algorithms that we must have the following bandaid to talk
+	 * efficiently to them.  -DaveM
+	 */
+	tp->snd_cwnd = TCP_INIT_CWND;
+
+	/* See draft-stevens-tcpca-spec-01 for discussion of the
+	 * initialization of these values.
+	 */
+	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+	tp->snd_cwnd_clamp = ~0;
+	tp->mss_cache = TCP_MSS_DEFAULT;
+
+	tp->reordering = sysctl_tcp_reordering;
+	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
+
+	sk->sk_state = TCP_CLOSE;
+
+	sk->sk_write_space = sk_stream_write_space;
+	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+	icsk->icsk_sync_mss = tcp_sync_mss;
+
+	/* TCP Cookie Transactions */
+	if (sysctl_tcp_cookie_size > 0) {
+		/* Default, cookies without s_data_payload. */
+		tp->cookie_values =
+			kzalloc(sizeof(*tp->cookie_values),
+				sk->sk_allocation);
+		if (tp->cookie_values != NULL)
+			kref_init(&tp->cookie_values->kref);
+	}
+	/* Presumed zeroed, in order of appearance:
+	 *	cookie_in_always, cookie_out_never,
+	 *	s_data_constant, s_data_in, s_data_out
+	 */
+	sk->sk_sndbuf = sysctl_tcp_wmem[1];
+	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
+
+	local_bh_disable();
+	sock_update_memcg(sk);
+	sk_sockets_allocated_inc(sk);
+	local_bh_enable();
+}
+EXPORT_SYMBOL(tcp_init_sock);
+
 /*
  *	Wait for a TCP event.
  *
@@ -701,11 +765,12 @@
 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 	if (skb) {
 		if (sk_wmem_schedule(sk, skb->truesize)) {
+			skb_reserve(skb, sk->sk_prot->max_header);
 			/*
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
 			 */
-			skb_reserve(skb, skb_tailroom(skb) - size);
+			skb->avail_size = size;
 			return skb;
 		}
 		__kfree_skb(skb);
@@ -783,9 +848,10 @@
 	while (psize > 0) {
 		struct sk_buff *skb = tcp_write_queue_tail(sk);
 		struct page *page = pages[poffset / PAGE_SIZE];
-		int copy, i, can_coalesce;
+		int copy, i;
 		int offset = poffset % PAGE_SIZE;
 		int size = min_t(size_t, psize, PAGE_SIZE - offset);
+		bool can_coalesce;
 
 		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
 new_segment:
@@ -911,6 +977,39 @@
 	return tmp;
 }
 
+static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
+{
+	struct sk_buff *skb;
+	struct tcp_skb_cb *cb;
+	struct tcphdr *th;
+
+	skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
+	if (!skb)
+		goto err;
+
+	th = (struct tcphdr *)skb_put(skb, sizeof(*th));
+	skb_reset_transport_header(skb);
+	memset(th, 0, sizeof(*th));
+
+	if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
+		goto err_free;
+
+	cb = TCP_SKB_CB(skb);
+
+	TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
+	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
+	TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
+
+	tcp_queue_rcv(sk, skb, sizeof(*th));
+
+	return size;
+
+err_free:
+	kfree_skb(skb);
+err:
+	return -ENOMEM;
+}
+
 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		size_t size)
 {
@@ -918,7 +1017,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int iovlen, flags, err, copied;
-	int mss_now, size_goal;
+	int mss_now = 0, size_goal;
 	bool sg;
 	long timeo;
 
@@ -932,6 +1031,19 @@
 		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
 			goto out_err;
 
+	if (unlikely(tp->repair)) {
+		if (tp->repair_queue == TCP_RECV_QUEUE) {
+			copied = tcp_send_rcvq(sk, msg, size);
+			goto out;
+		}
+
+		err = -EINVAL;
+		if (tp->repair_queue == TCP_NO_QUEUE)
+			goto out_err;
+
+		/* 'common' sending to sendq */
+	}
+
 	/* This should be in poll */
 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
@@ -995,10 +1107,9 @@
 				copy = seglen;
 
 			/* Where to copy to? */
-			if (skb_tailroom(skb) > 0) {
+			if (skb_availroom(skb) > 0) {
 				/* We have some space in skb head. Superb! */
-				if (copy > skb_tailroom(skb))
-					copy = skb_tailroom(skb);
+				copy = min_t(int, copy, skb_availroom(skb));
 				err = skb_add_data_nocache(sk, skb, from, copy);
 				if (err)
 					goto do_fault;
@@ -1089,7 +1200,7 @@
 			if ((seglen -= copy) == 0 && iovlen == 0)
 				goto out;
 
-			if (skb->len < max || (flags & MSG_OOB))
+			if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
 				continue;
 
 			if (forced_push(tp)) {
@@ -1102,7 +1213,7 @@
 wait_for_sndbuf:
 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
-			if (copied)
+			if (copied && likely(!tp->repair))
 				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
 			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
@@ -1113,7 +1224,7 @@
 	}
 
 out:
-	if (copied)
+	if (copied && likely(!tp->repair))
 		tcp_push(sk, flags, mss_now, tp->nonagle);
 	release_sock(sk);
 	return copied;
@@ -1187,6 +1298,24 @@
 	return -EAGAIN;
 }
 
+static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
+{
+	struct sk_buff *skb;
+	int copied = 0, err = 0;
+
+	/* XXX -- need to support SO_PEEK_OFF */
+
+	skb_queue_walk(&sk->sk_write_queue, skb) {
+		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
+		if (err)
+			break;
+
+		copied += skb->len;
+	}
+
+	return err ?: copied;
+}
+
 /* Clean up the receive buffer for full frames taken by the user,
  * then send an ACK if necessary.  COPIED is the number of bytes
  * tcp_recvmsg has given to the user so far, it speeds up the
@@ -1432,6 +1561,21 @@
 	if (flags & MSG_OOB)
 		goto recv_urg;
 
+	if (unlikely(tp->repair)) {
+		err = -EPERM;
+		if (!(flags & MSG_PEEK))
+			goto out;
+
+		if (tp->repair_queue == TCP_SEND_QUEUE)
+			goto recv_sndq;
+
+		err = -EINVAL;
+		if (tp->repair_queue == TCP_NO_QUEUE)
+			goto out;
+
+		/* 'common' recv queue MSG_PEEK-ing */
+	}
+
 	seq = &tp->copied_seq;
 	if (flags & MSG_PEEK) {
 		peek_seq = tp->copied_seq;
@@ -1452,7 +1596,7 @@
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    dma_find_channel(DMA_MEMCPY)) {
+		    net_dma_find_channel()) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1667,7 +1811,7 @@
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+				tp->ucopy.dma_chan = net_dma_find_channel();
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1783,6 +1927,10 @@
 recv_urg:
 	err = tcp_recv_urg(sk, msg, len, flags);
 	goto out;
+
+recv_sndq:
+	err = tcp_peek_sndq(sk, msg, len);
+	goto out;
 }
 EXPORT_SYMBOL(tcp_recvmsg);
 
@@ -1935,7 +2083,9 @@
 	 * advertise a zero window, then kill -9 the FTP client, wheee...
 	 * Note: timeout is always zero in such a case.
 	 */
-	if (data_was_unread) {
+	if (unlikely(tcp_sk(sk)->repair)) {
+		sk->sk_prot->disconnect(sk, 0);
+	} else if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
 		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
@@ -2074,6 +2224,8 @@
 	/* ABORT function of RFC793 */
 	if (old_state == TCP_LISTEN) {
 		inet_csk_listen_stop(sk);
+	} else if (unlikely(tp->repair)) {
+		sk->sk_err = ECONNABORTED;
 	} else if (tcp_need_reset(old_state) ||
 		   (tp->snd_nxt != tp->write_seq &&
 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -2125,6 +2277,74 @@
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
+static inline int tcp_can_repair_sock(struct sock *sk)
+{
+	return capable(CAP_NET_ADMIN) &&
+		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
+}
+
+static int tcp_repair_options_est(struct tcp_sock *tp, char __user *optbuf, unsigned int len)
+{
+	/*
+	 * Options are stored in CODE:VALUE form where CODE is 8bit and VALUE
+	 * fits the respective TCPOLEN_ size
+	 */
+
+	while (len > 0) {
+		u8 opcode;
+
+		if (get_user(opcode, optbuf))
+			return -EFAULT;
+
+		optbuf++;
+		len--;
+
+		switch (opcode) {
+		case TCPOPT_MSS: {
+			u16 in_mss;
+
+			if (len < sizeof(in_mss))
+				return -ENODATA;
+			if (get_user(in_mss, optbuf))
+				return -EFAULT;
+
+			tp->rx_opt.mss_clamp = in_mss;
+
+			optbuf += sizeof(in_mss);
+			len -= sizeof(in_mss);
+			break;
+		}
+		case TCPOPT_WINDOW: {
+			u8 wscale;
+
+			if (len < sizeof(wscale))
+				return -ENODATA;
+			if (get_user(wscale, optbuf))
+				return -EFAULT;
+
+			if (wscale > 14)
+				return -EFBIG;
+
+			tp->rx_opt.snd_wscale = wscale;
+
+			optbuf += sizeof(wscale);
+			len -= sizeof(wscale);
+			break;
+		}
+		case TCPOPT_SACK_PERM:
+			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
+			if (sysctl_tcp_fack)
+				tcp_enable_fack(tp);
+			break;
+		case TCPOPT_TIMESTAMP:
+			tp->rx_opt.tstamp_ok = 1;
+			break;
+		}
+	}
+
+	return 0;
+}
+
 /*
  *	Socket option code for TCP.
  */
@@ -2297,6 +2517,51 @@
 			tp->thin_dupack = val;
 		break;
 
+	case TCP_REPAIR:
+		if (!tcp_can_repair_sock(sk))
+			err = -EPERM;
+		else if (val == 1) {
+			tp->repair = 1;
+			sk->sk_reuse = SK_FORCE_REUSE;
+			tp->repair_queue = TCP_NO_QUEUE;
+		} else if (val == 0) {
+			tp->repair = 0;
+			sk->sk_reuse = SK_NO_REUSE;
+			tcp_send_window_probe(sk);
+		} else
+			err = -EINVAL;
+
+		break;
+
+	case TCP_REPAIR_QUEUE:
+		if (!tp->repair)
+			err = -EPERM;
+		else if (val < TCP_QUEUES_NR)
+			tp->repair_queue = val;
+		else
+			err = -EINVAL;
+		break;
+
+	case TCP_QUEUE_SEQ:
+		if (sk->sk_state != TCP_CLOSE)
+			err = -EPERM;
+		else if (tp->repair_queue == TCP_SEND_QUEUE)
+			tp->write_seq = val;
+		else if (tp->repair_queue == TCP_RECV_QUEUE)
+			tp->rcv_nxt = val;
+		else
+			err = -EINVAL;
+		break;
+
+	case TCP_REPAIR_OPTIONS:
+		if (!tp->repair)
+			err = -EINVAL;
+		else if (sk->sk_state == TCP_ESTABLISHED)
+			err = tcp_repair_options_est(tp, optval, optlen);
+		else
+			err = -EPERM;
+		break;
+
 	case TCP_CORK:
 		/* When set indicates to always queue non-full frames.
 		 * Later the user clears this option and we transmit
@@ -2530,6 +2795,8 @@
 		val = tp->mss_cache;
 		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
 			val = tp->rx_opt.user_mss;
+		if (tp->repair)
+			val = tp->rx_opt.mss_clamp;
 		break;
 	case TCP_NODELAY:
 		val = !!(tp->nonagle&TCP_NAGLE_OFF);
@@ -2632,6 +2899,26 @@
 		val = tp->thin_dupack;
 		break;
 
+	case TCP_REPAIR:
+		val = tp->repair;
+		break;
+
+	case TCP_REPAIR_QUEUE:
+		if (tp->repair)
+			val = tp->repair_queue;
+		else
+			return -EINVAL;
+		break;
+
+	case TCP_QUEUE_SEQ:
+		if (tp->repair_queue == TCP_SEND_QUEUE)
+			val = tp->write_seq;
+		else if (tp->repair_queue == TCP_RECV_QUEUE)
+			val = tp->rcv_nxt;
+		else
+			return -EINVAL;
+		break;
+
 	case TCP_USER_TIMEOUT:
 		val = jiffies_to_msecs(icsk->icsk_user_timeout);
 		break;
@@ -2675,7 +2962,7 @@
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct tcphdr *th;
-	unsigned thlen;
+	unsigned int thlen;
 	unsigned int seq;
 	__be32 delta;
 	unsigned int oldlen;
@@ -3033,9 +3320,9 @@
 	struct scatterlist sg;
 	const struct tcphdr *tp = tcp_hdr(skb);
 	struct hash_desc *desc = &hp->md5_desc;
-	unsigned i;
-	const unsigned head_data_len = skb_headlen(skb) > header_len ?
-				       skb_headlen(skb) - header_len : 0;
+	unsigned int i;
+	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
+					   skb_headlen(skb) - header_len : 0;
 	const struct skb_shared_info *shi = skb_shinfo(skb);
 	struct sk_buff *frag_iter;
 
@@ -3302,8 +3589,7 @@
 
 	tcp_init_mem(&init_net);
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
-	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
-	limit = max(limit, 128UL);
+	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
 	max_share = min(4UL*1024*1024, limit);
 
 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
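
The new TCP repair socket options above are driven from user space with setsockopt(). A hedged usage sketch, assuming the TCP_REPAIR, TCP_REPAIR_QUEUE, and TCP_QUEUE_SEQ constants and the TCP_SEND_QUEUE queue id added by this series are visible through <linux/tcp.h>; per tcp_can_repair_sock() the caller needs CAP_NET_ADMIN and a socket in TCP_CLOSE or TCP_ESTABLISHED, and TCP_QUEUE_SEQ is only accepted while the socket is still closed:

	#include <netinet/in.h>
	#include <linux/tcp.h>
	#include <sys/socket.h>

	/* Illustrative only: put a closed socket into repair mode and preload
	 * its send-queue sequence number before restoring the connection.
	 */
	static int enter_repair_mode(int fd, unsigned int snd_seq)
	{
		int on = 1;
		int q = TCP_SEND_QUEUE;

		if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)) < 0)
			return -1;
		if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q)) < 0)
			return -1;
		return setsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ,
				  &snd_seq, sizeof(snd_seq));
	}

Clearing the mode later (val == 0) drops sk_reuse back to SK_NO_REUSE and triggers a window probe, as the TCP_REPAIR case above shows.
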
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e886e2f..c93b0cb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -175,7 +175,7 @@
 static void tcp_incr_quickack(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
+	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 
 	if (quickacks == 0)
 		quickacks = 2;
@@ -335,6 +335,7 @@
 			incr = __tcp_grow_window(sk, skb);
 
 		if (incr) {
+			incr = max_t(int, incr, 2 * skb->len);
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
 					       tp->window_clamp);
 			inet_csk(sk)->icsk_ack.quick |= 1;
@@ -474,8 +475,11 @@
 		if (!win_dep) {
 			m -= (new_sample >> 3);
 			new_sample += m;
-		} else if (m < new_sample)
-			new_sample = m << 3;
+		} else {
+			m <<= 3;
+			if (m < new_sample)
+				new_sample = m;
+		}
 	} else {
 		/* No previous measure. */
 		new_sample = m << 3;
@@ -933,7 +937,7 @@
 	tcp_set_rto(sk);
 reset:
 	if (tp->srtt == 0) {
-		/* RFC2988bis: We've failed to get a valid RTT sample from
+		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
 		 * 3WHS. This is most likely due to retransmission,
 		 * including spurious one. Reset the RTO back to 3secs
 		 * from the more aggressive 1sec to avoid more spurious
@@ -943,7 +947,7 @@
 		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 	}
 	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
-	 * retransmitted. In light of RFC2988bis' more aggressive 1sec
+	 * retransmitted. In light of RFC6298 more aggressive 1sec
 	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
 	 * retransmission has occurred.
 	 */
@@ -4446,6 +4450,58 @@
 	return 0;
 }
 
+/**
+ * tcp_try_coalesce - try to merge skb to prior one
+ * @sk: socket
+ * @to: prior buffer
+ * @from: buffer to add in queue
+ *
+ * Before queueing skb @from after @to, try to merge them
+ * to reduce overall memory use and queue lengths, if cost is small.
+ * Packets in ofo or receive queues can stay a long time.
+ * Better try to coalesce them right now to avoid future collapses.
+ * Returns true if caller should free @from instead of queueing it
+ */
+static bool tcp_try_coalesce(struct sock *sk,
+			     struct sk_buff *to,
+			     struct sk_buff *from)
+{
+	int len = from->len;
+
+	if (tcp_hdr(from)->fin)
+		return false;
+	if (len <= skb_tailroom(to)) {
+		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
+merge:
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+		TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
+		TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
+		return true;
+	}
+	if (skb_headlen(from) == 0 &&
+	    !skb_has_frag_list(to) &&
+	    !skb_has_frag_list(from) &&
+	    (skb_shinfo(to)->nr_frags +
+	     skb_shinfo(from)->nr_frags <= MAX_SKB_FRAGS)) {
+		int delta = from->truesize - ksize(from->head) -
+			    SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+		WARN_ON_ONCE(delta < len);
+		memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
+		       skb_shinfo(from)->frags,
+		       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
+		skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
+		skb_shinfo(from)->nr_frags = 0;
+		to->truesize += delta;
+		atomic_add(delta, &sk->sk_rmem_alloc);
+		sk_mem_charge(sk, delta);
+		to->len += len;
+		to->data_len += len;
+		goto merge;
+	}
+	return false;
+}
+
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
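As a rough illustration of the first branch of tcp_try_coalesce() above (copy the new segment into the previous buffer's unused tailroom when it fits, so the small skb can be freed), here is a simplified user-space sketch over plain byte buffers; it only models the tailroom case, not the page-fragment stealing path, and the structure is hypothetical.

#include <string.h>
#include <stdbool.h>
#include <stddef.h>

struct buf {
	unsigned char data[2048];
	size_t len;		/* bytes currently used */
	unsigned int end_seq;	/* sequence number after the last byte */
};

/* Append 'from' to 'to' if it fits in the spare room; the caller may
 * then free 'from' instead of queueing it, mirroring the kernel helper. */
static bool try_coalesce(struct buf *to, const struct buf *from)
{
	size_t tailroom = sizeof(to->data) - to->len;

	if (from->len > tailroom)
		return false;

	memcpy(to->data + to->len, from->data, from->len);
	to->len += from->len;
	to->end_seq = from->end_seq;
	return true;
}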
@@ -4484,23 +4540,11 @@
 	end_seq = TCP_SKB_CB(skb)->end_seq;
 
 	if (seq == TCP_SKB_CB(skb1)->end_seq) {
-		/* Packets in ofo can stay in queue a long time.
-		 * Better try to coalesce them right now
-		 * to avoid future tcp_collapse_ofo_queue(),
-		 * probably the most expensive function in tcp stack.
-		 */
-		if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) {
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPRCVCOALESCE);
-			BUG_ON(skb_copy_bits(skb, 0,
-					     skb_put(skb1, skb->len),
-					     skb->len));
-			TCP_SKB_CB(skb1)->end_seq = end_seq;
-			TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
+		if (!tcp_try_coalesce(sk, skb1, skb)) {
+			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+		} else {
 			__kfree_skb(skb);
 			skb = NULL;
-		} else {
-			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 		}
 
 		if (!tp->rx_opt.num_sacks ||
@@ -4621,13 +4665,18 @@
 		}
 
 		if (eaten <= 0) {
+			struct sk_buff *tail;
 queue_and_out:
 			if (eaten < 0 &&
 			    tcp_try_rmem_schedule(sk, skb->truesize))
 				goto drop;
 
-			skb_set_owner_r(skb, sk);
-			__skb_queue_tail(&sk->sk_receive_queue, skb);
+			tail = skb_peek_tail(&sk->sk_receive_queue);
+			eaten = (tail && tcp_try_coalesce(sk, tail, skb)) ? 1 : 0;
+			if (eaten <= 0) {
+				skb_set_owner_r(skb, sk);
+				__skb_queue_tail(&sk->sk_receive_queue, skb);
+			}
 		}
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		if (skb->len)
@@ -5225,7 +5274,7 @@
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+		tp->ucopy.dma_chan = net_dma_find_channel();
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
@@ -5322,6 +5371,14 @@
 	return 0;
 }
 
+void tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen)
+{
+	__skb_pull(skb, hdrlen);
+	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	skb_set_owner_r(skb, sk);
+	tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+}
+
 /*
  *	TCP receive function for the ESTABLISHED state.
  *
@@ -5487,10 +5544,7 @@
 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 				/* Bulk data transfer: receiver */
-				__skb_pull(skb, tcp_header_len);
-				__skb_queue_tail(&sk->sk_receive_queue, skb);
-				skb_set_owner_r(skb, sk);
-				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+				tcp_queue_rcv(sk, skb, tcp_header_len);
 			}
 
 			tcp_event_data_recv(sk, skb);
@@ -5556,6 +5610,44 @@
 }
 EXPORT_SYMBOL(tcp_rcv_established);
 
+void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	tcp_set_state(sk, TCP_ESTABLISHED);
+
+	if (skb != NULL)
+		security_inet_conn_established(sk, skb);
+
+	/* Make sure socket is routed, for correct metrics.  */
+	icsk->icsk_af_ops->rebuild_header(sk);
+
+	tcp_init_metrics(sk);
+
+	tcp_init_congestion_control(sk);
+
+	/* Prevent spurious tcp_cwnd_restart() on first data
+	 * packet.
+	 */
+	tp->lsndtime = tcp_time_stamp;
+
+	tcp_init_buffer_space(sk);
+
+	if (sock_flag(sk, SOCK_KEEPOPEN))
+		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
+
+	if (!tp->rx_opt.snd_wscale)
+		__tcp_fast_path_on(tp, tp->snd_wnd);
+	else
+		tp->pred_flags = 0;
+
+	if (!sock_flag(sk, SOCK_DEAD)) {
+		sk->sk_state_change(sk);
+		sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+	}
+}
+
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 					 const struct tcphdr *th, unsigned int len)
 {
@@ -5688,36 +5780,8 @@
 		}
 
 		smp_mb();
-		tcp_set_state(sk, TCP_ESTABLISHED);
 
-		security_inet_conn_established(sk, skb);
-
-		/* Make sure socket is routed, for correct metrics.  */
-		icsk->icsk_af_ops->rebuild_header(sk);
-
-		tcp_init_metrics(sk);
-
-		tcp_init_congestion_control(sk);
-
-		/* Prevent spurious tcp_cwnd_restart() on first data
-		 * packet.
-		 */
-		tp->lsndtime = tcp_time_stamp;
-
-		tcp_init_buffer_space(sk);
-
-		if (sock_flag(sk, SOCK_KEEPOPEN))
-			inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
-
-		if (!tp->rx_opt.snd_wscale)
-			__tcp_fast_path_on(tp, tp->snd_wnd);
-		else
-			tp->pred_flags = 0;
-
-		if (!sock_flag(sk, SOCK_DEAD)) {
-			sk->sk_state_change(sk);
-			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
-		}
+		tcp_finish_connect(sk, skb);
 
 		if (sk->sk_write_pending ||
 		    icsk->icsk_accept_queue.rskq_defer_accept ||
@@ -5731,8 +5795,6 @@
 			 */
 			inet_csk_schedule_ack(sk);
 			icsk->icsk_ack.lrcvtime = tcp_time_stamp;
-			icsk->icsk_ack.ato	 = TCP_ATO_MIN;
-			tcp_incr_quickack(sk);
 			tcp_enter_quickack_mode(sk);
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 						  TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3a25cf7..cf97e98 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -138,6 +138,14 @@
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 
+static int tcp_repair_connect(struct sock *sk)
+{
+	tcp_connect_init(sk);
+	tcp_finish_connect(sk, NULL);
+
+	return 0;
+}
+
 /* This will initiate an outgoing connection. */
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -196,7 +204,8 @@
 		/* Reset inherited state */
 		tp->rx_opt.ts_recent	   = 0;
 		tp->rx_opt.ts_recent_stamp = 0;
-		tp->write_seq		   = 0;
+		if (likely(!tp->repair))
+			tp->write_seq	   = 0;
 	}
 
 	if (tcp_death_row.sysctl_tw_recycle &&
@@ -247,7 +256,7 @@
 	sk->sk_gso_type = SKB_GSO_TCPV4;
 	sk_setup_caps(sk, &rt->dst);
 
-	if (!tp->write_seq)
+	if (!tp->write_seq && likely(!tp->repair))
 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
 							   inet->inet_daddr,
 							   inet->inet_sport,
@@ -255,7 +264,11 @@
 
 	inet->inet_id = tp->write_seq ^ jiffies;
 
-	err = tcp_connect(sk);
+	if (likely(!tp->repair))
+		err = tcp_connect(sk);
+	else
+		err = tcp_repair_connect(sk);
+
 	rt = NULL;
 	if (err)
 		goto failure;
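The repair branch above only fires when user space has put the socket into repair mode before calling connect(), so that tcp_repair_connect() restores an established socket without sending a SYN. A hedged sketch of that user-space sequence follows; the TCP_REPAIR option value (19) and its exact semantics are assumptions about the companion UAPI change, which is not part of this hunk.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

#ifndef TCP_REPAIR
#define TCP_REPAIR 19	/* assumed value from the companion UAPI patch */
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in peer = {
		.sin_family = AF_INET,
		.sin_port   = htons(12345),
		.sin_addr   = { htonl(INADDR_LOOPBACK) },
	};

	if (fd < 0)
		return 1;

	/* Requires CAP_NET_ADMIN; with repair on, connect() takes the
	 * tcp_repair_connect() path instead of the normal handshake. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one)) < 0)
		perror("setsockopt(TCP_REPAIR)");
	else if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
		perror("connect");

	close(fd);
	return 0;
}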
@@ -1730,7 +1743,7 @@
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
@@ -1739,7 +1752,8 @@
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb,
+					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
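The extra argument gives the backlog an explicit byte budget (here sk_rcvbuf + sk_sndbuf). The check it feeds is assumed to look roughly like the sketch below, based on the companion net/sock.h change that is not shown in this hunk.

/* Assumed shape of the limit check: the socket is considered full once
 * the backlog plus already-charged receive memory exceed the supplied
 * limit, so a flooded socket stops absorbing unbounded backlog.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk,
				     const struct sk_buff *skb,
				     unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}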
@@ -1875,64 +1889,15 @@
 static int tcp_v4_init_sock(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
 
-	skb_queue_head_init(&tp->out_of_order_queue);
-	tcp_init_xmit_timers(sk);
-	tcp_prequeue_init(tp);
-
-	icsk->icsk_rto = TCP_TIMEOUT_INIT;
-	tp->mdev = TCP_TIMEOUT_INIT;
-
-	/* So many TCP implementations out there (incorrectly) count the
-	 * initial SYN frame in their delayed-ACK and congestion control
-	 * algorithms that we must have the following bandaid to talk
-	 * efficiently to them.  -DaveM
-	 */
-	tp->snd_cwnd = TCP_INIT_CWND;
-
-	/* See draft-stevens-tcpca-spec-01 for discussion of the
-	 * initialization of these values.
-	 */
-	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-	tp->snd_cwnd_clamp = ~0;
-	tp->mss_cache = TCP_MSS_DEFAULT;
-
-	tp->reordering = sysctl_tcp_reordering;
-	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
-
-	sk->sk_state = TCP_CLOSE;
-
-	sk->sk_write_space = sk_stream_write_space;
-	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+	tcp_init_sock(sk);
 
 	icsk->icsk_af_ops = &ipv4_specific;
-	icsk->icsk_sync_mss = tcp_sync_mss;
+
 #ifdef CONFIG_TCP_MD5SIG
-	tp->af_specific = &tcp_sock_ipv4_specific;
+	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
 #endif
 
-	/* TCP Cookie Transactions */
-	if (sysctl_tcp_cookie_size > 0) {
-		/* Default, cookies without s_data_payload. */
-		tp->cookie_values =
-			kzalloc(sizeof(*tp->cookie_values),
-				sk->sk_allocation);
-		if (tp->cookie_values != NULL)
-			kref_init(&tp->cookie_values->kref);
-	}
-	/* Presumed zeroed, in order of appearance:
-	 *	cookie_in_always, cookie_out_never,
-	 *	s_data_constant, s_data_in, s_data_out
-	 */
-	sk->sk_sndbuf = sysctl_tcp_wmem[1];
-	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
-
-	local_bh_disable();
-	sock_update_memcg(sk);
-	sk_sockets_allocated_inc(sk);
-	local_bh_enable();
-
 	return 0;
 }
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 364784a..7b7cf38 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -563,13 +563,13 @@
 /* Compute TCP options for SYN packets. This is not the final
  * network wire format yet.
  */
-static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 				struct tcp_out_options *opts,
 				struct tcp_md5sig_key **md5)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_cookie_values *cvp = tp->cookie_values;
-	unsigned remaining = MAX_TCP_OPTION_SPACE;
+	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
 			 tcp_cookie_size_check(cvp->cookie_desired) :
 			 0;
@@ -663,15 +663,15 @@
 }
 
 /* Set up TCP options for SYN-ACKs. */
-static unsigned tcp_synack_options(struct sock *sk,
+static unsigned int tcp_synack_options(struct sock *sk,
 				   struct request_sock *req,
-				   unsigned mss, struct sk_buff *skb,
+				   unsigned int mss, struct sk_buff *skb,
 				   struct tcp_out_options *opts,
 				   struct tcp_md5sig_key **md5,
 				   struct tcp_extend_values *xvp)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
-	unsigned remaining = MAX_TCP_OPTION_SPACE;
+	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 	u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
 			 xvp->cookie_plus :
 			 0;
@@ -742,13 +742,13 @@
 /* Compute TCP options for ESTABLISHED sockets. This is not the
  * final wire format yet.
  */
-static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
 					struct tcp_out_options *opts,
 					struct tcp_md5sig_key **md5)
 {
 	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
-	unsigned size = 0;
+	unsigned int size = 0;
 	unsigned int eff_sacks;
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -770,9 +770,9 @@
 
 	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 	if (unlikely(eff_sacks)) {
-		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
+		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 		opts->num_sack_blocks =
-			min_t(unsigned, eff_sacks,
+			min_t(unsigned int, eff_sacks,
 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 			      TCPOLEN_SACK_PERBLOCK);
 		size += TCPOLEN_SACK_BASE_ALIGNED +
@@ -801,7 +801,7 @@
 	struct tcp_sock *tp;
 	struct tcp_skb_cb *tcb;
 	struct tcp_out_options opts;
-	unsigned tcp_options_size, tcp_header_size;
+	unsigned int tcp_options_size, tcp_header_size;
 	struct tcp_md5sig_key *md5;
 	struct tcphdr *th;
 	int err;
@@ -1096,6 +1096,7 @@
 	eat = min_t(int, len, skb_headlen(skb));
 	if (eat) {
 		__skb_pull(skb, eat);
+		skb->avail_size -= eat;
 		len -= eat;
 		if (!len)
 			return;
@@ -1258,7 +1259,7 @@
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct dst_entry *dst = __sk_dst_get(sk);
 	u32 mss_now;
-	unsigned header_len;
+	unsigned int header_len;
 	struct tcp_out_options opts;
 	struct tcp_md5sig_key *md5;
 
@@ -1389,7 +1390,7 @@
  */
 static inline int tcp_nagle_check(const struct tcp_sock *tp,
 				  const struct sk_buff *skb,
-				  unsigned mss_now, int nonagle)
+				  unsigned int mss_now, int nonagle)
 {
 	return skb->len < mss_now &&
 		((nonagle & TCP_NAGLE_CORK) ||
@@ -2060,7 +2061,7 @@
 		/* Punt if not enough space exists in the first SKB for
 		 * the data in the second
 		 */
-		if (skb->len > skb_tailroom(to))
+		if (skb->len > skb_availroom(to))
 			break;
 
 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
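skb_availroom() and the avail_size field touched a few hunks earlier come from a companion sk_buff change that is not shown here; it is assumed to behave roughly as below, i.e. spare room measured against the size the sender originally reserved rather than the full tailroom of the allocation.

/* Assumed semantics: avail_size is recorded when the skb is allocated
 * for sending and shrinks as header bytes are trimmed (see the
 * __pskb_trim_head hunk above); availroom is what may still be appended
 * without exceeding that original reservation.
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	return skb->avail_size - skb->len;
}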
@@ -2561,7 +2562,7 @@
 EXPORT_SYMBOL(tcp_make_synack);
 
 /* Do all connect socket setups that can be done AF independent. */
-static void tcp_connect_init(struct sock *sk)
+void tcp_connect_init(struct sock *sk)
 {
 	const struct dst_entry *dst = __sk_dst_get(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2616,9 +2617,12 @@
 	tp->snd_una = tp->write_seq;
 	tp->snd_sml = tp->write_seq;
 	tp->snd_up = tp->write_seq;
-	tp->rcv_nxt = 0;
-	tp->rcv_wup = 0;
-	tp->copied_seq = 0;
+	tp->snd_nxt = tp->write_seq;
+
+	if (likely(!tp->repair))
+		tp->rcv_nxt = 0;
+	tp->rcv_wup = tp->rcv_nxt;
+	tp->copied_seq = tp->rcv_nxt;
 
 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
 	inet_csk(sk)->icsk_retransmits = 0;
@@ -2641,7 +2645,6 @@
 	/* Reserve space for headers. */
 	skb_reserve(buff, MAX_TCP_HEADER);
 
-	tp->snd_nxt = tp->write_seq;
 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
 	TCP_ECN_send_syn(sk, buff);
 
@@ -2790,6 +2793,15 @@
 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
 
+void tcp_send_window_probe(struct sock *sk)
+{
+	if (sk->sk_state == TCP_ESTABLISHED) {
+		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
+		tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
+		tcp_xmit_probe_skb(sk, 0);
+	}
+}
+
 /* Initiate keepalive or window probe from timer. */
 int tcp_write_wakeup(struct sock *sk)
 {
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index a981cdc..4526fe6 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -91,7 +91,7 @@
  * Note: arguments must match tcp_rcv_established()!
  */
 static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-			       struct tcphdr *th, unsigned len)
+			       struct tcphdr *th, unsigned int len)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_sock *inet = inet_sk(sk);
@@ -138,7 +138,7 @@
 	.entry	= jtcp_rcv_established,
 };
 
-static int tcpprobe_open(struct inode * inode, struct file * file)
+static int tcpprobe_open(struct inode *inode, struct file *file)
 {
 	/* Reset (empty) log */
 	spin_lock_bh(&tcp_probe.lock);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fe14105..279fd08 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -107,6 +107,7 @@
 #include <net/checksum.h>
 #include <net/xfrm.h>
 #include <trace/events/udp.h>
+#include <linux/static_key.h>
 #include "udp_impl.h"
 
 struct udp_table udp_table __read_mostly;
@@ -206,7 +207,7 @@
 
 	if (!snum) {
 		int low, high, remaining;
-		unsigned rand;
+		unsigned int rand;
 		unsigned short first, last;
 		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 
@@ -846,7 +847,7 @@
 	 *	Get and verify the address.
 	 */
 	if (msg->msg_name) {
-		struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name;
+		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
 		if (msg->msg_namelen < sizeof(*usin))
 			return -EINVAL;
 		if (usin->sin_family != AF_INET) {
@@ -1379,6 +1380,14 @@
 
 }
 
+static struct static_key udp_encap_needed __read_mostly;
+void udp_encap_enable(void)
+{
+	if (!static_key_enabled(&udp_encap_needed))
+		static_key_slow_inc(&udp_encap_needed);
+}
+EXPORT_SYMBOL(udp_encap_enable);
+
 /* returns:
  *  -1: error
  *   0: success
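The static key keeps the encapsulation test out of the UDP fast path until the first encapsulated socket appears: udp_encap_enable() flips the key once from the slow setsockopt path, and static_key_false() compiles to a patched no-op branch until then. The same pattern, sketched in isolation (everything except the static_key API itself is an illustrative name):

#include <linux/static_key.h>

static struct static_key feature_needed __read_mostly;	/* defaults to false */

static void do_expensive_feature_work(void) { /* illustrative */ }

/* slow path: flip the key once, patching the branch sites */
void feature_enable(void)
{
	if (!static_key_enabled(&feature_needed))
		static_key_slow_inc(&feature_needed);
}

/* hot path: stays a straight-line no-op until the key is enabled */
void per_packet_work(void)
{
	if (static_key_false(&feature_needed))
		do_expensive_feature_work();
}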
@@ -1400,7 +1409,7 @@
 		goto drop;
 	nf_reset(skb);
 
-	if (up->encap_type) {
+	if (static_key_false(&udp_encap_needed) && up->encap_type) {
 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 
 		/*
@@ -1470,7 +1479,7 @@
 		goto drop;
 
 
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
 		goto drop;
 
 	rc = 0;
@@ -1479,7 +1488,7 @@
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		goto drop;
 	}
@@ -1760,6 +1769,7 @@
 			/* FALLTHROUGH */
 		case UDP_ENCAP_L2TPINUDP:
 			up->encap_type = val;
+			udp_encap_enable();
 			break;
 		default:
 			err = -ENOPROTOOPT;
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index aaad650..5a681e2 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,7 @@
 			    size_t len, int noblock, int flags, int *addr_len);
 extern int	udp_sendpage(struct sock *sk, struct page *page, int offset,
 			     size_t size, int flags);
-extern int	udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
+extern int	udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 extern void	udp_destroy_sock(struct sock *sk);
 
 #ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index a0b4c5d..0d3426c 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -152,7 +152,7 @@
 
 		case IPPROTO_AH:
 			if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
-				__be32 *ah_hdr = (__be32*)xprth;
+				__be32 *ah_hdr = (__be32 *)xprth;
 
 				fl4->fl4_ipsec_spi = ah_hdr[1];
 			}
@@ -298,8 +298,8 @@
 	xfrm4_state_init();
 	xfrm4_policy_init();
 #ifdef CONFIG_SYSCTL
-	sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
-						xfrm4_policy_table);
+	sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4",
+					 xfrm4_policy_table);
 #endif
 }
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6a3bb60..e3b3421 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -149,7 +149,7 @@
 				 unsigned long event);
 static int addrconf_ifdown(struct net_device *dev, int how);
 
-static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
+static void addrconf_dad_start(struct inet6_ifaddr *ifp);
 static void addrconf_dad_timer(unsigned long data);
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
 static void addrconf_dad_run(struct inet6_dev *idev);
@@ -336,10 +336,9 @@
 	snmp6_free_dev(idev);
 	kfree_rcu(idev, rcu);
 }
-
 EXPORT_SYMBOL(in6_dev_finish_destroy);
 
-static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
+static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 {
 	struct inet6_dev *ndev;
 
@@ -441,7 +440,7 @@
 	return ndev;
 }
 
-static struct inet6_dev * ipv6_find_idev(struct net_device *dev)
+static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
 {
 	struct inet6_dev *idev;
 
@@ -803,8 +802,7 @@
 				ip6_del_rt(rt);
 				rt = NULL;
 			} else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
-				rt->dst.expires = expires;
-				rt->rt6i_flags |= RTF_EXPIRES;
+				rt6_set_expires(rt, expires);
 			}
 		}
 		dst_release(&rt->dst);
@@ -930,7 +928,7 @@
 	ift->tstamp = tmp_tstamp;
 	spin_unlock_bh(&ift->lock);
 
-	addrconf_dad_start(ift, 0);
+	addrconf_dad_start(ift);
 	in6_ifa_put(ift);
 	in6_dev_put(idev);
 out:
@@ -1333,7 +1331,6 @@
 	rcu_read_unlock();
 	return onlink;
 }
-
 EXPORT_SYMBOL(ipv6_chk_prefix);
 
 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
@@ -1523,7 +1520,7 @@
 	if (dev->addr_len != ARCNET_ALEN)
 		return -1;
 	memset(eui, 0, 7);
-	eui[7] = *(u8*)dev->dev_addr;
+	eui[7] = *(u8 *)dev->dev_addr;
 	return 0;
 }
 
@@ -1668,7 +1665,8 @@
 	in6_dev_put(idev);
 }
 
-static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) {
+static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+{
 	int ret = 0;
 
 	if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
@@ -1887,11 +1885,9 @@
 				rt = NULL;
 			} else if (addrconf_finite_timeout(rt_expires)) {
 				/* not infinity */
-				rt->dst.expires = jiffies + rt_expires;
-				rt->rt6i_flags |= RTF_EXPIRES;
+				rt6_set_expires(rt, jiffies + rt_expires);
 			} else {
-				rt->rt6i_flags &= ~RTF_EXPIRES;
-				rt->dst.expires = 0;
+				rt6_clean_expires(rt);
 			}
 		} else if (valid_lft) {
 			clock_t expires = 0;
@@ -1911,7 +1907,7 @@
 	/* Try to figure out our local address for this prefix */
 
 	if (pinfo->autoconf && in6_dev->cnf.autoconf) {
-		struct inet6_ifaddr * ifp;
+		struct inet6_ifaddr *ifp;
 		struct in6_addr addr;
 		int create = 0, update_lft = 0;
 
@@ -1960,7 +1956,7 @@
 
 			update_lft = create = 1;
 			ifp->cstamp = jiffies;
-			addrconf_dad_start(ifp, RTF_ADDRCONF|RTF_PREFIX_RT);
+			addrconf_dad_start(ifp);
 		}
 
 		if (ifp) {
@@ -2239,7 +2235,7 @@
 		 * that the Optimistic flag should not be set for
 		 * manually configured addresses
 		 */
-		addrconf_dad_start(ifp, 0);
+		addrconf_dad_start(ifp);
 		in6_ifa_put(ifp);
 		addrconf_verify(0);
 		return 0;
@@ -2365,9 +2361,9 @@
 	}
 
 	for_each_netdev(net, dev) {
-		struct in_device * in_dev = __in_dev_get_rtnl(dev);
+		struct in_device *in_dev = __in_dev_get_rtnl(dev);
 		if (in_dev && (dev->flags & IFF_UP)) {
-			struct in_ifaddr * ifa;
+			struct in_ifaddr *ifa;
 
 			int flag = scope;
 
@@ -2413,7 +2409,7 @@
 
 static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
 {
-	struct inet6_ifaddr * ifp;
+	struct inet6_ifaddr *ifp;
 	u32 addr_flags = IFA_F_PERMANENT;
 
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -2426,7 +2422,7 @@
 	ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
 	if (!IS_ERR(ifp)) {
 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
-		addrconf_dad_start(ifp, 0);
+		addrconf_dad_start(ifp);
 		in6_ifa_put(ifp);
 	}
 }
@@ -2434,7 +2430,7 @@
 static void addrconf_dev_config(struct net_device *dev)
 {
 	struct in6_addr addr;
-	struct inet6_dev    * idev;
+	struct inet6_dev *idev;
 
 	ASSERT_RTNL();
 
@@ -2570,7 +2566,7 @@
 }
 
 static int addrconf_notify(struct notifier_block *this, unsigned long event,
-			   void * data)
+			   void *data)
 {
 	struct net_device *dev = (struct net_device *) data;
 	struct inet6_dev *idev = __in6_dev_get(dev);
@@ -2921,7 +2917,7 @@
 	addrconf_mod_timer(ifp, AC_DAD, rand_num);
 }
 
-static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
+static void addrconf_dad_start(struct inet6_ifaddr *ifp)
 {
 	struct inet6_dev *idev = ifp->idev;
 	struct net_device *dev = idev->dev;
@@ -3794,7 +3790,7 @@
 	return inet6_dump_addr(skb, cb, type);
 }
 
-static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
+static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 			     void *arg)
 {
 	struct net *net = sock_net(in_skb->sk);
@@ -3989,14 +3985,14 @@
 	struct nlattr *nla;
 	struct ifla_cacheinfo ci;
 
-	NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
-
+	if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
+		goto nla_put_failure;
 	ci.max_reasm_len = IPV6_MAXPLEN;
 	ci.tstamp = cstamp_delta(idev->tstamp);
 	ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
 	ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
-	NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
-
+	if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
+		goto nla_put_failure;
 	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
 	if (nla == NULL)
 		goto nla_put_failure;
@@ -4061,15 +4057,13 @@
 	hdr->ifi_flags = dev_get_flags(dev);
 	hdr->ifi_change = 0;
 
-	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
-
-	if (dev->addr_len)
-		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-
-	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
-	if (dev->ifindex != dev->iflink)
-		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
+	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+	    (dev->addr_len &&
+	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
+	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+	    (dev->ifindex != dev->iflink &&
+	     nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+		goto nla_put_failure;
 	protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
 	if (protoinfo == NULL)
 		goto nla_put_failure;
@@ -4182,12 +4176,12 @@
 	if (pinfo->autoconf)
 		pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
 
-	NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix);
-
+	if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
+		goto nla_put_failure;
 	ci.preferred_time = ntohl(pinfo->prefered);
 	ci.valid_time = ntohl(pinfo->valid);
-	NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci);
-
+	if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
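The NLA_PUT* macros hid a jump to nla_put_failure inside themselves; the conversions throughout this series make that control flow explicit by checking each nla_put*() return value. The resulting idiom, shown for a hypothetical fill function (struct example and the EXAMPLE_* identifiers are made up for illustration):

/* Hypothetical netlink fill function using the converted idiom. */
static int fill_example(struct sk_buff *skb, const struct example *e,
			u32 portid, u32 seq)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, EXAMPLE_CMD, 0, 0);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_u32(skb, EXAMPLE_ATTR_ID, e->id) ||
	    nla_put(skb, EXAMPLE_ATTR_ADDR, 16, &e->addr))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);	/* trim the partially built message */
	return -EMSGSIZE;
}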
@@ -4371,7 +4365,6 @@
 {
 	struct ctl_table_header *sysctl_header;
 	ctl_table addrconf_vars[DEVCONF_MAX+1];
-	char *dev_name;
 } addrconf_sysctl __read_mostly = {
 	.sysctl_header = NULL,
 	.addrconf_vars = {
@@ -4600,17 +4593,7 @@
 {
 	int i;
 	struct addrconf_sysctl_table *t;
-
-#define ADDRCONF_CTL_PATH_DEV	3
-
-	struct ctl_path addrconf_ctl_path[] = {
-		{ .procname = "net", },
-		{ .procname = "ipv6", },
-		{ .procname = "conf", },
-		{ /* to be set */ },
-		{ },
-	};
-
+	char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
 
 	t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
 	if (t == NULL)
@@ -4622,27 +4605,15 @@
 		t->addrconf_vars[i].extra2 = net;
 	}
 
-	/*
-	 * Make a copy of dev_name, because '.procname' is regarded as const
-	 * by sysctl and we wouldn't want anyone to change it under our feet
-	 * (see SIOCSIFNAME).
-	 */
-	t->dev_name = kstrdup(dev_name, GFP_KERNEL);
-	if (!t->dev_name)
-		goto free;
+	snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
 
-	addrconf_ctl_path[ADDRCONF_CTL_PATH_DEV].procname = t->dev_name;
-
-	t->sysctl_header = register_net_sysctl_table(net, addrconf_ctl_path,
-			t->addrconf_vars);
+	t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars);
 	if (t->sysctl_header == NULL)
-		goto free_procname;
+		goto free;
 
 	p->sysctl = t;
 	return 0;
 
-free_procname:
-	kfree(t->dev_name);
 free:
 	kfree(t);
 out:
@@ -4659,7 +4630,6 @@
 	t = p->sysctl;
 	p->sysctl = NULL;
 	unregister_net_sysctl_table(t->sysctl_header);
-	kfree(t->dev_name);
 	kfree(t);
 }
 
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 399287e..d051e5f 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -8,9 +8,9 @@
 
 #define IPV6_ADDR_SCOPE_TYPE(scope)	((scope) << 16)
 
-static inline unsigned ipv6_addr_scope2type(unsigned scope)
+static inline unsigned int ipv6_addr_scope2type(unsigned int scope)
 {
-	switch(scope) {
+	switch (scope) {
 	case IPV6_ADDR_SCOPE_NODELOCAL:
 		return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
 			IPV6_ADDR_LOOPBACK);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 8ed1b93..0ad046c7 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -180,7 +180,7 @@
 	err = 0;
 	sk->sk_no_check = answer_no_check;
 	if (INET_PROTOSW_REUSE & answer_flags)
-		sk->sk_reuse = 1;
+		sk->sk_reuse = SK_CAN_REUSE;
 
 	inet = inet_sk(sk);
 	inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
@@ -1111,11 +1111,6 @@
 	if (err)
 		goto out_sock_register_fail;
 
-#ifdef CONFIG_SYSCTL
-	err = ipv6_static_sysctl_register();
-	if (err)
-		goto static_sysctl_fail;
-#endif
 	tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
 
 	/*
@@ -1242,10 +1237,6 @@
 icmp_fail:
 	unregister_pernet_subsys(&inet6_net_ops);
 register_pernet_fail:
-#ifdef CONFIG_SYSCTL
-	ipv6_static_sysctl_unregister();
-static_sysctl_fail:
-#endif
 	sock_unregister(PF_INET6);
 	rtnl_unregister_all(PF_INET6);
 out_sock_register_fail:
@@ -1272,9 +1263,6 @@
 	/* Disallow any further netlink messages */
 	rtnl_unregister_all(PF_INET6);
 
-#ifdef CONFIG_SYSCTL
-	ipv6_sysctl_unregister();
-#endif
 	udpv6_exit();
 	udplitev6_exit();
 	tcpv6_exit();
@@ -1302,9 +1290,6 @@
 	rawv6_exit();
 
 	unregister_pernet_subsys(&inet6_net_ops);
-#ifdef CONFIG_SYSCTL
-	ipv6_static_sysctl_unregister();
-#endif
 	proto_unregister(&rawv6_prot);
 	proto_unregister(&udplitev6_prot);
 	proto_unregister(&udpv6_prot);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 76832c8..7fba35a 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -98,7 +98,7 @@
 		sin.sin_port = usin->sin6_port;
 
 		err = ip4_datagram_connect(sk,
-					   (struct sockaddr*) &sin,
+					   (struct sockaddr *) &sin,
 					   sizeof(sin));
 
 ipv4_connected:
@@ -515,10 +515,10 @@
 		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
 
 		while (off <= opt->lastopt) {
-			unsigned len;
+			unsigned int len;
 			u8 *ptr = nh + off;
 
-			switch(nexthdr) {
+			switch (nexthdr) {
 			case IPPROTO_DSTOPTS:
 				nexthdr = ptr[0];
 				len = (ptr[1] + 1) << 3;
@@ -827,9 +827,8 @@
 			int tc;
 
 			err = -EINVAL;
-			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
 				goto exit_f;
-			}
 
 			tc = *(int *)CMSG_DATA(cmsg);
 			if (tc < -1 || tc > 0xff)
@@ -846,9 +845,8 @@
 			int df;
 
 			err = -EINVAL;
-			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
 				goto exit_f;
-			}
 
 			df = *(int *)CMSG_DATA(cmsg);
 			if (df < 0 || df > 1)
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 3d641b6..aa0a51e 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -153,6 +153,7 @@
 
 	while (len > 0) {
 		int optlen = nh[off + 1] + 2;
+		int i;
 
 		switch (nh[off]) {
 		case IPV6_TLV_PAD0:
@@ -160,6 +161,21 @@
 			break;
 
 		case IPV6_TLV_PADN:
+			/* RFC 2460 states that the purpose of PadN is
+			 * to align the containing header to multiples
+			 * of 8. 7 is therefore the highest valid value.
+			 * See also RFC 4942, Section 2.1.9.5.
+			 */
+			if (optlen > 7)
+				goto bad;
+			/* RFC 4942 recommends that receiving hosts
+			 * actively check that the PadN payload
+			 * contains only zeroes.
+			 */
+			for (i = 2; i < optlen; i++) {
+				if (nh[off + i] != 0)
+					goto bad;
+			}
 			break;
 
 		default: /* Other TLV code so scan list */
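For reference, a PadN option that passes the new checks has a data-length byte of at most 5 (total option length 7) and an all-zero payload; anything longer or with non-zero padding now takes the bad path. A small illustrative byte layout:

/* A hop-by-hop PadN option that passes the new checks: type 1 (PadN),
 * data length 4 (total option length 6 <= 7), payload all zeroes.
 */
static const unsigned char padn_ok[] = { 0x01, 0x04, 0x00, 0x00, 0x00, 0x00 };

/* Rejected under the new checks (illustrative):
 *   { 0x01, 0x06, 0, 0, 0, 0, 0, 0 }  -- total length 8 > 7
 *   { 0x01, 0x02, 0xaa, 0xbb }        -- non-zero padding bytes
 */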
@@ -722,7 +738,6 @@
 	if (opt->hopopt)
 		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
 }
-
 EXPORT_SYMBOL(ipv6_push_nfrag_opts);
 
 void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
@@ -738,20 +753,19 @@
 
 	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
 	if (opt2) {
-		long dif = (char*)opt2 - (char*)opt;
+		long dif = (char *)opt2 - (char *)opt;
 		memcpy(opt2, opt, opt->tot_len);
 		if (opt2->hopopt)
-			*((char**)&opt2->hopopt) += dif;
+			*((char **)&opt2->hopopt) += dif;
 		if (opt2->dst0opt)
-			*((char**)&opt2->dst0opt) += dif;
+			*((char **)&opt2->dst0opt) += dif;
 		if (opt2->dst1opt)
-			*((char**)&opt2->dst1opt) += dif;
+			*((char **)&opt2->dst1opt) += dif;
 		if (opt2->srcrt)
-			*((char**)&opt2->srcrt) += dif;
+			*((char **)&opt2->srcrt) += dif;
 	}
 	return opt2;
 }
-
 EXPORT_SYMBOL_GPL(ipv6_dup_options);
 
 static int ipv6_renew_option(void *ohdr,
@@ -892,5 +906,4 @@
 	fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
 	return orig;
 }
-
 EXPORT_SYMBOL_GPL(fl6_update_dst);
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 72957f4..7b1a884 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -21,6 +21,7 @@
 		 (nexthdr == NEXTHDR_NONE)	||
 		 (nexthdr == NEXTHDR_DEST);
 }
+EXPORT_SYMBOL(ipv6_ext_hdr);
 
 /*
  * Skip any extension headers. This is used by the ICMP module.
@@ -109,6 +110,4 @@
 	*nexthdrp = nexthdr;
 	return start;
 }
-
-EXPORT_SYMBOL(ipv6_ext_hdr);
 EXPORT_SYMBOL(ipv6_skip_exthdr);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b6c5731..0ff1cfd 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -22,8 +22,7 @@
 #include <net/ip6_route.h>
 #include <net/netlink.h>
 
-struct fib6_rule
-{
+struct fib6_rule {
 	struct fib_rule		common;
 	struct rt6key		src;
 	struct rt6key		dst;
@@ -215,14 +214,13 @@
 	frh->src_len = rule6->src.plen;
 	frh->tos = rule6->tclass;
 
-	if (rule6->dst.plen)
-		NLA_PUT(skb, FRA_DST, sizeof(struct in6_addr),
-			&rule6->dst.addr);
-
-	if (rule6->src.plen)
-		NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr),
-			&rule6->src.addr);
-
+	if ((rule6->dst.plen &&
+	     nla_put(skb, FRA_DST, sizeof(struct in6_addr),
+		     &rule6->dst.addr)) ||
+	    (rule6->src.plen &&
+	     nla_put(skb, FRA_SRC, sizeof(struct in6_addr),
+		     &rule6->src.addr)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 27ac95a..cc079d8 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -498,7 +498,7 @@
 	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
 			      len + sizeof(struct icmp6hdr),
 			      sizeof(struct icmp6hdr), hlimit,
-			      np->tclass, NULL, &fl6, (struct rt6_info*)dst,
+			      np->tclass, NULL, &fl6, (struct rt6_info *)dst,
 			      MSG_DONTWAIT, np->dontfrag);
 	if (err) {
 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
@@ -579,7 +579,7 @@
 
 	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
 				sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
-				(struct rt6_info*)dst, MSG_DONTWAIT,
+				(struct rt6_info *)dst, MSG_DONTWAIT,
 				np->dontfrag);
 
 	if (err) {
@@ -950,7 +950,6 @@
 
 	return fatal;
 }
-
 EXPORT_SYMBOL(icmpv6_err_convert);
 
 #ifdef CONFIG_SYSCTL
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 02dd203..e6cee52 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -28,7 +28,7 @@
 #include <net/inet6_connection_sock.h>
 
 int inet6_csk_bind_conflict(const struct sock *sk,
-			    const struct inet_bind_bucket *tb)
+			    const struct inet_bind_bucket *tb, bool relax)
 {
 	const struct sock *sk2;
 	const struct hlist_node *node;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5b27fbc..9371743 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -673,11 +673,10 @@
 					    &rt->rt6i_gateway)) {
 				if (!(iter->rt6i_flags & RTF_EXPIRES))
 					return -EEXIST;
-				iter->dst.expires = rt->dst.expires;
-				if (!(rt->rt6i_flags & RTF_EXPIRES)) {
-					iter->rt6i_flags &= ~RTF_EXPIRES;
-					iter->dst.expires = 0;
-				}
+				if (!(rt->rt6i_flags & RTF_EXPIRES))
+					rt6_clean_expires(iter);
+				else
+					rt6_set_expires(iter, rt->dst.expires);
 				return -EEXIST;
 			}
 		}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index b7867a1..1dd6329 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -705,9 +705,9 @@
 		struct ip6_flowlabel *fl = v;
 		seq_printf(seq,
 			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
-			   (unsigned)ntohl(fl->label),
+			   (unsigned int)ntohl(fl->label),
 			   fl->share,
-			   (unsigned)fl->owner,
+			   (int)fl->owner,
 			   atomic_read(&fl->users),
 			   fl->linger/HZ,
 			   (long)(fl->expires - jiffies)/HZ,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index aa21da6..5df487c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -198,7 +198,7 @@
 {
 	const struct in6_addr *remote = &p->raddr;
 	const struct in6_addr *local = &p->laddr;
-	unsigned h = 0;
+	unsigned int h = 0;
 	int prio = 0;
 
 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
@@ -954,7 +954,7 @@
 
 		if (skb->sk)
 			skb_set_owner_w(new_skb, skb->sk);
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = new_skb;
 	}
 	skb_dst_drop(skb);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8110362..efc0098 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2215,14 +2215,15 @@
 	rtm->rtm_src_len  = 128;
 	rtm->rtm_tos      = 0;
 	rtm->rtm_table    = mrt->id;
-	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
+		goto nla_put_failure;
 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
 	rtm->rtm_protocol = RTPROT_UNSPEC;
 	rtm->rtm_flags    = 0;
 
-	NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
-	NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);
-
+	if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
+	    nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
+		goto nla_put_failure;
 	if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
 		goto nla_put_failure;
 
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 63dd1f8..ba6d13d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -678,7 +678,6 @@
 	}
 	case MCAST_MSFILTER:
 	{
-		extern int sysctl_mld_max_msf;
 		struct group_filter *gsf;
 
 		if (optlen < GROUP_FILTER_SIZE(0))
@@ -943,7 +942,7 @@
 }
 
 static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
-		    char __user *optval, int __user *optlen, unsigned flags)
+		    char __user *optval, int __user *optlen, unsigned int flags)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	int len;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index b2869ca..7dfb89f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1061,7 +1061,7 @@
 			if (psf->sf_count[MCAST_INCLUDE] ||
 			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
 			    psf->sf_count[MCAST_EXCLUDE])
-				continue;
+				break;
 			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
 				scount++;
 				break;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3dcdb81..35615c6 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -15,6 +15,7 @@
 /*
  *	Changes:
  *
+ *	Alexey I. Froloff		:	RFC6106 (DNSSL) support
  *	Pierre Ynard			:	export userland ND options
  *						through netlink (RDNSS support)
  *	Lars Fenneberg			:	fixed MTU setting on receipt
@@ -228,7 +229,8 @@
 
 static inline int ndisc_is_useropt(struct nd_opt_hdr *opt)
 {
-	return opt->nd_opt_type == ND_OPT_RDNSS;
+	return opt->nd_opt_type == ND_OPT_RDNSS ||
+		opt->nd_opt_type == ND_OPT_DNSSL;
 }
 
 static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
@@ -1099,8 +1101,9 @@
 
 	memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3);
 
-	NLA_PUT(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
-		&ipv6_hdr(ra)->saddr);
+	if (nla_put(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
+		    &ipv6_hdr(ra)->saddr))
+		goto nla_put_failure;
 	nlmsg_end(skb, nlh);
 
 	rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC);
@@ -1264,8 +1267,7 @@
 	}
 
 	if (rt)
-		rt->dst.expires = jiffies + (HZ * lifetime);
-
+		rt6_set_expires(rt, jiffies + (HZ * lifetime));
 	if (ra_msg->icmph.icmp6_hop_limit) {
 		in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
 		if (rt)
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index a34c9e4..3ca9303 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -588,7 +588,7 @@
 #endif
 	register_netdevice_notifier(&ipq_dev_notifier);
 #ifdef CONFIG_SYSCTL
-	ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table);
+	ipq_sysctl_header = register_net_sysctl(&init_net, "net/ipv6", ipq_table);
 #endif
 	status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh);
 	if (status < 0) {
@@ -599,7 +599,7 @@
 
 cleanup_sysctl:
 #ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(ipq_sysctl_header);
+	unregister_net_sysctl_table(ipq_sysctl_header);
 #endif
 	unregister_netdevice_notifier(&ipq_dev_notifier);
 	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
@@ -621,7 +621,7 @@
 	ipq_flush(NULL, 0);
 
 #ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(ipq_sysctl_header);
+	unregister_net_sysctl_table(ipq_sysctl_header);
 #endif
 	unregister_netdevice_notifier(&ipq_dev_notifier);
 	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 94874b0..d4e350f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -78,19 +78,6 @@
 
    Hence the start of any table is given by get_table() below.  */
 
-/* Check for an extension */
-int
-ip6t_ext_hdr(u8 nexthdr)
-{
-	return  (nexthdr == IPPROTO_HOPOPTS)   ||
-		(nexthdr == IPPROTO_ROUTING)   ||
-		(nexthdr == IPPROTO_FRAGMENT)  ||
-		(nexthdr == IPPROTO_ESP)       ||
-		(nexthdr == IPPROTO_AH)        ||
-		(nexthdr == IPPROTO_NONE)      ||
-		(nexthdr == IPPROTO_DSTOPTS);
-}
-
 /* Returns whether matches rule or not. */
 /* Performance critical - called for every packet */
 static inline bool
@@ -409,7 +396,7 @@
 			if (v < 0) {
 				/* Pop from stack? */
 				if (v != XT_RETURN) {
-					verdict = (unsigned)(-v) - 1;
+					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
 				if (*stackptr <= origptr)
@@ -2366,7 +2353,6 @@
 EXPORT_SYMBOL(ip6t_register_table);
 EXPORT_SYMBOL(ip6t_unregister_table);
 EXPORT_SYMBOL(ip6t_do_table);
-EXPORT_SYMBOL(ip6t_ext_hdr);
 EXPORT_SYMBOL(ipv6_find_hdr);
 
 module_init(ip6_tables_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4111050..fe925e4 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -278,10 +278,11 @@
 static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
 				const struct nf_conntrack_tuple *tuple)
 {
-	NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
-		&tuple->src.u3.ip6);
-	NLA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
-		&tuple->dst.u3.ip6);
+	if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
+		    &tuple->src.u3.ip6) ||
+	    nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
+		    &tuple->dst.u3.ip6))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 92cc9f2..3e81904 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -234,10 +234,10 @@
 static int icmpv6_tuple_to_nlattr(struct sk_buff *skb,
 				  const struct nf_conntrack_tuple *t)
 {
-	NLA_PUT_BE16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id);
-	NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type);
-	NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code);
-
+	if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) ||
+	    nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) ||
+	    nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -300,8 +300,8 @@
 {
 	const unsigned int *timeout = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ));
-
+	if (nla_put_be32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 38f00b0..48a2be1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -626,8 +626,8 @@
 	inet_frags_init(&nf_frags);
 
 #ifdef CONFIG_SYSCTL
-	nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path,
-							  nf_ct_frag6_sysctl_table);
+	nf_ct_frag6_sysctl_header = register_net_sysctl(&init_net, "net/netfilter",
+							nf_ct_frag6_sysctl_table);
 	if (!nf_ct_frag6_sysctl_header) {
 		inet_frags_fini(&nf_frags);
 		return -ENOMEM;
@@ -640,7 +640,7 @@
 void nf_ct_frag6_cleanup(void)
 {
 #ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(nf_ct_frag6_sysctl_header);
+	unregister_net_sysctl_table(nf_ct_frag6_sysctl_header);
 	nf_ct_frag6_sysctl_header = NULL;
 #endif
 	inet_frags_fini(&nf_frags);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 9447bd6..36e04cf 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -646,7 +646,7 @@
 		table[2].data = &net->ipv6.frags.timeout;
 	}
 
-	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
+	hdr = register_net_sysctl(net, "net/ipv6", table);
 	if (hdr == NULL)
 		goto err_reg;
 
@@ -674,7 +674,7 @@
 
 static int ip6_frags_sysctl_register(void)
 {
-	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
+	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
 			ip6_frags_ctl_table);
 	return ip6_ctl_header == NULL ? -ENOMEM : 0;
 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3992e26..0aefc36 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -62,7 +62,7 @@
 #include <linux/sysctl.h>
 #endif
 
-static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
+static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
 				    const struct in6_addr *dest);
 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
@@ -82,7 +82,7 @@
 static struct rt6_info *rt6_add_route_info(struct net *net,
 					   const struct in6_addr *prefix, int prefixlen,
 					   const struct in6_addr *gwaddr, int ifindex,
-					   unsigned pref);
+					   unsigned int pref);
 static struct rt6_info *rt6_get_route_info(struct net *net,
 					   const struct in6_addr *prefix, int prefixlen,
 					   const struct in6_addr *gwaddr, int ifindex);
@@ -285,6 +285,10 @@
 		rt->rt6i_idev = NULL;
 		in6_dev_put(idev);
 	}
+
+	if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
+		dst_release(dst->from);
+
 	if (peer) {
 		rt->rt6i_peer = NULL;
 		inet_putpeer(peer);
@@ -329,8 +333,17 @@
 
 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
 {
-	return (rt->rt6i_flags & RTF_EXPIRES) &&
-		time_after(jiffies, rt->dst.expires);
+	struct rt6_info *ort = NULL;
+
+	if (rt->rt6i_flags & RTF_EXPIRES) {
+		if (time_after(jiffies, rt->dst.expires))
+			return 1;
+	} else if (rt->dst.from) {
+		ort = (struct rt6_info *) rt->dst.from;
+		return (ort->rt6i_flags & RTF_EXPIRES) &&
+			time_after(jiffies, ort->dst.expires);
+	}
+	return 0;
 }
 
 static inline int rt6_need_strict(const struct in6_addr *daddr)
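The rt6_set_expires(), rt6_clean_expires(), rt6_update_expires() and rt6_set_from() helpers used throughout the routing hunks are introduced elsewhere in this series (include/net/ip6_fib.h). They are assumed to look roughly like the sketch below; the key idea is that a route either carries its own RTF_EXPIRES timestamp or borrows expiry from the route it was copied from via dst.from, as the rt6_check_expired() change above relies on.

/* Assumed shape of the helpers; locking and the update variant elided. */
static inline void rt6_clean_expires(struct rt6_info *rt)
{
	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
		dst_release(rt->dst.from);

	rt->rt6i_flags &= ~RTF_EXPIRES;
	rt->dst.from = NULL;
}

static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
{
	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
		dst_release(rt->dst.from);

	rt->rt6i_flags |= RTF_EXPIRES;
	rt->dst.expires = expires;
}

static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
	/* inherit expiry from the parent route instead of keeping our own */
	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
		dst_release(rt->dst.from);

	rt->rt6i_flags &= ~RTF_EXPIRES;
	dst_hold(&from->dst);
	rt->dst.from = &from->dst;
}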
@@ -620,12 +633,11 @@
 				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
 
 	if (rt) {
-		if (!addrconf_finite_timeout(lifetime)) {
-			rt->rt6i_flags &= ~RTF_EXPIRES;
-		} else {
-			rt->dst.expires = jiffies + HZ * lifetime;
-			rt->rt6i_flags |= RTF_EXPIRES;
-		}
+		if (!addrconf_finite_timeout(lifetime))
+			rt6_clean_expires(rt);
+		else
+			rt6_set_expires(rt, jiffies + HZ * lifetime);
+
 		dst_release(&rt->dst);
 	}
 	return 0;
@@ -730,7 +742,7 @@
 	return __ip6_ins_rt(rt, &info);
 }
 
-static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
+static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
 				      const struct in6_addr *daddr,
 				      const struct in6_addr *saddr)
 {
@@ -954,10 +966,10 @@
 		rt->rt6i_idev = ort->rt6i_idev;
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
-		rt->dst.expires = 0;
 
 		rt->rt6i_gateway = ort->rt6i_gateway;
-		rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
+		rt->rt6i_flags = ort->rt6i_flags;
+		rt6_clean_expires(rt);
 		rt->rt6i_metric = 0;
 
 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -1019,10 +1031,9 @@
 
 	rt = (struct rt6_info *) skb_dst(skb);
 	if (rt) {
-		if (rt->rt6i_flags & RTF_CACHE) {
-			dst_set_expires(&rt->dst, 0);
-			rt->rt6i_flags |= RTF_EXPIRES;
-		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
+		if (rt->rt6i_flags & RTF_CACHE)
+			rt6_update_expires(rt, 0);
+		else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
 			rt->rt6i_node->fn_sernum = -1;
 	}
 }
@@ -1289,9 +1300,12 @@
 	}
 
 	rt->dst.obsolete = -1;
-	rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ?
-				jiffies + clock_t_to_jiffies(cfg->fc_expires) :
-				0;
+
+	if (cfg->fc_flags & RTF_EXPIRES)
+		rt6_set_expires(rt, jiffies +
+				clock_t_to_jiffies(cfg->fc_expires));
+	else
+		rt6_clean_expires(rt);
 
 	if (cfg->fc_protocol == RTPROT_UNSPEC)
 		cfg->fc_protocol = RTPROT_BOOT;
@@ -1736,8 +1750,8 @@
 			features |= RTAX_FEATURE_ALLFRAG;
 			dst_metric_set(&rt->dst, RTAX_FEATURES, features);
 		}
-		dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
-		rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
+		rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
+		rt->rt6i_flags |= RTF_MODIFIED;
 		goto out;
 	}
 
@@ -1765,9 +1779,8 @@
 		 * which is 10 mins. After 10 mins the decreased pmtu is expired
 		 * and detecting PMTU increase will be automatically happened.
 		 */
-		dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
-		nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
-
+		rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
+		nrt->rt6i_flags |= RTF_DYNAMIC;
 		ip6_ins_rt(nrt);
 	}
 out:
@@ -1799,7 +1812,7 @@
  *	Misc support functions
  */
 
-static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
+static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
 				    const struct in6_addr *dest)
 {
 	struct net *net = dev_net(ort->dst.dev);
@@ -1819,10 +1832,14 @@
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
 		rt->dst.lastuse = jiffies;
-		rt->dst.expires = 0;
 
 		rt->rt6i_gateway = ort->rt6i_gateway;
-		rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
+		rt->rt6i_flags = ort->rt6i_flags;
+		if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
+		    (RTF_DEFAULT | RTF_ADDRCONF))
+			rt6_set_from(rt, ort);
+		else
+			rt6_clean_expires(rt);
 		rt->rt6i_metric = 0;
 
 #ifdef CONFIG_IPV6_SUBTREES
@@ -1870,7 +1887,7 @@
 static struct rt6_info *rt6_add_route_info(struct net *net,
 					   const struct in6_addr *prefix, int prefixlen,
 					   const struct in6_addr *gwaddr, int ifindex,
-					   unsigned pref)
+					   unsigned int pref)
 {
 	struct fib6_config cfg = {
 		.fc_table	= RT6_TABLE_INFO,
@@ -2200,10 +2217,9 @@
 	icmp6_clean_all(fib6_ifdown, &adn);
 }
 
-struct rt6_mtu_change_arg
-{
+struct rt6_mtu_change_arg {
 	struct net_device *dev;
-	unsigned mtu;
+	unsigned int mtu;
 };
 
 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
@@ -2245,7 +2261,7 @@
 	return 0;
 }
 
-void rt6_mtu_change(struct net_device *dev, unsigned mtu)
+void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
 {
 	struct rt6_mtu_change_arg arg = {
 		.dev = dev,
@@ -2413,7 +2429,8 @@
 	else
 		table = RT6_TABLE_UNSPEC;
 	rtm->rtm_table = table;
-	NLA_PUT_U32(skb, RTA_TABLE, table);
+	if (nla_put_u32(skb, RTA_TABLE, table))
+		goto nla_put_failure;
 	if (rt->rt6i_flags & RTF_REJECT)
 		rtm->rtm_type = RTN_UNREACHABLE;
 	else if (rt->rt6i_flags & RTF_LOCAL)
@@ -2436,16 +2453,20 @@
 		rtm->rtm_flags |= RTM_F_CLONED;
 
 	if (dst) {
-		NLA_PUT(skb, RTA_DST, 16, dst);
+		if (nla_put(skb, RTA_DST, 16, dst))
+			goto nla_put_failure;
 		rtm->rtm_dst_len = 128;
 	} else if (rtm->rtm_dst_len)
-		NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
+		if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
+			goto nla_put_failure;
 #ifdef CONFIG_IPV6_SUBTREES
 	if (src) {
-		NLA_PUT(skb, RTA_SRC, 16, src);
+		if (nla_put(skb, RTA_SRC, 16, src))
+			goto nla_put_failure;
 		rtm->rtm_src_len = 128;
-	} else if (rtm->rtm_src_len)
-		NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
+	} else if (rtm->rtm_src_len &&
+		   nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
+		goto nla_put_failure;
 #endif
 	if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
@@ -2463,17 +2484,20 @@
 			}
 		} else
 #endif
-			NLA_PUT_U32(skb, RTA_IIF, iif);
+			if (nla_put_u32(skb, RTA_IIF, iif))
+				goto nla_put_failure;
 	} else if (dst) {
 		struct in6_addr saddr_buf;
-		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
-			NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
+		    nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+			goto nla_put_failure;
 	}
 
 	if (rt->rt6i_prefsrc.plen) {
 		struct in6_addr saddr_buf;
 		saddr_buf = rt->rt6i_prefsrc.addr;
-		NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+		if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+			goto nla_put_failure;
 	}
 
 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
@@ -2489,11 +2513,11 @@
 	}
 	rcu_read_unlock();
 
-	if (rt->dst.dev)
-		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
-
-	NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
-
+	if (rt->dst.dev &&
+	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
+		goto nla_put_failure;
+	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
+		goto nla_put_failure;
 	if (!(rt->rt6i_flags & RTF_EXPIRES))
 		expires = 0;
 	else if (rt->dst.expires - jiffies < INT_MAX)
@@ -2598,6 +2622,7 @@
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb) {
+		dst_release(&rt->dst);
 		err = -ENOBUFS;
 		goto errout;
 	}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c4ffd17..e5fef943 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -87,35 +87,51 @@
 
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_tstats {
-	unsigned long	rx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_packets;
-	unsigned long	tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+	struct u64_stats_sync	syncp;
+};
 
-static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
+						   struct rtnl_link_stats64 *tot)
 {
-	struct pcpu_tstats sum = { 0 };
 	int i;
 
 	for_each_possible_cpu(i) {
 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+		unsigned int start;
 
-		sum.rx_packets += tstats->rx_packets;
-		sum.rx_bytes   += tstats->rx_bytes;
-		sum.tx_packets += tstats->tx_packets;
-		sum.tx_bytes   += tstats->tx_bytes;
+		do {
+			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			rx_packets = tstats->rx_packets;
+			tx_packets = tstats->tx_packets;
+			rx_bytes = tstats->rx_bytes;
+			tx_bytes = tstats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+		tot->rx_packets += rx_packets;
+		tot->tx_packets += tx_packets;
+		tot->rx_bytes   += rx_bytes;
+		tot->tx_bytes   += tx_bytes;
 	}
-	dev->stats.rx_packets = sum.rx_packets;
-	dev->stats.rx_bytes   = sum.rx_bytes;
-	dev->stats.tx_packets = sum.tx_packets;
-	dev->stats.tx_bytes   = sum.tx_bytes;
-	return &dev->stats;
+
+	tot->rx_errors = dev->stats.rx_errors;
+	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+	tot->tx_dropped = dev->stats.tx_dropped;
+	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+	tot->tx_errors = dev->stats.tx_errors;
+
+	return tot;
 }
+
 /*
  * Must be invoked with rcu_read_lock
  */
-static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
+static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
 		struct net_device *dev, __be32 remote, __be32 local)
 {
 	unsigned int h0 = HASH(remote);
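The fetch side of the u64_stats seqcount is shown above in ipip6_get_stats64(); the per-packet update side lives in the tunnel rx/tx paths, which are not visible in this hunk, and is assumed to follow the usual pattern:

/* Writer side of the same pattern, as assumed for the rx/tx hot paths:
 * bump the 64-bit counters inside a u64_stats_update section so that
 * 32-bit readers in ipip6_get_stats64() see a consistent snapshot.
 */
static void tunnel_account_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}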
@@ -691,7 +707,7 @@
 			goto tx_error;
 		}
 
-		addr6 = (const struct in6_addr*)&neigh->primary_key;
+		addr6 = (const struct in6_addr *)&neigh->primary_key;
 		addr_type = ipv6_addr_type(addr6);
 
 		if ((addr_type & IPV6_ADDR_UNICAST) &&
@@ -721,7 +737,7 @@
 			goto tx_error;
 		}
 
-		addr6 = (const struct in6_addr*)&neigh->primary_key;
+		addr6 = (const struct in6_addr *)&neigh->primary_key;
 		addr_type = ipv6_addr_type(addr6);
 
 		if (addr_type == IPV6_ADDR_ANY) {
@@ -1126,7 +1142,7 @@
 	.ndo_start_xmit	= ipip6_tunnel_xmit,
 	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
 	.ndo_change_mtu	= ipip6_tunnel_change_mtu,
-	.ndo_get_stats	= ipip6_get_stats,
+	.ndo_get_stats64 = ipip6_get_stats64,
 };
 
 static void ipip6_dev_free(struct net_device *dev)
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 166a57c..e85c48b 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -16,32 +16,8 @@
 #include <net/addrconf.h>
 #include <net/inet_frag.h>
 
-static struct ctl_table empty[1];
-
-static ctl_table ipv6_static_skeleton[] = {
-	{
-		.procname	= "neigh",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= empty,
-	},
-	{ }
-};
-
 static ctl_table ipv6_table_template[] = {
 	{
-		.procname	= "route",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= ipv6_route_table_template
-	},
-	{
-		.procname	= "icmp",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= ipv6_icmp_table_template
-	},
-	{
 		.procname	= "bindv6only",
 		.data		= &init_net.ipv6.sysctl.bindv6only,
 		.maxlen		= sizeof(int),
@@ -62,13 +38,6 @@
 	{ }
 };
 
-struct ctl_path net_ipv6_ctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv6", },
-	{ },
-};
-EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
-
 static int __net_init ipv6_sysctl_net_init(struct net *net)
 {
 	struct ctl_table *ipv6_table;
@@ -81,28 +50,37 @@
 			     GFP_KERNEL);
 	if (!ipv6_table)
 		goto out;
+	ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
 
 	ipv6_route_table = ipv6_route_sysctl_init(net);
 	if (!ipv6_route_table)
 		goto out_ipv6_table;
-	ipv6_table[0].child = ipv6_route_table;
 
 	ipv6_icmp_table = ipv6_icmp_sysctl_init(net);
 	if (!ipv6_icmp_table)
 		goto out_ipv6_route_table;
-	ipv6_table[1].child = ipv6_icmp_table;
 
-	ipv6_table[2].data = &net->ipv6.sysctl.bindv6only;
-
-	net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path,
-							   ipv6_table);
-	if (!net->ipv6.sysctl.table)
+	net->ipv6.sysctl.hdr = register_net_sysctl(net, "net/ipv6", ipv6_table);
+	if (!net->ipv6.sysctl.hdr)
 		goto out_ipv6_icmp_table;
 
+	net->ipv6.sysctl.route_hdr =
+		register_net_sysctl(net, "net/ipv6/route", ipv6_route_table);
+	if (!net->ipv6.sysctl.route_hdr)
+		goto out_unregister_ipv6_table;
+
+	net->ipv6.sysctl.icmp_hdr =
+		register_net_sysctl(net, "net/ipv6/icmp", ipv6_icmp_table);
+	if (!net->ipv6.sysctl.icmp_hdr)
+		goto out_unregister_route_table;
+
 	err = 0;
 out:
 	return err;
-
+out_unregister_route_table:
+	unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
+out_unregister_ipv6_table:
+	unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
 out_ipv6_icmp_table:
 	kfree(ipv6_icmp_table);
 out_ipv6_route_table:
@@ -118,11 +96,13 @@
 	struct ctl_table *ipv6_route_table;
 	struct ctl_table *ipv6_icmp_table;
 
-	ipv6_table = net->ipv6.sysctl.table->ctl_table_arg;
-	ipv6_route_table = ipv6_table[0].child;
-	ipv6_icmp_table = ipv6_table[1].child;
+	ipv6_table = net->ipv6.sysctl.hdr->ctl_table_arg;
+	ipv6_route_table = net->ipv6.sysctl.route_hdr->ctl_table_arg;
+	ipv6_icmp_table = net->ipv6.sysctl.icmp_hdr->ctl_table_arg;
 
-	unregister_net_sysctl_table(net->ipv6.sysctl.table);
+	unregister_net_sysctl_table(net->ipv6.sysctl.icmp_hdr);
+	unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
+	unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
 
 	kfree(ipv6_table);
 	kfree(ipv6_route_table);
@@ -140,7 +120,7 @@
 {
 	int err = -ENOMEM;
 
-	ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_rotable);
+	ip6_header = register_net_sysctl(&init_net, "net/ipv6", ipv6_rotable);
 	if (ip6_header == NULL)
 		goto out;
 
@@ -160,18 +140,3 @@
 	unregister_net_sysctl_table(ip6_header);
 	unregister_pernet_subsys(&ipv6_sysctl_net_ops);
 }
-
-static struct ctl_table_header *ip6_base;
-
-int ipv6_static_sysctl_register(void)
-{
-	ip6_base = register_sysctl_paths(net_ipv6_ctl_path, ipv6_static_skeleton);
-	if (ip6_base == NULL)
-		return -ENOMEM;
-	return 0;
-}
-
-void ipv6_static_sysctl_unregister(void)
-{
-	unregister_net_sysctl_table(ip6_base);
-}
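
Note: the sysctl conversions in this and the following files (sysctl_net_ipv6.c, ipx, irda, llc, xfrm6) drop the ctl_path arrays and nested .child tables in favour of register_net_sysctl(), which takes the directory as a plain "net/..." path string and returns one header per table. A minimal sketch of the pattern, with placeholder path and table contents:

/* Minimal sketch of the register_net_sysctl() pattern applied above; the
 * "net/demo" path, demo_ names and table contents are placeholders. */
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int demo_value;

static struct ctl_table demo_table[] = {
	{
		.procname	= "example",
		.data		= &demo_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *demo_hdr;

static int __init demo_sysctl_register(void)
{
	/* The directory hierarchy is the path string; no ctl_path[] and no
	 * empty parent tables with .child pointers are needed any more. */
	demo_hdr = register_net_sysctl(&init_net, "net/demo", demo_table);
	return demo_hdr ? 0 : -ENOMEM;
}

static void demo_sysctl_unregister(void)
{
	unregister_net_sysctl_table(demo_hdr);
}
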
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 12c6ece..cdbf292 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1353,7 +1353,7 @@
 	newnp->pktoptions = NULL;
 	if (treq->pktopts != NULL) {
 		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
-		kfree_skb(treq->pktopts);
+		consume_skb(treq->pktopts);
 		treq->pktopts = NULL;
 		if (newnp->pktoptions)
 			skb_set_owner_r(newnp->pktoptions, newsk);
@@ -1383,6 +1383,10 @@
 	tcp_mtup_init(newsk);
 	tcp_sync_mss(newsk, dst_mtu(dst));
 	newtp->advmss = dst_metric_advmss(dst);
+	if (tcp_sk(sk)->rx_opt.user_mss &&
+	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
+		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+
 	tcp_initialize_rcv_mss(newsk);
 	if (tcp_rsk(req)->snt_synack)
 		tcp_valid_rtt_meas(newsk,
@@ -1645,7 +1649,7 @@
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else
@@ -1654,7 +1658,8 @@
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v6_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb,
+					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
@@ -1829,64 +1834,15 @@
 static int tcp_v6_init_sock(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
 
-	skb_queue_head_init(&tp->out_of_order_queue);
-	tcp_init_xmit_timers(sk);
-	tcp_prequeue_init(tp);
-
-	icsk->icsk_rto = TCP_TIMEOUT_INIT;
-	tp->mdev = TCP_TIMEOUT_INIT;
-
-	/* So many TCP implementations out there (incorrectly) count the
-	 * initial SYN frame in their delayed-ACK and congestion control
-	 * algorithms that we must have the following bandaid to talk
-	 * efficiently to them.  -DaveM
-	 */
-	tp->snd_cwnd = 2;
-
-	/* See draft-stevens-tcpca-spec-01 for discussion of the
-	 * initialization of these values.
-	 */
-	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-	tp->snd_cwnd_clamp = ~0;
-	tp->mss_cache = TCP_MSS_DEFAULT;
-
-	tp->reordering = sysctl_tcp_reordering;
-
-	sk->sk_state = TCP_CLOSE;
+	tcp_init_sock(sk);
 
 	icsk->icsk_af_ops = &ipv6_specific;
-	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
-	icsk->icsk_sync_mss = tcp_sync_mss;
-	sk->sk_write_space = sk_stream_write_space;
-	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
 #ifdef CONFIG_TCP_MD5SIG
-	tp->af_specific = &tcp_sock_ipv6_specific;
+	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
 #endif
 
-	/* TCP Cookie Transactions */
-	if (sysctl_tcp_cookie_size > 0) {
-		/* Default, cookies without s_data_payload. */
-		tp->cookie_values =
-			kzalloc(sizeof(*tp->cookie_values),
-				sk->sk_allocation);
-		if (tp->cookie_values != NULL)
-			kref_init(&tp->cookie_values->kref);
-	}
-	/* Presumed zeroed, in order of appearance:
-	 *	cookie_in_always, cookie_out_never,
-	 *	s_data_constant, s_data_in, s_data_out
-	 */
-	sk->sk_sndbuf = sysctl_tcp_wmem[1];
-	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
-
-	local_bh_disable();
-	sock_update_memcg(sk);
-	sk_sockets_allocated_inc(sk);
-	local_bh_enable();
-
 	return 0;
 }
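
Note: the kfree_skb() to consume_skb() change in the tcp_v6_syn_recv_sock hunk above follows the usual convention: consume_skb() for buffers that reached the end of their normal life, kfree_skb() for drops, which are accounted and traced as such. A tiny sketch of the distinction:

/* Sketch of the drop-vs-consume convention behind the change above. */
#include <linux/skbuff.h>

static void demo_finish_with_skb(struct sk_buff *skb, bool delivered)
{
	if (delivered)
		consume_skb(skb);	/* normal end of life, not a drop */
	else
		kfree_skb(skb);		/* counted and traced as a drop */
}
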
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 37b0699..d39bbc9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -611,14 +611,14 @@
 
 		sk = stack[i];
 		if (skb1) {
-			if (sk_rcvqueues_full(sk, skb1)) {
+			if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) {
 				kfree_skb(skb1);
 				goto drop;
 			}
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog(sk, skb1)) {
+			else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) {
 				kfree_skb(skb1);
 				bh_unlock_sock(sk);
 				goto drop;
@@ -790,14 +790,14 @@
 
 	/* deliver */
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		sock_put(sk);
 		goto discard;
 	}
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		bh_unlock_sock(sk);
 		sock_put(sk);
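
Note: sk_rcvqueues_full() and sk_add_backlog() now take the queue limit as an explicit argument; the UDP paths above pass sk->sk_rcvbuf, while the TCP receive path uses sk->sk_rcvbuf + sk->sk_sndbuf. A sketch of the calling pattern, with the protocol's real receive handler abstracted behind a do_rcv callback for illustration:

/* Sketch of the explicit-limit backlog pattern used above; do_rcv stands in
 * for the protocol's real receive handler. */
#include <net/sock.h>

static int demo_queue_or_backlog(struct sock *sk, struct sk_buff *skb,
				 int (*do_rcv)(struct sock *, struct sk_buff *))
{
	int rc = 0;

	/* Refuse early if receive queue plus backlog already exceed the limit. */
	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = do_rcv(sk, skb);
	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		/* Over the stated limit: drop instead of queueing. */
		kfree_skb(skb);
		rc = -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}
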
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8ea65e0..8625fba 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -334,8 +334,8 @@
 		goto out_policy;
 
 #ifdef CONFIG_SYSCTL
-	sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path,
-						xfrm6_policy_table);
+	sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6",
+					 xfrm6_policy_table);
 #endif
 out:
 	return ret;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 4fe1db12..ee5a706 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -68,9 +68,9 @@
 
 static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
 
-static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
+static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
 {
-	unsigned h;
+	unsigned int h;
 
 	h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
 	h ^= h >> 16;
@@ -80,7 +80,7 @@
 	return h;
 }
 
-static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
+static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi)
 {
 	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
 }
diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c
index bd6dca0..ad7c03d 100644
--- a/net/ipx/sysctl_net_ipx.c
+++ b/net/ipx/sysctl_net_ipx.c
@@ -8,6 +8,7 @@
 
 #include <linux/mm.h>
 #include <linux/sysctl.h>
+#include <net/net_namespace.h>
 
 #ifndef CONFIG_SYSCTL
 #error This file should not be compiled without CONFIG_SYSCTL defined
@@ -27,20 +28,14 @@
 	{ },
 };
 
-static struct ctl_path ipx_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipx", },
-	{ }
-};
-
 static struct ctl_table_header *ipx_table_header;
 
 void ipx_register_sysctl(void)
 {
-	ipx_table_header = register_sysctl_paths(ipx_path, ipx_table);
+	ipx_table_header = register_net_sysctl(&init_net, "net/ipx", ipx_table);
 }
 
 void ipx_unregister_sysctl(void)
 {
-	unregister_sysctl_table(ipx_table_header);
+	unregister_net_sysctl_table(ipx_table_header);
 }
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index 77c5e64..d0667d6 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -54,7 +54,7 @@
  */
 static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
 {
-	unsigned cflag, cval;
+	unsigned int cflag, cval;
 	int baud;
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 2615ffc..de73f64 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -235,12 +235,6 @@
 	{ }
 };
 
-static struct ctl_path irda_path[] = {
-	{ .procname = "net", },
-	{ .procname = "irda", },
-	{ }
-};
-
 static struct ctl_table_header *irda_table_header;
 
 /*
@@ -251,7 +245,7 @@
  */
 int __init irda_sysctl_register(void)
 {
-	irda_table_header = register_sysctl_paths(irda_path, irda_table);
+	irda_table_header = register_net_sysctl(&init_net, "net/irda", irda_table);
 	if (!irda_table_header)
 		return -ENOMEM;
 
@@ -266,7 +260,7 @@
  */
 void irda_sysctl_unregister(void)
 {
-	unregister_sysctl_table(irda_table_header);
+	unregister_net_sysctl_table(irda_table_header);
 }
 
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 11dbb22..34e4185 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1714,7 +1714,7 @@
 static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
 	struct net *net = sock_net(sk);
-	unsigned proto;
+	unsigned int proto;
 	struct km_event c;
 	struct xfrm_audit audit_info;
 	int err, err2;
@@ -3480,7 +3480,7 @@
 
 	/* Addresses to be used by KM for negotiation, if ext is available */
 	if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
-		return -EINVAL;
+		goto err;
 
 	/* selector src */
 	set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
@@ -3547,7 +3547,7 @@
 		goto out;
 
 	err = -EMSGSIZE;
-	if ((unsigned)len > sk->sk_sndbuf - 32)
+	if ((unsigned int)len > sk->sk_sndbuf - 32)
 		goto out;
 
 	err = -ENOBUFS;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 89ff8c6..f6732b6 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1424,6 +1424,7 @@
 		/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
 		udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
 		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
+		udp_encap_enable();
 	}
 
 	sk->sk_user_data = tunnel;
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a16a48e..09e4a38 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -54,15 +54,15 @@
  */
 struct l2tp_session_cfg {
 	enum l2tp_pwtype	pw_type;
-	unsigned		data_seq:2;	/* data sequencing level
+	unsigned int		data_seq:2;	/* data sequencing level
 						 * 0 => none, 1 => IP only,
 						 * 2 => all
 						 */
-	unsigned		recv_seq:1;	/* expect receive packets with
+	unsigned int		recv_seq:1;	/* expect receive packets with
 						 * sequence numbers? */
-	unsigned		send_seq:1;	/* send packets with sequence
+	unsigned int		send_seq:1;	/* send packets with sequence
 						 * numbers? */
-	unsigned		lns_mode:1;	/* behave as LNS? LAC enables
+	unsigned int		lns_mode:1;	/* behave as LNS? LAC enables
 						 * sequence numbers under
 						 * control of LNS. */
 	int			debug;		/* bitmask of debug message
@@ -107,15 +107,15 @@
 
 	char			name[32];	/* for logging */
 	char			ifname[IFNAMSIZ];
-	unsigned		data_seq:2;	/* data sequencing level
+	unsigned int		data_seq:2;	/* data sequencing level
 						 * 0 => none, 1 => IP only,
 						 * 2 => all
 						 */
-	unsigned		recv_seq:1;	/* expect receive packets with
+	unsigned int		recv_seq:1;	/* expect receive packets with
 						 * sequence numbers? */
-	unsigned		send_seq:1;	/* send packets with sequence
+	unsigned int		send_seq:1;	/* send packets with sequence
 						 * numbers? */
-	unsigned		lns_mode:1;	/* behave as LNS? LAC enables
+	unsigned int		lns_mode:1;	/* behave as LNS? LAC enables
 						 * sequence numbers under
 						 * control of LNS. */
 	int			debug;		/* bitmask of debug message
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 55670ec..585d93ec 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -232,7 +232,7 @@
 {
 	write_lock_bh(&l2tp_ip_lock);
 	hlist_del_init(&sk->sk_bind_node);
-	hlist_del_init(&sk->sk_node);
+	sk_del_node_init(sk);
 	write_unlock_bh(&l2tp_ip_lock);
 	sk_common_release(sk);
 }
@@ -271,7 +271,8 @@
 	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
 		goto out;
 
-	inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
+	if (addr->l2tp_addr.s_addr)
+		inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
 		inet->inet_saddr = 0;  /* Use device */
 	sk_dst_reset(sk);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 93a41a0..bc8c334 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -231,24 +231,28 @@
 	if (IS_ERR(hdr))
 		return PTR_ERR(hdr);
 
-	NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
-	NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
-	NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);
+	if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
+	    nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
+	    nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
+		goto nla_put_failure;
 
 	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
+	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+			tunnel->stats.rx_seq_discards) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
+			tunnel->stats.rx_oos_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
 	sk = tunnel->sock;
@@ -259,13 +263,16 @@
 
 	switch (tunnel->encap) {
 	case L2TP_ENCAPTYPE_UDP:
-		NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
-		NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
-		NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT));
+		if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
+		    nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
+		    nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
+			       (sk->sk_no_check != UDP_CSUM_NOXMIT)))
+			goto nla_put_failure;
 		/* NOBREAK */
 	case L2TP_ENCAPTYPE_IP:
-		NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
-		NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
+		if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
+		    nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
+			goto nla_put_failure;
 		break;
 	}
 
@@ -563,43 +570,50 @@
 	if (IS_ERR(hdr))
 		return PTR_ERR(hdr);
 
-	NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug);
-	NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
-	NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu);
-	if (session->mru)
-		NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru);
+	if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
+			session->peer_session_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
+	    nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
+	    nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
+	    (session->mru &&
+	     nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
+		goto nla_put_failure;
 
-	if (session->ifname && session->ifname[0])
-		NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname);
-	if (session->cookie_len)
-		NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]);
-	if (session->peer_cookie_len)
-		NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]);
-	NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
-	NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
-	NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
+	if ((session->ifname && session->ifname[0] &&
+	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
+	    (session->cookie_len &&
+	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
+		     &session->cookie[0])) ||
+	    (session->peer_cookie_len &&
+	     nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
+		     &session->peer_cookie[0])) ||
+	    nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
+	    nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
+	    nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
 #ifdef CONFIG_XFRM
-	if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
-		NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1);
+	    (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
+	     nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
 #endif
-	if (session->reorder_timeout)
-		NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout);
-
+	    (session->reorder_timeout &&
+	     nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
+		goto nla_put_failure;
 	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
+	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+			session->stats.rx_seq_discards) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
+			session->stats.rx_oos_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
 	return genlmsg_end(skb, hdr);
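
Note: the NLA_PUT_*() macros removed throughout the l2tp and ipset hunks hid a goto nla_put_failure inside; the replacement style calls nla_put_*() explicitly, chains the calls with ||, and guards optional attributes with &&, so a single branch takes the failure path if any put fails. A minimal sketch with placeholder attribute ids and names:

/* Minimal sketch of the nla_put_*() error-handling style used above; the
 * attribute ids and demo_ names are placeholders. */
#include <net/netlink.h>

static int demo_fill_info(struct sk_buff *skb, u32 id, u16 mtu, u16 mru)
{
	/* Each nla_put_*() returns non-zero on failure; optional attributes
	 * are emitted only when set, via the && guard. */
	if (nla_put_u32(skb, 1 /* DEMO_ATTR_ID */, id) ||
	    nla_put_u16(skb, 2 /* DEMO_ATTR_MTU */, mtu) ||
	    (mru && nla_put_u16(skb, 3 /* DEMO_ATTR_MRU */, mru)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
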
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index b9bef2c..17bc85d 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -518,7 +518,7 @@
 	if (sock_flag(sk, SOCK_ZAPPED))
 		goto out;
 	rc = 0;
-	if (!(unsigned)backlog)	/* BSDism */
+	if (!(unsigned int)backlog)	/* BSDism */
 		backlog = 1;
 	sk->sk_max_ack_backlog = backlog;
 	if (sk->sk_state != TCP_LISTEN) {
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index ba137a6..0d0d416 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -828,7 +828,7 @@
 	else {
 		dprintk("%s: adding to backlog...\n", __func__);
 		llc_set_backlog_type(skb, LLC_PACKET);
-		if (sk_add_backlog(sk, skb))
+		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 			goto drop_unlock;
 	}
 out:
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index e2ebe35..d75306b 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/sysctl.h>
+#include <net/net_namespace.h>
 #include <net/llc.h>
 
 #ifndef CONFIG_SYSCTL
@@ -56,48 +57,29 @@
 	{ },
 };
 
-static struct ctl_table llc2_dir_timeout_table[] = {
-	{
-		.procname	= "timeout",
-		.mode		= 0555,
-		.child		= llc2_timeout_table,
-	},
-	{ },
-};
-
-static struct ctl_table llc_table[] = {
-	{
-		.procname	= "llc2",
-		.mode		= 0555,
-		.child		= llc2_dir_timeout_table,
-	},
-	{
-		.procname       = "station",
-		.mode           = 0555,
-		.child          = llc_station_table,
-	},
-	{ },
-};
-
-static struct ctl_path llc_path[] = {
-	{ .procname = "net", },
-	{ .procname = "llc", },
-	{ }
-};
-
-static struct ctl_table_header *llc_table_header;
+static struct ctl_table_header *llc2_timeout_header;
+static struct ctl_table_header *llc_station_header;
 
 int __init llc_sysctl_init(void)
 {
-	llc_table_header = register_sysctl_paths(llc_path, llc_table);
+	llc2_timeout_header = register_net_sysctl(&init_net, "net/llc/llc2/timeout", llc2_timeout_table);
+	llc_station_header = register_net_sysctl(&init_net, "net/llc/station", llc_station_table);
 
-	return llc_table_header ? 0 : -ENOMEM;
+	if (!llc2_timeout_header || !llc_station_header) {
+		llc_sysctl_exit();
+		return -ENOMEM;
+	}
+	return 0;
 }
 
 void llc_sysctl_exit(void)
 {
-	if (llc_table_header) {
-		unregister_sysctl_table(llc_table_header);
-		llc_table_header = NULL;
+	if (llc2_timeout_header) {
+		unregister_net_sysctl_table(llc2_timeout_header);
+		llc2_timeout_header = NULL;
+	}
+	if (llc_station_header) {
+		unregister_net_sysctl_table(llc_station_header);
+		llc_station_header = NULL;
 	}
 }
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 49a2079..61cd391 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -455,8 +455,8 @@
 			 * fall back to HT20 if we don't use or use
 			 * the other extension channel
 			 */
-			if ((channel_type == NL80211_CHAN_HT40MINUS ||
-			     channel_type == NL80211_CHAN_HT40PLUS) &&
+			if (!(channel_type == NL80211_CHAN_HT40MINUS ||
+			      channel_type == NL80211_CHAN_HT40PLUS) ||
 			    channel_type != sdata->u.ibss.channel_type)
 				sta_ht_cap_new.cap &=
 					~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index dd2fbec..d5ac02f 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -103,7 +103,7 @@
 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 				 struct sk_buff *skb,
 				 struct ieee80211_rate *rate,
-				 int rtap_len)
+				 int rtap_len, bool has_fcs)
 {
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_radiotap_header *rthdr;
@@ -134,7 +134,7 @@
 	}
 
 	/* IEEE80211_RADIOTAP_FLAGS */
-	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
+	if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
 		*pos |= IEEE80211_RADIOTAP_F_FCS;
 	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
 		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
@@ -294,7 +294,8 @@
 	}
 
 	/* prepend radiotap information */
-	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
+	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
+					 true);
 
 	skb_reset_mac_header(skb);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2568,7 +2569,8 @@
 		goto out_free_skb;
 
 	/* prepend radiotap information */
-	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
+	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
+					 false);
 
 	skb_set_mac_header(skb, 0);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e1b7e05..e19f365 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -290,12 +290,3 @@
 	if (netfilter_log_init() < 0)
 		panic("cannot initialize nf_log");
 }
-
-#ifdef CONFIG_SYSCTL
-struct ctl_path nf_net_netfilter_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "netfilter", },
-	{ }
-};
-EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path);
-#endif /* CONFIG_SYSCTL */
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index a72a4df..7e1b061 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -109,8 +109,9 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-				htonl(map->first_ip + id * map->hosts));
+		if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+				    htonl(map->first_ip + id * map->hosts)))
+			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, atd);
@@ -194,10 +195,11 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-				htonl(map->first_ip + id * map->hosts));
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-			      htonl(ip_set_timeout_get(members[id])));
+		if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+				    htonl(map->first_ip + id * map->hosts)) ||
+		    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+				  htonl(ip_set_timeout_get(members[id]))))
+			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, adt);
@@ -334,15 +336,16 @@
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-	if (map->netmask != 32)
-		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-		      htonl(sizeof(*map) + map->memsize));
-	if (with_timeout(map->timeout))
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
+	    (map->netmask != 32 &&
+	     nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)) ||
+	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+			  htonl(sizeof(*map) + map->memsize)) ||
+	    (with_timeout(map->timeout) &&
+	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
 	return 0;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 81324c1..0bb16c46 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -186,11 +186,12 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-				htonl(map->first_ip + id));
-		if (elem->match == MAC_FILLED)
-			NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
-				elem->ether);
+		if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+				    htonl(map->first_ip + id)) ||
+		    (elem->match == MAC_FILLED &&
+		     nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+			     elem->ether)))
+			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, atd);
@@ -314,14 +315,16 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-				htonl(map->first_ip + id));
-		if (elem->match == MAC_FILLED)
-			NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
-				elem->ether);
+		if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+				    htonl(map->first_ip + id)) ||
+		    (elem->match == MAC_FILLED &&
+		     nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+			     elem->ether)))
+		    goto nla_put_failure;
 		timeout = elem->match == MAC_UNSET ? elem->timeout
 				: ip_set_timeout_get(elem->timeout);
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
+		if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)))
+		    goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, atd);
@@ -438,14 +441,16 @@
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-		      htonl(sizeof(*map)
-			    + (map->last_ip - map->first_ip + 1) * map->dsize));
-	if (with_timeout(map->timeout))
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
+	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+			  htonl(sizeof(*map) +
+				((map->last_ip - map->first_ip + 1) *
+				 map->dsize))) ||
+	    (with_timeout(map->timeout) &&
+	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
 	return 0;
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 382ec28..b9f1fce 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -96,8 +96,9 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
-			      htons(map->first_port + id));
+		if (nla_put_net16(skb, IPSET_ATTR_PORT,
+				  htons(map->first_port + id)))
+			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, atd);
@@ -183,10 +184,11 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
-			      htons(map->first_port + id));
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-			      htonl(ip_set_timeout_get(members[id])));
+		if (nla_put_net16(skb, IPSET_ATTR_PORT,
+				  htons(map->first_port + id)) ||
+		    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+				  htonl(ip_set_timeout_get(members[id]))))
+			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, adt);
@@ -320,13 +322,14 @@
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-		      htonl(sizeof(*map) + map->memsize));
-	if (with_timeout(map->timeout))
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+	if (nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)) ||
+	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+			  htonl(sizeof(*map) + map->memsize)) ||
+	    (with_timeout(map->timeout) &&
+	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
 	return 0;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e6c1c96..819c342 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1092,19 +1092,21 @@
 			ret = -EMSGSIZE;
 			goto release_refcount;
 		}
-		NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
-		NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+		if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+		    nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
+			goto nla_put_failure;
 		if (dump_flags & IPSET_FLAG_LIST_SETNAME)
 			goto next_set;
 		switch (cb->args[2]) {
 		case 0:
 			/* Core header data */
-			NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME,
-				       set->type->name);
-			NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
-				   set->family);
-			NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
-				   set->revision);
+			if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
+					   set->type->name) ||
+			    nla_put_u8(skb, IPSET_ATTR_FAMILY,
+				       set->family) ||
+			    nla_put_u8(skb, IPSET_ATTR_REVISION,
+				       set->revision))
+				goto nla_put_failure;
 			ret = set->variant->head(set, skb);
 			if (ret < 0)
 				goto release_refcount;
@@ -1410,11 +1412,12 @@
 			 IPSET_CMD_HEADER);
 	if (!nlh2)
 		goto nlmsg_failure;
-	NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
-	NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
-	NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
-	NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
-	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->revision);
+	if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+	    nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
+	    nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
+	    nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
+	    nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision))
+		goto nla_put_failure;
 	nlmsg_end(skb2, nlh2);
 
 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1469,11 +1472,12 @@
 			 IPSET_CMD_TYPE);
 	if (!nlh2)
 		goto nlmsg_failure;
-	NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
-	NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename);
-	NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family);
-	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max);
-	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min);
+	if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+	    nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
+	    nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
+	    nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
+	    nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min))
+		goto nla_put_failure;
 	nlmsg_end(skb2, nlh2);
 
 	pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
@@ -1517,7 +1521,8 @@
 			 IPSET_CMD_PROTOCOL);
 	if (!nlh2)
 		goto nlmsg_failure;
-	NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+	if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
+		goto nla_put_failure;
 	nlmsg_end(skb2, nlh2);
 
 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1613,7 +1618,7 @@
 static int
 ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
 {
-	unsigned *op;
+	unsigned int *op;
 	void *data;
 	int copylen = *len, ret = 0;
 
@@ -1621,7 +1626,7 @@
 		return -EPERM;
 	if (optval != SO_IP_SET)
 		return -EBADF;
-	if (*len < sizeof(unsigned))
+	if (*len < sizeof(unsigned int))
 		return -EINVAL;
 
 	data = vmalloc(*len);
@@ -1631,7 +1636,7 @@
 		ret = -EFAULT;
 		goto done;
 	}
-	op = (unsigned *) data;
+	op = (unsigned int *) data;
 
 	if (*op < IP_SET_OP_VERSION) {
 		/* Check the version at the beginning of operations */
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 5139dea..507fe93 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -81,7 +81,8 @@
 static inline bool
 hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
 {
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -94,9 +95,10 @@
 	const struct hash_ip4_telem *tdata =
 		(const struct hash_ip4_telem *)data;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))))
+		goto nla_put_failure;
 
 	return 0;
 
@@ -262,7 +264,8 @@
 static bool
 hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
 {
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -275,9 +278,10 @@
 	const struct hash_ip6_telem *e =
 		(const struct hash_ip6_telem *)data;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 9c27e24..68f284c 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -93,9 +93,10 @@
 hash_ipport4_data_list(struct sk_buff *skb,
 		       const struct hash_ipport4_elem *data)
 {
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -109,12 +110,12 @@
 	const struct hash_ipport4_telem *tdata =
 		(const struct hash_ipport4_telem *)data;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
-
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -308,9 +309,10 @@
 hash_ipport6_data_list(struct sk_buff *skb,
 		       const struct hash_ipport6_elem *data)
 {
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -324,11 +326,12 @@
 	const struct hash_ipport6_telem *e =
 		(const struct hash_ipport6_telem *)data;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 9134057..1eec4b9 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -94,10 +94,11 @@
 hash_ipportip4_data_list(struct sk_buff *skb,
 		       const struct hash_ipportip4_elem *data)
 {
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -111,13 +112,13 @@
 	const struct hash_ipportip4_telem *tdata =
 		(const struct hash_ipportip4_telem *)data;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
-
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -319,10 +320,11 @@
 hash_ipportip6_data_list(struct sk_buff *skb,
 			 const struct hash_ipportip6_elem *data)
 {
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -336,12 +338,13 @@
 	const struct hash_ipportip6_telem *e =
 		(const struct hash_ipportip6_telem *)data;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 5d05e69..62d66ec 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -124,13 +124,14 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -145,16 +146,16 @@
 		(const struct hash_ipportnet4_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -436,13 +437,14 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -457,15 +459,16 @@
 		(const struct hash_ipportnet6_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 7c3d945..6607a81 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -111,10 +111,11 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -128,13 +129,13 @@
 		(const struct hash_net4_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, tdata->cidr) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -339,10 +340,11 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -356,12 +358,13 @@
 		(const struct hash_net6_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, e->cidr) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index f24037f..6093f3d 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -252,11 +252,12 @@
 
 	if (data->nomatch)
 		flags |= IPSET_FLAG_NOMATCH;
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -273,13 +274,14 @@
 
 	if (data->nomatch)
 		flags |= IPSET_FLAG_NOMATCH;
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))))
+		goto nla_put_failure;
 
 	return 0;
 
@@ -555,11 +557,12 @@
 
 	if (data->nomatch)
 		flags |= IPSET_FLAG_NOMATCH;
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -576,13 +579,14 @@
 
 	if (data->nomatch)
 		flags |= IPSET_FLAG_NOMATCH;
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index ce2e771..ae3c644 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -124,12 +124,13 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -144,15 +145,15 @@
 		(const struct hash_netport4_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -402,12 +403,13 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -422,14 +424,15 @@
 		(const struct hash_netport6_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 7e095f9..6cb1225 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -402,12 +402,13 @@
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
-	NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
-	if (with_timeout(map->timeout))
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-		      htonl(sizeof(*map) + map->size * map->dsize));
+	if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
+	    (with_timeout(map->timeout) &&
+	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
+	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+			  htonl(sizeof(*map) + map->size * map->dsize)))
+		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
 	return 0;
@@ -442,13 +443,15 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_STRING(skb, IPSET_ATTR_NAME,
-			       ip_set_name_byindex(e->id));
+		if (nla_put_string(skb, IPSET_ATTR_NAME,
+				   ip_set_name_byindex(e->id)))
+			goto nla_put_failure;
 		if (with_timeout(map->timeout)) {
 			const struct set_telem *te =
 				(const struct set_telem *) e;
-			NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-				      htonl(ip_set_timeout_get(te->timeout)));
+			__be32 to = htonl(ip_set_timeout_get(te->timeout));
+			if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, to))
+				goto nla_put_failure;
 		}
 		ipset_nest_end(skb, nested);
 	}
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 5285617..64f9e8f 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -313,7 +313,7 @@
  *	Assumes already checked proto==IPPROTO_TCP and diff!=0.
  */
 static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
-				 unsigned flag, __u32 seq, int diff)
+				 unsigned int flag, __u32 seq, int diff)
 {
 	/* spinlock is to keep updating cp->flags atomic */
 	spin_lock(&cp->lock);
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 29fa5ba..4a09b78 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -86,42 +86,42 @@
 static struct ip_vs_aligned_lock
 __ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
 
-static inline void ct_read_lock(unsigned key)
+static inline void ct_read_lock(unsigned int key)
 {
 	read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_read_unlock(unsigned key)
+static inline void ct_read_unlock(unsigned int key)
 {
 	read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_write_lock(unsigned key)
+static inline void ct_write_lock(unsigned int key)
 {
 	write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_write_unlock(unsigned key)
+static inline void ct_write_unlock(unsigned int key)
 {
 	write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_read_lock_bh(unsigned key)
+static inline void ct_read_lock_bh(unsigned int key)
 {
 	read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_read_unlock_bh(unsigned key)
+static inline void ct_read_unlock_bh(unsigned int key)
 {
 	read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_write_lock_bh(unsigned key)
+static inline void ct_write_lock_bh(unsigned int key)
 {
 	write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_write_unlock_bh(unsigned key)
+static inline void ct_write_unlock_bh(unsigned int key)
 {
 	write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
@@ -130,7 +130,7 @@
 /*
  *	Returns hash value for IPVS connection entry
  */
-static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
+static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned int proto,
 				       const union nf_inet_addr *addr,
 				       __be16 port)
 {
@@ -188,7 +188,7 @@
  */
 static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
 {
-	unsigned hash;
+	unsigned int hash;
 	int ret;
 
 	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
@@ -224,7 +224,7 @@
  */
 static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
 {
-	unsigned hash;
+	unsigned int hash;
 	int ret;
 
 	/* unhash it and decrease its reference counter */
@@ -257,7 +257,7 @@
 static inline struct ip_vs_conn *
 __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_conn *cp;
 	struct hlist_node *n;
 
@@ -344,7 +344,7 @@
 /* Get reference to connection template */
 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_conn *cp;
 	struct hlist_node *n;
 
@@ -394,7 +394,7 @@
  *	p->vaddr, p->vport: pkt dest address (foreign host) */
 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_conn *cp, *ret=NULL;
 	struct hlist_node *n;
 
@@ -824,7 +824,7 @@
  */
 struct ip_vs_conn *
 ip_vs_conn_new(const struct ip_vs_conn_param *p,
-	       const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
+	       const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
 	       struct ip_vs_dest *dest, __u32 fwmark)
 {
 	struct ip_vs_conn *cp;
@@ -1057,7 +1057,7 @@
 	.release = seq_release_net,
 };
 
-static const char *ip_vs_origin_name(unsigned flags)
+static const char *ip_vs_origin_name(unsigned int flags)
 {
 	if (flags & IP_VS_CONN_F_SYNC)
 		return "SYNC";
@@ -1169,7 +1169,7 @@
 	 * Randomly scan 1/32 of the whole table every second
 	 */
 	for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
-		unsigned hash = net_random() & ip_vs_conn_tab_mask;
+		unsigned int hash = net_random() & ip_vs_conn_tab_mask;
 		struct hlist_node *n;
 
 		/*
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2555816..b5a5c73 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -80,7 +80,7 @@
 #define icmp_id(icmph)          (((icmph)->un).echo.id)
 #define icmpv6_id(icmph)        (icmph->icmp6_dataun.u_echo.identifier)
 
-const char *ip_vs_proto_name(unsigned proto)
+const char *ip_vs_proto_name(unsigned int proto)
 {
 	static char buf[20];
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b3afe18..e3707d2 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -265,11 +265,11 @@
 /*
  *	Returns hash value for virtual service
  */
-static inline unsigned
-ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
+static inline unsigned int
+ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto,
 		  const union nf_inet_addr *addr, __be16 port)
 {
-	register unsigned porth = ntohs(port);
+	register unsigned int porth = ntohs(port);
 	__be32 addr_fold = addr->ip;
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -286,7 +286,7 @@
 /*
  *	Returns hash value of fwmark for virtual service lookup
  */
-static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
+static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
 {
 	return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
 }
@@ -298,7 +298,7 @@
  */
 static int ip_vs_svc_hash(struct ip_vs_service *svc)
 {
-	unsigned hash;
+	unsigned int hash;
 
 	if (svc->flags & IP_VS_SVC_F_HASHED) {
 		pr_err("%s(): request for already hashed, called from %pF\n",
@@ -361,7 +361,7 @@
 __ip_vs_service_find(struct net *net, int af, __u16 protocol,
 		     const union nf_inet_addr *vaddr, __be16 vport)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_service *svc;
 
 	/* Check for "full" addressed entries */
@@ -388,7 +388,7 @@
 static inline struct ip_vs_service *
 __ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_service *svc;
 
 	/* Check for fwmark addressed entries */
@@ -489,11 +489,11 @@
 /*
  *	Returns hash value for real service
  */
-static inline unsigned ip_vs_rs_hashkey(int af,
+static inline unsigned int ip_vs_rs_hashkey(int af,
 					    const union nf_inet_addr *addr,
 					    __be16 port)
 {
-	register unsigned porth = ntohs(port);
+	register unsigned int porth = ntohs(port);
 	__be32 addr_fold = addr->ip;
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -512,7 +512,7 @@
  */
 static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
 {
-	unsigned hash;
+	unsigned int hash;
 
 	if (!list_empty(&dest->d_list)) {
 		return 0;
@@ -555,7 +555,7 @@
 			  __be16 dport)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_dest *dest;
 
 	/*
@@ -842,7 +842,7 @@
 	       struct ip_vs_dest **dest_p)
 {
 	struct ip_vs_dest *dest;
-	unsigned atype;
+	unsigned int atype;
 
 	EnterFunction(2);
 
@@ -1846,13 +1846,6 @@
 	{ }
 };
 
-const struct ctl_path net_vs_ctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ .procname = "vs", },
-	{ }
-};
-EXPORT_SYMBOL_GPL(net_vs_ctl_path);
 #endif
 
 #ifdef CONFIG_PROC_FS
@@ -1867,7 +1860,7 @@
  *	Write the contents of the VS rule table to a PROCfs file.
  *	(It is kept just for backward compatibility)
  */
-static inline const char *ip_vs_fwd_name(unsigned flags)
+static inline const char *ip_vs_fwd_name(unsigned int flags)
 {
 	switch (flags & IP_VS_CONN_F_FWD_MASK) {
 	case IP_VS_CONN_F_LOCALNODE:
@@ -2816,17 +2809,17 @@
 
 	ip_vs_copy_stats(&ustats, stats);
 
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts);
-	NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes);
-	NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, ustats.cps);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps);
-
+	if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
+		goto nla_put_failure;
 	nla_nest_end(skb, nl_stats);
 
 	return 0;
@@ -2847,23 +2840,25 @@
 	if (!nl_service)
 		return -EMSGSIZE;
 
-	NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af);
-
+	if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af))
+		goto nla_put_failure;
 	if (svc->fwmark) {
-		NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
+		if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark))
+			goto nla_put_failure;
 	} else {
-		NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
-		NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr);
-		NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port);
+		if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) ||
+		    nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) ||
+		    nla_put_u16(skb, IPVS_SVC_ATTR_PORT, svc->port))
+			goto nla_put_failure;
 	}
 
-	NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name);
-	if (svc->pe)
-		NLA_PUT_STRING(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name);
-	NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags);
-	NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ);
-	NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask);
-
+	if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name) ||
+	    (svc->pe &&
+	     nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) ||
+	    nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
+	    nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
+	    nla_put_u32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
+		goto nla_put_failure;
 	if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
 		goto nla_put_failure;
 
@@ -3038,21 +3033,22 @@
 	if (!nl_dest)
 		return -EMSGSIZE;
 
-	NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr);
-	NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
-
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
-		    atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
-		    atomic_read(&dest->activeconns));
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS,
-		    atomic_read(&dest->inactconns));
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
-		    atomic_read(&dest->persistconns));
-
+	if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
+	    nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
+			(atomic_read(&dest->conn_flags) &
+			 IP_VS_CONN_F_FWD_MASK)) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
+			atomic_read(&dest->weight)) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
+			atomic_read(&dest->activeconns)) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
+			atomic_read(&dest->inactconns)) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
+			atomic_read(&dest->persistconns)))
+		goto nla_put_failure;
 	if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
 		goto nla_put_failure;
 
@@ -3181,10 +3177,10 @@
 	if (!nl_daemon)
 		return -EMSGSIZE;
 
-	NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state);
-	NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn);
-	NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid);
-
+	if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
+	    nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
+	    nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
+		goto nla_put_failure;
 	nla_nest_end(skb, nl_daemon);
 
 	return 0;
@@ -3473,21 +3469,26 @@
 
 		__ip_vs_get_timeouts(net, &t);
 #ifdef CONFIG_IP_VS_PROTO_TCP
-		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
-		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
-			    t.tcp_fin_timeout);
+		if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
+				t.tcp_timeout) ||
+		    nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
+				t.tcp_fin_timeout))
+			goto nla_put_failure;
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
-		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout);
+		if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout))
+			goto nla_put_failure;
 #endif
 
 		break;
 	}
 
 	case IPVS_CMD_GET_INFO:
-		NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
-		NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
-			    ip_vs_conn_tab_size);
+		if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION,
+				IP_VS_VERSION_CODE) ||
+		    nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
+				ip_vs_conn_tab_size))
+			goto nla_put_failure;
 		break;
 	}
 
@@ -3664,8 +3665,7 @@
 	tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
 
 
-	ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
-						     tbl);
+	ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
 	if (ipvs->sysctl_hdr == NULL) {
 		if (!net_eq(net, &init_net))
 			kfree(tbl);
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 1c269e5..1a53a7a 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -68,7 +68,7 @@
 /*
  *	Returns hash value for IPVS DH entry
  */
-static inline unsigned ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr)
+static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr)
 {
 	__be32 addr_fold = addr->ip;
 
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 538d74e..debb8c7 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -177,7 +177,7 @@
 	__be16 port;
 	struct ip_vs_conn *n_cp;
 	char buf[24];		/* xxx.xxx.xxx.xxx,ppp,ppp\000 */
-	unsigned buf_len;
+	unsigned int buf_len;
 	int ret = 0;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 0f16283..1024466 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -142,7 +142,7 @@
 /*
  *	Returns hash value for IPVS LBLC entry
  */
-static inline unsigned
+static inline unsigned int
 ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
 {
 	__be32 addr_fold = addr->ip;
@@ -163,7 +163,7 @@
 static void
 ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
 {
-	unsigned hash = ip_vs_lblc_hashkey(en->af, &en->addr);
+	unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);
 
 	list_add(&en->list, &tbl->bucket[hash]);
 	atomic_inc(&tbl->entries);
@@ -178,7 +178,7 @@
 ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
 	       const union nf_inet_addr *addr)
 {
-	unsigned hash = ip_vs_lblc_hashkey(af, addr);
+	unsigned int hash = ip_vs_lblc_hashkey(af, addr);
 	struct ip_vs_lblc_entry *en;
 
 	list_for_each_entry(en, &tbl->bucket[hash], list)
@@ -563,8 +563,7 @@
 	ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
 
 	ipvs->lblc_ctl_header =
-		register_net_sysctl_table(net, net_vs_ctl_path,
-					  ipvs->lblc_ctl_table);
+		register_net_sysctl(net, "net/ipv4/vs", ipvs->lblc_ctl_table);
 	if (!ipvs->lblc_ctl_header) {
 		if (!net_eq(net, &init_net))
 			kfree(ipvs->lblc_ctl_table);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index eec797f..9261825 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -311,7 +311,7 @@
 /*
  *	Returns hash value for IPVS LBLCR entry
  */
-static inline unsigned
+static inline unsigned int
 ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
 {
 	__be32 addr_fold = addr->ip;
@@ -332,7 +332,7 @@
 static void
 ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
 {
-	unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr);
+	unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);
 
 	list_add(&en->list, &tbl->bucket[hash]);
 	atomic_inc(&tbl->entries);
@@ -347,7 +347,7 @@
 ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
 		const union nf_inet_addr *addr)
 {
-	unsigned hash = ip_vs_lblcr_hashkey(af, addr);
+	unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
 	struct ip_vs_lblcr_entry *en;
 
 	list_for_each_entry(en, &tbl->bucket[hash], list)
@@ -757,8 +757,7 @@
 	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
 
 	ipvs->lblcr_ctl_header =
-		register_net_sysctl_table(net, net_vs_ctl_path,
-					  ipvs->lblcr_ctl_table);
+		register_net_sysctl(net, "net/ipv4/vs", ipvs->lblcr_ctl_table);
 	if (!ipvs->lblcr_ctl_header) {
 		if (!net_eq(net, &init_net))
 			kfree(ipvs->lblcr_ctl_table);
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index f843a88..6eda11d 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -48,7 +48,7 @@
  */
 static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
 {
-	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+	unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
 
 	pp->next = ip_vs_proto_table[hash];
 	ip_vs_proto_table[hash] = pp;
@@ -69,7 +69,7 @@
 register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
-	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+	unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
 	struct ip_vs_proto_data *pd =
 			kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
 
@@ -94,7 +94,7 @@
 static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
 {
 	struct ip_vs_protocol **pp_p;
-	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+	unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
 
 	pp_p = &ip_vs_proto_table[hash];
 	for (; *pp_p; pp_p = &(*pp_p)->next) {
@@ -117,7 +117,7 @@
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_proto_data **pd_p;
-	unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol);
+	unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol);
 
 	pd_p = &ipvs->proto_data_table[hash];
 	for (; *pd_p; pd_p = &(*pd_p)->next) {
@@ -139,7 +139,7 @@
 struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
 {
 	struct ip_vs_protocol *pp;
-	unsigned hash = IP_VS_PROTO_HASH(proto);
+	unsigned int hash = IP_VS_PROTO_HASH(proto);
 
 	for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) {
 		if (pp->protocol == proto)
@@ -157,7 +157,7 @@
 __ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
 {
 	struct ip_vs_proto_data *pd;
-	unsigned hash = IP_VS_PROTO_HASH(proto);
+	unsigned int hash = IP_VS_PROTO_HASH(proto);
 
 	for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
 		if (pd->pp->protocol == proto)
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 069e8d4..91e97ee 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -70,7 +70,7 @@
 /*
  *	Returns hash value for IPVS SH entry
  */
-static inline unsigned ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr)
+static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr)
 {
 	__be32 addr_fold = addr->ip;
 
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8a0d6d6..bf5e538 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -839,7 +839,7 @@
 
 	p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
 	for (i=0; i<m->nr_conns; i++) {
-		unsigned flags, state;
+		unsigned int flags, state;
 
 		if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
 			IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
@@ -1109,7 +1109,7 @@
 
 		for (i=0; i<nr_conns; i++) {
 			union ip_vs_sync_conn *s;
-			unsigned size;
+			unsigned int size;
 			int retc;
 
 			p = msg_end;
@@ -1368,7 +1368,7 @@
 	 */
 	sk_change_net(sock->sk, net);
 	/* it is equivalent to the REUSEADDR option in user-space */
-	sock->sk->sk_reuse = 1;
+	sock->sk->sk_reuse = SK_CAN_REUSE;
 
 	result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
 			sizeof(struct sockaddr));
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index f4f8cda..d61e078 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -69,8 +69,8 @@
 
 	table[0].data = &net->ct.sysctl_acct;
 
-	net->ct.acct_sysctl_header = register_net_sysctl_table(net,
-			nf_net_netfilter_sysctl_path, table);
+	net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter",
+							 table);
 	if (!net->ct.acct_sysctl_header) {
 		printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n");
 		goto out_register;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3cc4487..cf0747c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1152,8 +1152,9 @@
 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
 			       const struct nf_conntrack_tuple *tuple)
 {
-	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
-	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
+	if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
+	    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -1592,7 +1593,7 @@
 	return 0;
 
 err_timeout:
-	nf_conntrack_timeout_fini(net);
+	nf_conntrack_ecache_fini(net);
 err_ecache:
 	nf_conntrack_tstamp_fini(net);
 err_tstamp:
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5bd3047d..b924f3a 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -199,8 +199,7 @@
 	table[1].data = &net->ct.sysctl_events_retry_timeout;
 
 	net->ct.event_sysctl_header =
-		register_net_sysctl_table(net,
-					  nf_net_netfilter_sysctl_path, table);
+		register_net_sysctl(net, "net/netfilter", table);
 	if (!net->ct.event_sysctl_header) {
 		printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
 		goto out_register;
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 722291f..471b054 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -1230,7 +1230,7 @@
 
 /****************************************************************************/
 static int set_expect_timeout(struct nf_conntrack_expect *exp,
-			      unsigned timeout)
+			      unsigned int timeout)
 {
 	if (!exp || !del_timer(&exp->timeout))
 		return 0;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ca7e835..462ec2d 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -66,7 +66,8 @@
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
 	if (!nest_parms)
 		goto nla_put_failure;
-	NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum);
+	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
+		goto nla_put_failure;
 
 	if (likely(l4proto->tuple_to_nlattr))
 		ret = l4proto->tuple_to_nlattr(skb, tuple);
@@ -126,7 +127,8 @@
 static inline int
 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status));
+	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -141,7 +143,8 @@
 	if (timeout < 0)
 		timeout = 0;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout));
+	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -190,7 +193,8 @@
 	nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
 	if (!nest_helper)
 		goto nla_put_failure;
-	NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name);
+	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
+		goto nla_put_failure;
 
 	if (helper->to_nlattr)
 		helper->to_nlattr(skb, ct);
@@ -214,8 +218,9 @@
 	if (!nest_count)
 		goto nla_put_failure;
 
-	NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts));
-	NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes));
+	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
+	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest_count);
 
@@ -260,11 +265,10 @@
 	if (!nest_count)
 		goto nla_put_failure;
 
-	NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
-	if (tstamp->stop != 0) {
-		NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
-			     cpu_to_be64(tstamp->stop));
-	}
+	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
+	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
+					       cpu_to_be64(tstamp->stop))))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_count);
 
 	return 0;
@@ -277,7 +281,8 @@
 static inline int
 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark));
+	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -304,7 +309,8 @@
 	if (!nest_secctx)
 		goto nla_put_failure;
 
-	NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
+	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_secctx);
 
 	ret = 0;
@@ -349,12 +355,13 @@
 	if (!nest_parms)
 		goto nla_put_failure;
 
-	NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS,
-		     htonl(natseq->correction_pos));
-	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
-		     htonl(natseq->offset_before));
-	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
-		     htonl(natseq->offset_after));
+	if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
+			 htonl(natseq->correction_pos)) ||
+	    nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
+			 htonl(natseq->offset_before)) ||
+	    nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
+			 htonl(natseq->offset_after)))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest_parms);
 
@@ -390,7 +397,8 @@
 static inline int
 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct));
+	if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -400,7 +408,8 @@
 static inline int
 ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)));
+	if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -440,8 +449,9 @@
 		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
-	if (nf_ct_zone(ct))
-		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
+	if (nf_ct_zone(ct) &&
+	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+		goto nla_put_failure;
 
 	if (ctnetlink_dump_status(skb, ct) < 0 ||
 	    ctnetlink_dump_timeout(skb, ct) < 0 ||
@@ -617,8 +627,9 @@
 		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
-	if (nf_ct_zone(ct))
-		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
+	if (nf_ct_zone(ct) &&
+	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+		goto nla_put_failure;
 
 	if (ctnetlink_dump_id(skb, ct) < 0)
 		goto nla_put_failure;
@@ -1705,7 +1716,8 @@
 		if (!nest_parms)
 			goto nla_put_failure;
 
-		NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir));
+		if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
+			goto nla_put_failure;
 
 		nat_tuple.src.l3num = nf_ct_l3num(master);
 		nat_tuple.src.u3.ip = exp->saved_ip;
@@ -1718,21 +1730,24 @@
 	        nla_nest_end(skb, nest_parms);
 	}
 #endif
-	NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
-	NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
-	NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
-	NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class));
+	if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
+	    nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+	    nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
+	    nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
+		goto nla_put_failure;
 	help = nfct_help(master);
 	if (help) {
 		struct nf_conntrack_helper *helper;
 
 		helper = rcu_dereference(help->helper);
-		if (helper)
-			NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
+		if (helper &&
+		    nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
+			goto nla_put_failure;
 	}
 	expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
-	if (expfn != NULL)
-		NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name);
+	if (expfn != NULL &&
+	    nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
+		goto nla_put_failure;
 
 	return 0;
 
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index be3da2c..8b631b0 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -36,11 +36,11 @@
 
 #ifdef CONFIG_SYSCTL
 static int
-nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_path *path,
+nf_ct_register_sysctl(struct ctl_table_header **header, const char *path,
 		      struct ctl_table *table, unsigned int *users)
 {
 	if (*header == NULL) {
-		*header = register_sysctl_paths(path, table);
+		*header = register_net_sysctl(&init_net, path, table);
 		if (*header == NULL)
 			return -ENOMEM;
 	}
@@ -56,7 +56,7 @@
 	if (users != NULL && --*users > 0)
 		return;
 
-	unregister_sysctl_table(*header);
+	unregister_net_sysctl_table(*header);
 	*header = NULL;
 }
 #endif
@@ -250,7 +250,7 @@
 #ifdef CONFIG_SYSCTL
 	if (l4proto->ctl_table != NULL) {
 		err = nf_ct_register_sysctl(l4proto->ctl_table_header,
-					    nf_net_netfilter_sysctl_path,
+					    "net/netfilter",
 					    l4proto->ctl_table,
 					    l4proto->ctl_table_users);
 		if (err < 0)
@@ -259,7 +259,7 @@
 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
 	if (l4proto->ctl_compat_table != NULL) {
 		err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header,
-					    nf_net_ipv4_netfilter_sysctl_path,
+					    "net/ipv4/netfilter",
 					    l4proto->ctl_compat_table, NULL);
 		if (err == 0)
 			goto out;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 24fdce2..ef706a4 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -643,11 +643,12 @@
 	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
 	if (!nest_parms)
 		goto nla_put_failure;
-	NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state);
-	NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE,
-		   ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]);
-	NLA_PUT_BE64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
-		     cpu_to_be64(ct->proto.dccp.handshake_seq));
+	if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) ||
+	    nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
+		       ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
+	    nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
+			 cpu_to_be64(ct->proto.dccp.handshake_seq)))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 	spin_unlock_bh(&ct->lock);
 	return 0;
@@ -739,9 +740,10 @@
         const unsigned int *timeouts = data;
 	int i;
 
-	for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++)
-		NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
-
+	for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
+		if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
+			goto nla_put_failure;
+	}
 	return 0;
 
 nla_put_failure:
@@ -908,8 +910,8 @@
 	dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
 	dn->sysctl_table[7].data = &dn->dccp_loose;
 
-	dn->sysctl_header = register_net_sysctl_table(net,
-			nf_net_netfilter_sysctl_path, dn->sysctl_table);
+	dn->sysctl_header = register_net_sysctl(net, "net/netfilter",
+						dn->sysctl_table);
 	if (!dn->sysctl_header) {
 		kfree(dn->sysctl_table);
 		return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 835e24c..d8923d5 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -90,7 +90,8 @@
 {
 	const unsigned int *timeout = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ));
+	if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)))
+		goto nla_put_failure;
 
 	return 0;
 
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 659648c..4bf6b4e 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -321,10 +321,11 @@
 {
 	const unsigned int *timeouts = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
-			htonl(timeouts[GRE_CT_UNREPLIED] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED,
-			htonl(timeouts[GRE_CT_REPLIED] / HZ));
+	if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
+			 htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED,
+			 htonl(timeouts[GRE_CT_REPLIED] / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 72b5088..996db2f 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -482,15 +482,12 @@
 	if (!nest_parms)
 		goto nla_put_failure;
 
-	NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state);
-
-	NLA_PUT_BE32(skb,
-		     CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
-		     ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]);
-
-	NLA_PUT_BE32(skb,
-		     CTA_PROTOINFO_SCTP_VTAG_REPLY,
-		     ct->proto.sctp.vtag[IP_CT_DIR_REPLY]);
+	if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) ||
+	    nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
+			 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) ||
+	    nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY,
+			 ct->proto.sctp.vtag[IP_CT_DIR_REPLY]))
+		goto nla_put_failure;
 
 	spin_unlock_bh(&ct->lock);
 
@@ -578,9 +575,10 @@
         const unsigned int *timeouts = data;
 	int i;
 
-	for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++)
-	        NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
-
+	for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
+	        if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
+			goto nla_put_failure;
+	}
         return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 361eade..4dfbfa8 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -584,8 +584,8 @@
 			 * Let's try to use the data from the packet.
 			 */
 			sender->td_end = end;
-			win <<= sender->td_scale;
-			sender->td_maxwin = (win == 0 ? 1 : win);
+			swin = win << sender->td_scale;
+			sender->td_maxwin = (swin == 0 ? 1 : swin);
 			sender->td_maxend = end + sender->td_maxwin;
 			/*
 			 * We haven't seen traffic in the other direction yet
@@ -1147,21 +1147,22 @@
 	if (!nest_parms)
 		goto nla_put_failure;
 
-	NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state);
-
-	NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
-		   ct->proto.tcp.seen[0].td_scale);
-
-	NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
-		   ct->proto.tcp.seen[1].td_scale);
+	if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
+	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
+		       ct->proto.tcp.seen[0].td_scale) ||
+	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
+		       ct->proto.tcp.seen[1].td_scale))
+		goto nla_put_failure;
 
 	tmp.flags = ct->proto.tcp.seen[0].flags;
-	NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
-		sizeof(struct nf_ct_tcp_flags), &tmp);
+	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
+		    sizeof(struct nf_ct_tcp_flags), &tmp))
+		goto nla_put_failure;
 
 	tmp.flags = ct->proto.tcp.seen[1].flags;
-	NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
-		sizeof(struct nf_ct_tcp_flags), &tmp);
+	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
+		    sizeof(struct nf_ct_tcp_flags), &tmp))
+		goto nla_put_failure;
 	spin_unlock_bh(&ct->lock);
 
 	nla_nest_end(skb, nest_parms);
@@ -1310,28 +1311,29 @@
 {
 	const unsigned int *timeouts = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
-			htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
-			htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
-			htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
-			htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
-			htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
-			htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
-			htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE,
-			htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
-			htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS,
-			htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK,
-			htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ));
+	if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
+			htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
+			 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
+			 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
+			 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
+			 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
+			 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
+			 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
+			 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
+			 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
+			 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
+			 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index a9073dc..7259a6b 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -181,10 +181,11 @@
 {
 	const unsigned int *timeouts = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
-			htonl(timeouts[UDP_CT_UNREPLIED] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED,
-			htonl(timeouts[UDP_CT_REPLIED] / HZ));
+	if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
+			 htonl(timeouts[UDP_CT_UNREPLIED] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED,
+			 htonl(timeouts[UDP_CT_REPLIED] / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index e060639..4d60a53 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -185,10 +185,11 @@
 {
 	const unsigned int *timeouts = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
-			htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
-			htonl(timeouts[UDPLITE_CT_REPLIED] / HZ));
+	if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
+			 htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
+			 htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 885f5ab..9b39432 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -468,18 +468,13 @@
 	{ }
 };
 
-static struct ctl_path nf_ct_path[] = {
-	{ .procname = "net", },
-	{ }
-};
-
 static int nf_conntrack_standalone_init_sysctl(struct net *net)
 {
 	struct ctl_table *table;
 
 	if (net_eq(net, &init_net)) {
 		nf_ct_netfilter_header =
-		       register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table);
+		       register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
 		if (!nf_ct_netfilter_header)
 			goto out;
 	}
@@ -494,8 +489,7 @@
 	table[3].data = &net->ct.sysctl_checksum;
 	table[4].data = &net->ct.sysctl_log_invalid;
 
-	net->ct.sysctl_header = register_net_sysctl_table(net,
-					nf_net_netfilter_sysctl_path, table);
+	net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
 	if (!net->ct.sysctl_header)
 		goto out_unregister_netfilter;
 
@@ -505,7 +499,7 @@
 	kfree(table);
 out_kmemdup:
 	if (net_eq(net, &init_net))
-		unregister_sysctl_table(nf_ct_netfilter_header);
+		unregister_net_sysctl_table(nf_ct_netfilter_header);
 out:
 	printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
 	return -ENOMEM;
@@ -516,7 +510,7 @@
 	struct ctl_table *table;
 
 	if (net_eq(net, &init_net))
-		unregister_sysctl_table(nf_ct_netfilter_header);
+		unregister_net_sysctl_table(nf_ct_netfilter_header);
 	table = net->ct.sysctl_header->ctl_table_arg;
 	unregister_net_sysctl_table(net->ct.sysctl_header);
 	kfree(table);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index e8d27af..dbb364f 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -51,8 +51,8 @@
 
 	table[0].data = &net->ct.sysctl_tstamp;
 
-	net->ct.tstamp_sysctl_header = register_net_sysctl_table(net,
-			nf_net_netfilter_sysctl_path, table);
+	net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter",
+							   table);
 	if (!net->ct.tstamp_sysctl_header) {
 		printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
 		goto out_register;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 957374a..703fb26 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -214,13 +214,6 @@
 #endif /* PROC_FS */
 
 #ifdef CONFIG_SYSCTL
-static struct ctl_path nf_log_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "netfilter", },
-	{ .procname = "nf_log", },
-	{ }
-};
-
 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
 static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
 static struct ctl_table_header *nf_log_dir_header;
@@ -283,7 +276,7 @@
 		nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i;
 	}
 
-	nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path,
+	nf_log_dir_header = register_net_sysctl(&init_net, "net/netfilter/nf_log",
 				       nf_log_sysctl_table);
 	if (!nf_log_dir_header)
 		return -ENOMEM;
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index e6ddde1..3e797d1 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -103,7 +103,7 @@
 EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
 
 int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
-		   unsigned group, int echo, gfp_t flags)
+		   unsigned int group, int echo, gfp_t flags)
 {
 	return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags);
 }
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index d98c868..b2e7310 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -109,7 +109,8 @@
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = 0;
 
-	NLA_PUT_STRING(skb, NFACCT_NAME, acct->name);
+	if (nla_put_string(skb, NFACCT_NAME, acct->name))
+		goto nla_put_failure;
 
 	if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
 		pkts = atomic64_xchg(&acct->pkts, 0);
@@ -118,9 +119,10 @@
 		pkts = atomic64_read(&acct->pkts);
 		bytes = atomic64_read(&acct->bytes);
 	}
-	NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts));
-	NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes));
-	NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)));
+	if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) ||
+	    nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
+	    nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
+		goto nla_put_failure;
 
 	nlmsg_end(skb, nlh);
 	return skb->len;
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 2b9e79f..3e65528 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -170,11 +170,12 @@
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = 0;
 
-	NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name);
-	NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num));
-	NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto);
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_USE,
-			htonl(atomic_read(&timeout->refcnt)));
+	if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) ||
+	    nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) ||
+	    nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_USE,
+			 htonl(atomic_read(&timeout->refcnt))))
+		goto nla_put_failure;
 
 	if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
 		struct nlattr *nest_parms;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 66b2c54..3c3cfc0 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -391,67 +391,78 @@
 	pmsg.hw_protocol	= skb->protocol;
 	pmsg.hook		= hooknum;
 
-	NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);
+	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
+		goto nla_put_failure;
 
-	if (prefix)
-		NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);
+	if (prefix &&
+	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
+		goto nla_put_failure;
 
 	if (indev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-			     htonl(indev->ifindex));
+		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+				 htonl(indev->ifindex)))
+			goto nla_put_failure;
 #else
 		if (pf == PF_BRIDGE) {
 			/* Case 1: indev is physical input device, we need to
 			 * look for bridge group (when called from
 			 * netfilter_bridge) */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
-				     htonl(indev->ifindex));
+			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
+					 htonl(indev->ifindex)) ||
 			/* this is the bridge group "brX" */
 			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
+			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
+				goto nla_put_failure;
 		} else {
 			/* Case 2: indev is bridge group, we need to look for
 			 * physical device (when called from ipv4) */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-				     htonl(indev->ifindex));
-			if (skb->nf_bridge && skb->nf_bridge->physindev)
-				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
-					     htonl(skb->nf_bridge->physindev->ifindex));
+			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+					 htonl(indev->ifindex)))
+				goto nla_put_failure;
+			if (skb->nf_bridge && skb->nf_bridge->physindev &&
+			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
+					 htonl(skb->nf_bridge->physindev->ifindex)))
+				goto nla_put_failure;
 		}
 #endif
 	}
 
 	if (outdev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-			     htonl(outdev->ifindex));
+		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+				 htonl(outdev->ifindex)))
+			goto nla_put_failure;
 #else
 		if (pf == PF_BRIDGE) {
 			/* Case 1: outdev is physical output device, we need to
 			 * look for bridge group (when called from
 			 * netfilter_bridge) */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
-				     htonl(outdev->ifindex));
+			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
+					 htonl(outdev->ifindex)) ||
 			/* this is the bridge group "brX" */
 			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
+			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
+				goto nla_put_failure;
 		} else {
 			/* Case 2: outdev is a bridge group, we need to look
 			 * for physical device (when called from ipv4) */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-				     htonl(outdev->ifindex));
-			if (skb->nf_bridge && skb->nf_bridge->physoutdev)
-				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
-					     htonl(skb->nf_bridge->physoutdev->ifindex));
+			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+					 htonl(outdev->ifindex)))
+				goto nla_put_failure;
+			if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
+			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
+					 htonl(skb->nf_bridge->physoutdev->ifindex)))
+				goto nla_put_failure;
 		}
 #endif
 	}
 
-	if (skb->mark)
-		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
+	if (skb->mark &&
+	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
+		goto nla_put_failure;
 
 	if (indev && skb->dev &&
 	    skb->mac_header != skb->network_header) {
@@ -459,16 +470,18 @@
 		int len = dev_parse_header(skb, phw.hw_addr);
 		if (len > 0) {
 			phw.hw_addrlen = htons(len);
-			NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
+			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
+				goto nla_put_failure;
 		}
 	}
 
 	if (indev && skb_mac_header_was_set(skb)) {
-		NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
-		NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
-			     htons(skb->dev->hard_header_len));
-		NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
-			skb_mac_header(skb));
+		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+		    nla_put_be16(inst->skb, NFULA_HWLEN,
+				 htons(skb->dev->hard_header_len)) ||
+		    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
+			    skb_mac_header(skb)))
+			goto nla_put_failure;
 	}
 
 	if (skb->tstamp.tv64) {
@@ -477,7 +490,8 @@
 		ts.sec = cpu_to_be64(tv.tv_sec);
 		ts.usec = cpu_to_be64(tv.tv_usec);
 
-		NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
+		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
+			goto nla_put_failure;
 	}
 
 	/* UID */
@@ -487,22 +501,24 @@
 			struct file *file = skb->sk->sk_socket->file;
 			__be32 uid = htonl(file->f_cred->fsuid);
 			__be32 gid = htonl(file->f_cred->fsgid);
-			/* need to unlock here since NLA_PUT may goto */
 			read_unlock_bh(&skb->sk->sk_callback_lock);
-			NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
-			NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
+			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
+			    nla_put_be32(inst->skb, NFULA_GID, gid))
+				goto nla_put_failure;
 		} else
 			read_unlock_bh(&skb->sk->sk_callback_lock);
 	}
 
 	/* local sequence number */
-	if (inst->flags & NFULNL_CFG_F_SEQ)
-		NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));
+	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
+	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
+		goto nla_put_failure;
 
 	/* global sequence number */
-	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
-		NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
-			     htonl(atomic_inc_return(&global_seq)));
+	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
+	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
+			 htonl(atomic_inc_return(&global_seq))))
+		goto nla_put_failure;
 
 	if (data_len) {
 		struct nlattr *nla;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index a80b0cb..8d6bcf3 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -288,58 +288,67 @@
 	indev = entry->indev;
 	if (indev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-		NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
+		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
+			goto nla_put_failure;
 #else
 		if (entry->pf == PF_BRIDGE) {
 			/* Case 1: indev is physical input device, we need to
 			 * look for bridge group (when called from
 			 * netfilter_bridge) */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
-				     htonl(indev->ifindex));
+			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
+					 htonl(indev->ifindex)) ||
 			/* this is the bridge group "brX" */
 			/* rcu_read_lock()ed by __nf_queue */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
-				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
+			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
+					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
+				goto nla_put_failure;
 		} else {
 			/* Case 2: indev is bridge group, we need to look for
 			 * physical device (when called from ipv4) */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
-				     htonl(indev->ifindex));
-			if (entskb->nf_bridge && entskb->nf_bridge->physindev)
-				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
-					     htonl(entskb->nf_bridge->physindev->ifindex));
+			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
+					 htonl(indev->ifindex)))
+				goto nla_put_failure;
+			if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
+			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
+					 htonl(entskb->nf_bridge->physindev->ifindex)))
+				goto nla_put_failure;
 		}
 #endif
 	}
 
 	if (outdev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-		NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
+		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
+			goto nla_put_failure;
 #else
 		if (entry->pf == PF_BRIDGE) {
 			/* Case 1: outdev is physical output device, we need to
 			 * look for bridge group (when called from
 			 * netfilter_bridge) */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
-				     htonl(outdev->ifindex));
+			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
+					 htonl(outdev->ifindex)) ||
 			/* this is the bridge group "brX" */
 			/* rcu_read_lock()ed by __nf_queue */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
-				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
+			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
+					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
+				goto nla_put_failure;
 		} else {
 			/* Case 2: outdev is bridge group, we need to look for
 			 * physical output device (when called from ipv4) */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
-				     htonl(outdev->ifindex));
-			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
-				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
-					     htonl(entskb->nf_bridge->physoutdev->ifindex));
+			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
+					 htonl(outdev->ifindex)))
+				goto nla_put_failure;
+			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
+			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
+					 htonl(entskb->nf_bridge->physoutdev->ifindex)))
+				goto nla_put_failure;
 		}
 #endif
 	}
 
-	if (entskb->mark)
-		NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
+	if (entskb->mark &&
+	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
+		goto nla_put_failure;
 
 	if (indev && entskb->dev &&
 	    entskb->mac_header != entskb->network_header) {
@@ -347,7 +356,8 @@
 		int len = dev_parse_header(entskb, phw.hw_addr);
 		if (len) {
 			phw.hw_addrlen = htons(len);
-			NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
+			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
+				goto nla_put_failure;
 		}
 	}
 
@@ -357,7 +367,8 @@
 		ts.sec = cpu_to_be64(tv.tv_sec);
 		ts.usec = cpu_to_be64(tv.tv_usec);
 
-		NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
+		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
+			goto nla_put_failure;
 	}
 
 	if (data_len) {
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index d2ff15a..fc0d6db 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -314,7 +314,7 @@
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *pde;
 #endif
-	unsigned i;
+	unsigned int i;
 	int ret = -EINVAL;
 
 	if (unlikely(!hash_rnd_inited)) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index faa48f7..b3025a6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -104,27 +104,27 @@
 }
 
 struct nl_pid_hash {
-	struct hlist_head *table;
-	unsigned long rehash_time;
+	struct hlist_head	*table;
+	unsigned long		rehash_time;
 
-	unsigned int mask;
-	unsigned int shift;
+	unsigned int		mask;
+	unsigned int		shift;
 
-	unsigned int entries;
-	unsigned int max_shift;
+	unsigned int		entries;
+	unsigned int		max_shift;
 
-	u32 rnd;
+	u32			rnd;
 };
 
 struct netlink_table {
-	struct nl_pid_hash hash;
-	struct hlist_head mc_list;
-	struct listeners __rcu *listeners;
-	unsigned int nl_nonroot;
-	unsigned int groups;
-	struct mutex *cb_mutex;
-	struct module *module;
-	int registered;
+	struct nl_pid_hash	hash;
+	struct hlist_head	mc_list;
+	struct listeners __rcu	*listeners;
+	unsigned int		nl_nonroot;
+	unsigned int		groups;
+	struct mutex		*cb_mutex;
+	struct module		*module;
+	int			registered;
 };
 
 static struct netlink_table *nl_table;
@@ -132,7 +132,6 @@
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 
 static int netlink_dump(struct sock *sk);
-static void netlink_destroy_callback(struct netlink_callback *cb);
 
 static DEFINE_RWLOCK(nl_table_lock);
 static atomic_t nl_table_users = ATOMIC_INIT(0);
@@ -149,6 +148,18 @@
 	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
 }
 
+static void netlink_destroy_callback(struct netlink_callback *cb)
+{
+	kfree_skb(cb->skb);
+	kfree(cb);
+}
+
+static void netlink_consume_callback(struct netlink_callback *cb)
+{
+	consume_skb(cb->skb);
+	kfree(cb);
+}
+
 static void netlink_sock_destruct(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
@@ -414,9 +425,9 @@
 	sock_init_data(sock, sk);
 
 	nlk = nlk_sk(sk);
-	if (cb_mutex)
+	if (cb_mutex) {
 		nlk->cb_mutex = cb_mutex;
-	else {
+	} else {
 		nlk->cb_mutex = &nlk->cb_def_mutex;
 		mutex_init(nlk->cb_mutex);
 	}
@@ -522,8 +533,9 @@
 			nl_table[sk->sk_protocol].module = NULL;
 			nl_table[sk->sk_protocol].registered = 0;
 		}
-	} else if (nlk->subscriptions)
+	} else if (nlk->subscriptions) {
 		netlink_update_listeners(sk);
+	}
 	netlink_table_ungrab();
 
 	kfree(nlk->groups);
@@ -866,7 +878,7 @@
 		struct sk_buff *nskb = skb_clone(skb, allocation);
 		if (!nskb)
 			return skb;
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = nskb;
 	}
 
@@ -896,8 +908,10 @@
 		ret = skb->len;
 		skb_set_owner_r(skb, sk);
 		nlk->netlink_rcv(skb);
+		consume_skb(skb);
+	} else {
+		kfree_skb(skb);
 	}
-	kfree_skb(skb);
 	sock_put(sk);
 	return ret;
 }
@@ -1086,8 +1100,8 @@
 	if (info.delivery_failure) {
 		kfree_skb(info.skb2);
 		return -ENOBUFS;
-	} else
-		consume_skb(info.skb2);
+	}
+	consume_skb(info.skb2);
 
 	if (info.delivered) {
 		if (info.congested && (allocation & __GFP_WAIT))
@@ -1240,8 +1254,9 @@
 			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
 			clear_bit(0, &nlk->state);
 			wake_up_interruptible(&nlk->wait);
-		} else
+		} else {
 			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
+		}
 		err = 0;
 		break;
 	default:
@@ -1645,12 +1660,6 @@
 }
 EXPORT_SYMBOL(netlink_set_nonroot);
 
-static void netlink_destroy_callback(struct netlink_callback *cb)
-{
-	kfree_skb(cb->skb);
-	kfree(cb);
-}
-
 struct nlmsghdr *
 __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
 {
@@ -1727,7 +1736,7 @@
 	nlk->cb = NULL;
 	mutex_unlock(nlk->cb_mutex);
 
-	netlink_destroy_callback(cb);
+	netlink_consume_callback(cb);
 	return 0;
 
 errout_skb:
@@ -1996,11 +2005,11 @@
 
 static int netlink_seq_show(struct seq_file *seq, void *v)
 {
-	if (v == SEQ_START_TOKEN)
+	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
 			 "sk       Eth Pid    Groups   "
 			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
-	else {
+	} else {
 		struct sock *s = v;
 		struct netlink_sock *nlk = nlk_sk(s);
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 9f40441..8340ace 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -635,11 +635,12 @@
 	if (hdr == NULL)
 		return -1;
 
-	NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name);
-	NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id);
-	NLA_PUT_U32(skb, CTRL_ATTR_VERSION, family->version);
-	NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize);
-	NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr);
+	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
+	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
+	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
+	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
+	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
+		goto nla_put_failure;
 
 	if (!list_empty(&family->ops_list)) {
 		struct nlattr *nla_ops;
@@ -657,8 +658,9 @@
 			if (nest == NULL)
 				goto nla_put_failure;
 
-			NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd);
-			NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags);
+			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
+			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags))
+				goto nla_put_failure;
 
 			nla_nest_end(skb, nest);
 		}
@@ -682,9 +684,10 @@
 			if (nest == NULL)
 				goto nla_put_failure;
 
-			NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
-			NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
-				       grp->name);
+			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
+			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
+					   grp->name))
+				goto nla_put_failure;
 
 			nla_nest_end(skb, nest);
 		}
@@ -710,8 +713,9 @@
 	if (hdr == NULL)
 		return -1;
 
-	NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name);
-	NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id);
+	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) ||
+	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id))
+		goto nla_put_failure;
 
 	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
 	if (nla_grps == NULL)
@@ -721,9 +725,10 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
-	NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
-		       grp->name);
+	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
+	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
+			   grp->name))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
 	nla_nest_end(skb, nla_grps);
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 1c51d7a..743262b 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -97,7 +97,7 @@
 
 static int nr_header(struct sk_buff *skb, struct net_device *dev,
 		     unsigned short type,
-		     const void *daddr, const void *saddr, unsigned len)
+		     const void *daddr, const void *saddr, unsigned int len)
 {
 	unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
 
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index 1e0fa9e5..42f630b 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -146,18 +146,12 @@
 	{ }
 };
 
-static struct ctl_path nr_path[] = {
-	{ .procname = "net", },
-	{ .procname = "netrom", },
-	{ }
-};
-
 void __init nr_register_sysctl(void)
 {
-	nr_table_header = register_sysctl_paths(nr_path, nr_table);
+	nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
 }
 
 void nr_unregister_sysctl(void)
 {
-	unregister_sysctl_table(nr_table_header);
+	unregister_net_sysctl_table(nr_table_header);
 }
diff --git a/net/nfc/core.c b/net/nfc/core.c
index da35327..3192c3f 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -587,12 +587,12 @@
 static int match_idx(struct device *d, void *data)
 {
 	struct nfc_dev *dev = to_nfc_dev(d);
-	unsigned *idx = data;
+	unsigned int *idx = data;
 
 	return dev->idx == *idx;
 }
 
-struct nfc_dev *nfc_get_device(unsigned idx)
+struct nfc_dev *nfc_get_device(unsigned int idx)
 {
 	struct device *d;
 
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index ebdb605..f1829f6 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -63,19 +63,23 @@
 
 	genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
 
-	NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx);
-	NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols);
-	NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
-	NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
-	if (target->nfcid1_len > 0)
-		NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
-			target->nfcid1);
-	if (target->sensb_res_len > 0)
-		NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
-			target->sensb_res);
-	if (target->sensf_res_len > 0)
-		NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
-			target->sensf_res);
+	if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) ||
+	    nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) ||
+	    nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) ||
+	    nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res))
+		goto nla_put_failure;
+	if (target->nfcid1_len > 0 &&
+	    nla_put(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
+		    target->nfcid1))
+		goto nla_put_failure;
+	if (target->sensb_res_len > 0 &&
+	    nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
+		    target->sensb_res))
+		goto nla_put_failure;
+	if (target->sensf_res_len > 0 &&
+	    nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
+		    target->sensf_res))
+		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
 
@@ -170,7 +174,8 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -197,8 +202,9 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
-	NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx);
+	if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+	    nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -227,10 +233,11 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
-	NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
-	NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
+	if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+	    nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+	    nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
+	    nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -259,7 +266,8 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -289,10 +297,11 @@
 	if (cb)
 		genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
 
-	NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
-	NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
-	NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
+	if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+	    nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+	    nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
+	    nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
+		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
 
@@ -369,11 +378,14 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
-	if (rf_mode == NFC_RF_INITIATOR)
-		NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx);
-	NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode);
-	NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode);
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
+	if (rf_mode == NFC_RF_INITIATOR &&
+	    nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
+		goto nla_put_failure;
+	if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) ||
+	    nla_put_u8(msg, NFC_ATTR_RF_MODE, rf_mode))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -406,7 +418,8 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 2c868d2..7d589a8 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -128,7 +128,7 @@
 			       u8 comm_mode, u8 rf_mode);
 int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
 
-struct nfc_dev *nfc_get_device(unsigned idx);
+struct nfc_dev *nfc_get_device(unsigned int idx);
 
 static inline void nfc_put_device(struct nfc_dev *dev)
 {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e44e631..f86de29 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -778,15 +778,18 @@
 	tcp_flags = flow->tcp_flags;
 	spin_unlock_bh(&flow->lock);
 
-	if (used)
-		NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
+	if (used &&
+	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
+		goto nla_put_failure;
 
-	if (stats.n_packets)
-		NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
-			sizeof(struct ovs_flow_stats), &stats);
+	if (stats.n_packets &&
+	    nla_put(skb, OVS_FLOW_ATTR_STATS,
+		    sizeof(struct ovs_flow_stats), &stats))
+		goto nla_put_failure;
 
-	if (tcp_flags)
-		NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+	if (tcp_flags &&
+	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
+		goto nla_put_failure;
 
 	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
 	 * this is the first flow to be dumped into 'skb'.  This is unusual for
@@ -1168,7 +1171,8 @@
 		goto nla_put_failure;
 
 	get_dp_stats(dp, &dp_stats);
-	NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);
+	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
+		goto nla_put_failure;
 
 	return genlmsg_end(skb, ovs_header);
 
@@ -1468,14 +1472,16 @@
 
 	ovs_header->dp_ifindex = get_dpifindex(vport->dp);
 
-	NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
-	NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
-	NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
-	NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
+	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
+	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
+	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
+	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
+		goto nla_put_failure;
 
 	ovs_vport_get_stats(vport, &vport_stats);
-	NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
-		&vport_stats);
+	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
+		    &vport_stats))
+		goto nla_put_failure;
 
 	err = ovs_vport_get_options(vport, skb);
 	if (err == -EMSGSIZE)
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 1252c30..7cb4163 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1174,11 +1174,13 @@
 	struct ovs_key_ethernet *eth_key;
 	struct nlattr *nla, *encap;
 
-	if (swkey->phy.priority)
-		NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);
+	if (swkey->phy.priority &&
+	    nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
+		goto nla_put_failure;
 
-	if (swkey->phy.in_port != USHRT_MAX)
-		NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);
+	if (swkey->phy.in_port != USHRT_MAX &&
+	    nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
+		goto nla_put_failure;
 
 	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
 	if (!nla)
@@ -1188,8 +1190,9 @@
 	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
 
 	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
-		NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
-		NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
+		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
+		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
+			goto nla_put_failure;
 		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
 		if (!swkey->eth.tci)
 			goto unencap;
@@ -1200,7 +1203,8 @@
 	if (swkey->eth.type == htons(ETH_P_802_2))
 		goto unencap;
 
-	NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);
+	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
+		goto nla_put_failure;
 
 	if (swkey->eth.type == htons(ETH_P_IP)) {
 		struct ovs_key_ipv4 *ipv4_key;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index c1068ae..5920bda 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -140,9 +140,9 @@
 	return netdev_vport->dev->ifindex;
 }
 
-static unsigned packet_length(const struct sk_buff *skb)
+static unsigned int packet_length(const struct sk_buff *skb)
 {
-	unsigned length = skb->len - ETH_HLEN;
+	unsigned int length = skb->len - ETH_HLEN;
 
 	if (skb->protocol == htons(ETH_P_8021Q))
 		length -= VLAN_HLEN;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4f2c0df..0f66174 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1654,7 +1654,7 @@
 			skb->data = skb_head;
 			skb->len = skb_len;
 		}
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = nskb;
 	}
 
@@ -1764,7 +1764,7 @@
 		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
 				  po->tp_reserve;
 	} else {
-		unsigned maclen = skb_network_offset(skb);
+		unsigned int maclen = skb_network_offset(skb);
 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
 				       (maclen < 16 ? 16 : maclen)) +
 			po->tp_reserve;
@@ -3224,10 +3224,10 @@
 			     char __user *optval, int __user *optlen)
 {
 	int len;
-	int val;
+	int val, lv = sizeof(val);
 	struct sock *sk = sock->sk;
 	struct packet_sock *po = pkt_sk(sk);
-	void *data;
+	void *data = &val;
 	struct tpacket_stats st;
 	union tpacket_stats_u st_u;
 
@@ -3242,21 +3242,17 @@
 
 	switch (optname) {
 	case PACKET_STATISTICS:
-		if (po->tp_version == TPACKET_V3) {
-			len = sizeof(struct tpacket_stats_v3);
-		} else {
-			if (len > sizeof(struct tpacket_stats))
-				len = sizeof(struct tpacket_stats);
-		}
 		spin_lock_bh(&sk->sk_receive_queue.lock);
 		if (po->tp_version == TPACKET_V3) {
+			lv = sizeof(struct tpacket_stats_v3);
 			memcpy(&st_u.stats3, &po->stats,
-			sizeof(struct tpacket_stats));
+			       sizeof(struct tpacket_stats));
 			st_u.stats3.tp_freeze_q_cnt =
-			po->stats_u.stats3.tp_freeze_q_cnt;
+					po->stats_u.stats3.tp_freeze_q_cnt;
 			st_u.stats3.tp_packets += po->stats.tp_drops;
 			data = &st_u.stats3;
 		} else {
+			lv = sizeof(struct tpacket_stats);
 			st = po->stats;
 			st.tp_packets += st.tp_drops;
 			data = &st;
@@ -3265,31 +3261,16 @@
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		break;
 	case PACKET_AUXDATA:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = po->auxdata;
-
-		data = &val;
 		break;
 	case PACKET_ORIGDEV:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = po->origdev;
-
-		data = &val;
 		break;
 	case PACKET_VNET_HDR:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = po->has_vnet_hdr;
-
-		data = &val;
 		break;
 	case PACKET_VERSION:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = po->tp_version;
-		data = &val;
 		break;
 	case PACKET_HDRLEN:
 		if (len > sizeof(int))
@@ -3309,39 +3290,28 @@
 		default:
 			return -EINVAL;
 		}
-		data = &val;
 		break;
 	case PACKET_RESERVE:
-		if (len > sizeof(unsigned int))
-			len = sizeof(unsigned int);
 		val = po->tp_reserve;
-		data = &val;
 		break;
 	case PACKET_LOSS:
-		if (len > sizeof(unsigned int))
-			len = sizeof(unsigned int);
 		val = po->tp_loss;
-		data = &val;
 		break;
 	case PACKET_TIMESTAMP:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = po->tp_tstamp;
-		data = &val;
 		break;
 	case PACKET_FANOUT:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = (po->fanout ?
 		       ((u32)po->fanout->id |
 			((u32)po->fanout->type << 16)) :
 		       0);
-		data = &val;
 		break;
 	default:
 		return -ENOPROTOOPT;
 	}
 
+	if (len > lv)
+		len = lv;
 	if (put_user(len, optlen))
 		return -EFAULT;
 	if (copy_to_user(optval, data, len))
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index d65f699..779ce4f 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -129,7 +129,7 @@
 /* Phonet device header operations */
 static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
 				unsigned short type, const void *daddr,
-				const void *saddr, unsigned len)
+				const void *saddr, unsigned int len)
 {
 	u8 *media = skb_push(skb, 1);
 
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9726fe6..9dd4f92 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -273,7 +273,7 @@
 	hdr = pnp_hdr(skb);
 	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
 		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
-				(unsigned)hdr->data[0]);
+				(unsigned int)hdr->data[0]);
 		return -EOPNOTSUPP;
 	}
 
@@ -305,7 +305,7 @@
 
 	default:
 		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n",
-				(unsigned)hdr->data[1]);
+				(unsigned int)hdr->data[1]);
 		return -EOPNOTSUPP;
 	}
 	if (wake)
@@ -478,9 +478,9 @@
 	skb_queue_purge(&pn->ctrlreq_queue);
 }
 
-static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n)
+static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
 {
-	unsigned i;
+	unsigned int i;
 	u8 final_fc = PN_NO_FLOW_CONTROL;
 
 	for (i = 0; i < n; i++) {
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 9b9a85e..36f75a9 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -44,7 +44,7 @@
 	struct phonet_routes routes;
 };
 
-int phonet_net_id __read_mostly;
+static int phonet_net_id __read_mostly;
 
 static struct phonet_net *phonet_pernet(struct net *net)
 {
@@ -268,7 +268,7 @@
 static void phonet_route_autodel(struct net_device *dev)
 {
 	struct phonet_net *pnn = phonet_pernet(dev_net(dev));
-	unsigned i;
+	unsigned int i;
 	DECLARE_BITMAP(deleted, 64);
 
 	/* Remove left-over Phonet routes */
@@ -331,23 +331,6 @@
 
 static void __net_exit phonet_exit_net(struct net *net)
 {
-	struct phonet_net *pnn = phonet_pernet(net);
-	struct net_device *dev;
-	unsigned i;
-
-	rtnl_lock();
-	for_each_netdev(net, dev)
-		phonet_device_destroy(dev);
-
-	for (i = 0; i < 64; i++) {
-		dev = pnn->routes.table[i];
-		if (dev) {
-			rtm_phonet_notify(RTM_DELROUTE, dev, i);
-			dev_put(dev);
-		}
-	}
-	rtnl_unlock();
-
 	proc_net_remove(net, "phonet");
 }
 
@@ -361,7 +344,7 @@
 /* Initialize Phonet devices list */
 int __init phonet_device_init(void)
 {
-	int err = register_pernet_device(&phonet_net_ops);
+	int err = register_pernet_subsys(&phonet_net_ops);
 	if (err)
 		return err;
 
@@ -377,7 +360,7 @@
 {
 	rtnl_unregister_all(PF_PHONET);
 	unregister_netdevice_notifier(&phonet_device_notifier);
-	unregister_pernet_device(&phonet_net_ops);
+	unregister_pernet_subsys(&phonet_net_ops);
 	proc_net_remove(&init_net, "pnresource");
 }
 
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index d61f676..cfdf135 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -116,7 +116,8 @@
 	ifm->ifa_flags = IFA_F_PERMANENT;
 	ifm->ifa_scope = RT_SCOPE_LINK;
 	ifm->ifa_index = dev->ifindex;
-	NLA_PUT_U8(skb, IFA_LOCAL, addr);
+	if (nla_put_u8(skb, IFA_LOCAL, addr))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -183,8 +184,9 @@
 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
 	rtm->rtm_type = RTN_UNICAST;
 	rtm->rtm_flags = 0;
-	NLA_PUT_U8(skb, RTA_DST, dst);
-	NLA_PUT_U32(skb, RTA_OIF, dev->ifindex);
+	if (nla_put_u8(skb, RTA_DST, dst) ||
+	    nla_put_u32(skb, RTA_OIF, dev->ifindex))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 4c7eff3..89cfa9c 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -58,7 +58,7 @@
 
 void __init pn_sock_init(void)
 {
-	unsigned i;
+	unsigned int i;
 
 	for (i = 0; i < PN_HASHSIZE; i++)
 		INIT_HLIST_HEAD(pnsocks.hlist + i);
@@ -116,7 +116,7 @@
 void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
 {
 	struct hlist_head *hlist = pnsocks.hlist;
-	unsigned h;
+	unsigned int h;
 
 	rcu_read_lock();
 	for (h = 0; h < PN_HASHSIZE; h++) {
@@ -545,7 +545,7 @@
 	struct hlist_head *hlist = pnsocks.hlist;
 	struct hlist_node *node;
 	struct sock *sknode;
-	unsigned h;
+	unsigned int h;
 
 	for (h = 0; h < PN_HASHSIZE; h++) {
 		sk_for_each_rcu(sknode, node, hlist) {
@@ -710,7 +710,7 @@
 
 void pn_sock_unbind_all_res(struct sock *sk)
 {
-	unsigned res, match = 0;
+	unsigned int res, match = 0;
 
 	mutex_lock(&resource_mutex);
 	for (res = 0; res < 256; res++) {
@@ -732,7 +732,7 @@
 static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
 {
 	struct net *net = seq_file_net(seq);
-	unsigned i;
+	unsigned int i;
 
 	if (!net_eq(net, &init_net))
 		return NULL;
@@ -750,7 +750,7 @@
 static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
 {
 	struct net *net = seq_file_net(seq);
-	unsigned i;
+	unsigned int i;
 
 	BUG_ON(!net_eq(net, &init_net));
 
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index cea1c7d..696348f 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -27,6 +27,10 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 
+#include <net/sock.h>
+#include <linux/phonet.h>
+#include <net/phonet/phonet.h>
+
 #define DYNAMIC_PORT_MIN	0x40
 #define DYNAMIC_PORT_MAX	0x7f
 
@@ -46,7 +50,8 @@
 
 void phonet_get_local_port_range(int *min, int *max)
 {
-	unsigned seq;
+	unsigned int seq;
+
 	do {
 		seq = read_seqbegin(&local_port_range_lock);
 		if (min)
@@ -93,19 +98,13 @@
 	{ }
 };
 
-static struct ctl_path phonet_ctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "phonet", },
-	{ },
-};
-
 int __init phonet_sysctl_init(void)
 {
-	phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table);
+	phonet_table_hrd = register_net_sysctl(&init_net, "net/phonet", phonet_table);
 	return phonet_table_hrd == NULL ? -ENOMEM : 0;
 }
 
 void phonet_sysctl_exit(void)
 {
-	unregister_sysctl_table(phonet_table_hrd);
+	unregister_net_sysctl_table(phonet_table_hrd);
 }
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index 1253b00..7e643ba 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -106,22 +106,15 @@
 	{ }
 };
 
-static struct ctl_path rds_ib_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "rds", },
-	{ .procname = "ib", },
-	{ }
-};
-
 void rds_ib_sysctl_exit(void)
 {
 	if (rds_ib_sysctl_hdr)
-		unregister_sysctl_table(rds_ib_sysctl_hdr);
+		unregister_net_sysctl_table(rds_ib_sysctl_hdr);
 }
 
 int rds_ib_sysctl_init(void)
 {
-	rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table);
+	rds_ib_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/ib", rds_ib_sysctl_table);
 	if (!rds_ib_sysctl_hdr)
 		return -ENOMEM;
 	return 0;
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index e2e4717..5d5ebd5 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -109,22 +109,15 @@
 	{ }
 };
 
-static struct ctl_path rds_iw_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "rds", },
-	{ .procname = "iw", },
-	{ }
-};
-
 void rds_iw_sysctl_exit(void)
 {
 	if (rds_iw_sysctl_hdr)
-		unregister_sysctl_table(rds_iw_sysctl_hdr);
+		unregister_net_sysctl_table(rds_iw_sysctl_hdr);
 }
 
 int rds_iw_sysctl_init(void)
 {
-	rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table);
+	rds_iw_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/iw", rds_iw_sysctl_table);
 	if (!rds_iw_sysctl_hdr)
 		return -ENOMEM;
 	return 0;
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index 25ad0c7..907214b 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -92,17 +92,10 @@
 	{ }
 };
 
-static struct ctl_path rds_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "rds", },
-	{ }
-};
-
-
 void rds_sysctl_exit(void)
 {
 	if (rds_sysctl_reg_table)
-		unregister_sysctl_table(rds_sysctl_reg_table);
+		unregister_net_sysctl_table(rds_sysctl_reg_table);
 }
 
 int rds_sysctl_init(void)
@@ -110,7 +103,7 @@
 	rds_sysctl_reconnect_min = msecs_to_jiffies(1);
 	rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
 
-	rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table);
+	rds_sysctl_reg_table = register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
 	if (!rds_sysctl_reg_table)
 		return -ENOMEM;
 	return 0;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 8b5cc4a..7298137 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -145,7 +145,7 @@
 	if (ret < 0)
 		goto out;
 
-	sock->sk->sk_reuse = 1;
+	sock->sk->sk_reuse = SK_CAN_REUSE;
 	rds_tcp_nonagle(sock);
 
 	write_lock_bh(&sock->sk->sk_callback_lock);
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 906cc05..28dbdb9 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -37,7 +37,7 @@
 
 static int rose_header(struct sk_buff *skb, struct net_device *dev,
 		       unsigned short type,
-		       const void *daddr, const void *saddr, unsigned len)
+		       const void *daddr, const void *saddr, unsigned int len)
 {
 	unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2);
 
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 47f1fdb..7ca5774 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -399,7 +399,7 @@
 
 	facilities_len = *p++;
 
-	if (facilities_len == 0 || (unsigned)facilities_len > packet_len)
+	if (facilities_len == 0 || (unsigned int)facilities_len > packet_len)
 		return 0;
 
 	while (facilities_len >= 3 && *p == 0x00) {
diff --git a/net/rose/sysctl_net_rose.c b/net/rose/sysctl_net_rose.c
index df6d9da..94ca9c2 100644
--- a/net/rose/sysctl_net_rose.c
+++ b/net/rose/sysctl_net_rose.c
@@ -118,18 +118,12 @@
 	{ }
 };
 
-static struct ctl_path rose_path[] = {
-	{ .procname = "net", },
-	{ .procname = "rose", },
-	{ }
-};
-
 void __init rose_register_sysctl(void)
 {
-	rose_table_header = register_sysctl_paths(rose_path, rose_table);
+	rose_table_header = register_net_sysctl(&init_net, "net/rose", rose_table);
 }
 
 void rose_unregister_sysctl(void)
 {
-	unregister_sysctl_table(rose_table_header);
+	unregister_net_sysctl_table(rose_table_header);
 }
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 74c064c..05996d0 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -26,7 +26,7 @@
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NETPROTO(PF_RXRPC);
 
-unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
+unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
 module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(debug, "RxRPC debugging mask");
 
@@ -513,7 +513,7 @@
 			    char __user *optval, unsigned int optlen)
 {
 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
-	unsigned min_sec_level;
+	unsigned int min_sec_level;
 	int ret;
 
 	_enter(",%d,%d,,%d", level, optname, optlen);
@@ -555,13 +555,13 @@
 
 		case RXRPC_MIN_SECURITY_LEVEL:
 			ret = -EINVAL;
-			if (optlen != sizeof(unsigned))
+			if (optlen != sizeof(unsigned int))
 				goto error;
 			ret = -EISCONN;
 			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
 				goto error;
 			ret = get_user(min_sec_level,
-				       (unsigned __user *) optval);
+				       (unsigned int __user *) optval);
 			if (ret < 0)
 				goto error;
 			ret = -EINVAL;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index c3126e8..e4d9cbc 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -19,7 +19,7 @@
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-static unsigned rxrpc_ack_defer = 1;
+static unsigned int rxrpc_ack_defer = 1;
 
 static const char *const rxrpc_acks[] = {
 	"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
@@ -548,11 +548,11 @@
  * process the extra information that may be appended to an ACK packet
  */
 static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
-				  unsigned latest, int nAcks)
+				  unsigned int latest, int nAcks)
 {
 	struct rxrpc_ackinfo ackinfo;
 	struct rxrpc_peer *peer;
-	unsigned mtu;
+	unsigned int mtu;
 
 	if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
 		_leave(" [no ackinfo]");
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index bf656c2..a3bbb36 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -38,8 +38,8 @@
 struct kmem_cache *rxrpc_call_jar;
 LIST_HEAD(rxrpc_calls);
 DEFINE_RWLOCK(rxrpc_call_lock);
-static unsigned rxrpc_call_max_lifetime = 60;
-static unsigned rxrpc_dead_call_timeout = 2;
+static unsigned int rxrpc_call_max_lifetime = 60;
+static unsigned int rxrpc_dead_call_timeout = 2;
 
 static void rxrpc_destroy_call(struct work_struct *work);
 static void rxrpc_call_life_expired(unsigned long _call);
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 1a2b0633..529572f 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -76,7 +76,7 @@
 		 * --ANK */
 //		ret = -ENOBUFS;
 //		if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-//		    (unsigned) sk->sk_rcvbuf)
+//		    (unsigned int) sk->sk_rcvbuf)
 //			goto out;
 
 		ret = sk_filter(sk, skb);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 8e22bd3..a693aca 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -83,7 +83,7 @@
 	struct rxrpc_call	*call;		/* call with which associated */
 	unsigned long		resend_at;	/* time in jiffies at which to resend */
 	union {
-		unsigned	offset;		/* offset into buffer of next read */
+		unsigned int	offset;		/* offset into buffer of next read */
 		int		remain;		/* amount of space remaining for next write */
 		u32		error;		/* network error code */
 		bool		need_resend;	/* T if needs resending */
@@ -176,9 +176,9 @@
 	struct list_head	error_targets;	/* targets for net error distribution */
 	spinlock_t		lock;		/* access lock */
 	atomic_t		usage;
-	unsigned		if_mtu;		/* interface MTU for this peer */
-	unsigned		mtu;		/* network MTU for this peer */
-	unsigned		maxdata;	/* data size (MTU - hdrsize) */
+	unsigned int		if_mtu;		/* interface MTU for this peer */
+	unsigned int		mtu;		/* network MTU for this peer */
+	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
 	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
 	int			debug_id;	/* debug ID for printks */
 	int			net_error;	/* network error distributed */
@@ -187,8 +187,8 @@
 	/* calculated RTT cache */
 #define RXRPC_RTT_CACHE_SIZE 32
 	suseconds_t		rtt;		/* current RTT estimate (in uS) */
-	unsigned		rtt_point;	/* next entry at which to insert */
-	unsigned		rtt_usage;	/* amount of cache actually used */
+	unsigned int		rtt_point;	/* next entry at which to insert */
+	unsigned int		rtt_usage;	/* amount of cache actually used */
 	suseconds_t		rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
 };
 
@@ -271,7 +271,7 @@
 	} state;
 	int			error;		/* error code for local abort */
 	int			debug_id;	/* debug ID for printks */
-	unsigned		call_counter;	/* call ID counter */
+	unsigned int		call_counter;	/* call ID counter */
 	atomic_t		serial;		/* packet serial number counter */
 	atomic_t		hi_serial;	/* highest serial number received */
 	u8			avail_calls;	/* number of calls available */
@@ -592,7 +592,7 @@
 /*
  * debug tracing
  */
-extern unsigned rxrpc_debug;
+extern unsigned int rxrpc_debug;
 
 #define dbgprintk(FMT,...) \
 	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index ae3a035..8b1f9f4 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -82,7 +82,7 @@
  * - the caller guarantees we have at least 4 words
  */
 static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
-				       unsigned toklen)
+				       unsigned int toklen)
 {
 	struct rxrpc_key_token *token, **pptoken;
 	size_t plen;
@@ -210,10 +210,10 @@
  */
 static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 				       const __be32 **_xdr,
-				       unsigned *_toklen)
+				       unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, n_parts, loop, tmp;
+	unsigned int toklen = *_toklen, n_parts, loop, tmp;
 
 	/* there must be at least one name, and at least #names+1 length
 	 * words */
@@ -286,10 +286,10 @@
 static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 					 size_t max_data_size,
 					 const __be32 **_xdr,
-					 unsigned *_toklen)
+					 unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len;
 
 	/* there must be at least one tag and one length word */
 	if (toklen <= 8)
@@ -330,11 +330,11 @@
 					  u8 max_n_elem,
 					  size_t max_elem_size,
 					  const __be32 **_xdr,
-					  unsigned *_toklen)
+					  unsigned int *_toklen)
 {
 	struct krb5_tagged_data *td;
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, n_elem, loop;
+	unsigned int toklen = *_toklen, n_elem, loop;
 	int ret;
 
 	/* there must be at least one count */
@@ -380,10 +380,10 @@
  * extract a krb5 ticket
  */
 static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
-				    const __be32 **_xdr, unsigned *_toklen)
+				    const __be32 **_xdr, unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len;
 
 	/* there must be at least one length word */
 	if (toklen <= 4)
@@ -419,7 +419,7 @@
  * - the caller guarantees we have at least 4 words
  */
 static int rxrpc_instantiate_xdr_rxk5(struct key *key, const __be32 *xdr,
-				      unsigned toklen)
+				      unsigned int toklen)
 {
 	struct rxrpc_key_token *token, **pptoken;
 	struct rxk5_key *rxk5;
@@ -549,7 +549,7 @@
 {
 	const __be32 *xdr = data, *token;
 	const char *cp;
-	unsigned len, tmp, loop, ntoken, toklen, sec_ix;
+	unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
 	int ret;
 
 	_enter(",{%x,%x,%x,%x},%zu",
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 7635107..f226709 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -31,7 +31,7 @@
 #define REALM_SZ			40	/* size of principal's auth domain */
 #define SNAME_SZ			40	/* size of service name */
 
-unsigned rxrpc_debug;
+unsigned int rxrpc_debug;
 module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(debug, "rxkad debugging mask");
 
@@ -207,7 +207,7 @@
 	struct rxrpc_crypt iv;
 	struct scatterlist sg[16];
 	struct sk_buff *trailer;
-	unsigned len;
+	unsigned int len;
 	u16 check;
 	int nsg;
 
@@ -826,7 +826,7 @@
 	struct rxrpc_crypt iv, key;
 	struct scatterlist sg[1];
 	struct in_addr addr;
-	unsigned life;
+	unsigned int life;
 	time_t issue, now;
 	bool little_endian;
 	int ret;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 93fdf13..5cfb160 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -127,7 +127,8 @@
 	nest = nla_nest_start(skb, a->order);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
+	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+		goto nla_put_failure;
 	for (i = 0; i < (hinfo->hmask + 1); i++) {
 		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
 
@@ -139,7 +140,8 @@
 			p = s_p;
 		}
 	}
-	NLA_PUT_U32(skb, TCA_FCNT, n_i);
+	if (nla_put_u32(skb, TCA_FCNT, n_i))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
 	return n_i;
@@ -437,7 +439,8 @@
 	if (a->ops == NULL || a->ops->dump == NULL)
 		return err;
 
-	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
+	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+		goto nla_put_failure;
 	if (tcf_action_copy_stats(skb, a, 0))
 		goto nla_put_failure;
 	nest = nla_nest_start(skb, TCA_OPTIONS);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 453a734..882124ceb 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -550,11 +550,13 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-	NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
+		goto nla_put_failure;
 
 	return skb->len;
 
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index b77f5a0..f10fb82 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -162,7 +162,8 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 #ifdef CONFIG_GACT_PROB
 	if (gact->tcfg_ptype) {
 		struct tc_gact_p p_opt = {
@@ -171,13 +172,15 @@
 			.ptype   = gact->tcfg_ptype,
 		};
 
-		NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
+		if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt))
+			goto nla_put_failure;
 	}
 #endif
 	t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
-	NLA_PUT(skb, TCA_GACT_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60f8f61..0beba0e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -267,15 +267,17 @@
 	c.refcnt = ipt->tcf_refcnt - ref;
 	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
 
-	NLA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t);
-	NLA_PUT_U32(skb, TCA_IPT_INDEX, ipt->tcf_index);
-	NLA_PUT_U32(skb, TCA_IPT_HOOK, ipt->tcfi_hook);
-	NLA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c);
-	NLA_PUT_STRING(skb, TCA_IPT_TABLE, ipt->tcfi_tname);
+	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
+	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
+	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
+	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
+	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
+		goto nla_put_failure;
 	tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
 	tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
 	tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
-	NLA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm);
+	if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm))
+		goto nla_put_failure;
 	kfree(t);
 	return skb->len;
 
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index e051398..d583aea 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -227,11 +227,13 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
-	NLA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 001d1b3..b5d029e 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -284,11 +284,13 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-	NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t))
+		goto nla_put_failure;
 
 	return skb->len;
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 10d3aed..26aa2f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -215,11 +215,13 @@
 	opt->refcnt = p->tcf_refcnt - ref;
 	opt->bindcnt = p->tcf_bindcnt - bind;
 
-	NLA_PUT(skb, TCA_PEDIT_PARMS, s, opt);
+	if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-	NLA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t))
+		goto nla_put_failure;
 	kfree(opt);
 	return skb->len;
 
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 6fb3f5a..a9de232 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -356,11 +356,14 @@
 		opt.rate = police->tcfp_R_tab->rate;
 	if (police->tcfp_P_tab)
 		opt.peakrate = police->tcfp_P_tab->rate;
-	NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
-	if (police->tcfp_result)
-		NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
-	if (police->tcfp_ewma_rate)
-		NLA_PUT_U32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate);
+	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
+		goto nla_put_failure;
+	if (police->tcfp_result &&
+	    nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result))
+		goto nla_put_failure;
+	if (police->tcfp_ewma_rate &&
+	    nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 73e0a3a..3922f2a 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -172,12 +172,14 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
-	NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
+	if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
+	    nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
-	NLA_PUT(skb, TCA_DEF_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 35dbbe9..476e0fa 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -166,20 +166,25 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
-	if (d->flags & SKBEDIT_F_PRIORITY)
-		NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
-			&d->priority);
-	if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
-		NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
-			sizeof(d->queue_mapping), &d->queue_mapping);
-	if (d->flags & SKBEDIT_F_MARK)
-		NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
-			&d->mark);
+	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
+	if ((d->flags & SKBEDIT_F_PRIORITY) &&
+	    nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
+		    &d->priority))
+		goto nla_put_failure;
+	if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
+	    nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING,
+		    sizeof(d->queue_mapping), &d->queue_mapping))
+		goto nla_put_failure;
+	if ((d->flags & SKBEDIT_F_MARK) &&
+	    nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
+		    &d->mark))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
-	NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a69d44f..f452f69 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -357,7 +357,8 @@
 	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
 	tcm->tcm_parent = tp->classid;
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
-	NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
+	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
+		goto nla_put_failure;
 	tcm->tcm_handle = fh;
 	if (RTM_DELTFILTER != event) {
 		tcm->tcm_handle = 0;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index ea1f70b..590960a 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -257,8 +257,9 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (f->res.classid)
-		NLA_PUT_U32(skb, TCA_BASIC_CLASSID, f->res.classid);
+	if (f->res.classid &&
+	    nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
+		goto nla_put_failure;
 
 	if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 ||
 	    tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 1d8bd0d..ccd08c8 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -572,25 +572,32 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
-	NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);
+	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
+	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
+		goto nla_put_failure;
 
 	if (f->mask != ~0 || f->xor != 0) {
-		NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
-		NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
+		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
+		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
+			goto nla_put_failure;
 	}
-	if (f->rshift)
-		NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
-	if (f->addend)
-		NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);
+	if (f->rshift &&
+	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
+		goto nla_put_failure;
+	if (f->addend &&
+	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
+		goto nla_put_failure;
 
-	if (f->divisor)
-		NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
-	if (f->baseclass)
-		NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);
+	if (f->divisor &&
+	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
+		goto nla_put_failure;
+	if (f->baseclass &&
+	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
+		goto nla_put_failure;
 
-	if (f->perturb_period)
-		NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);
+	if (f->perturb_period &&
+	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
+		goto nla_put_failure;
 
 	if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
 		goto nla_put_failure;
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 389af15..8384a47 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -346,14 +346,17 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (f->res.classid)
-		NLA_PUT_U32(skb, TCA_FW_CLASSID, f->res.classid);
+	if (f->res.classid &&
+	    nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
+		goto nla_put_failure;
 #ifdef CONFIG_NET_CLS_IND
-	if (strlen(f->indev))
-		NLA_PUT_STRING(skb, TCA_FW_INDEV, f->indev);
+	if (strlen(f->indev) &&
+	    nla_put_string(skb, TCA_FW_INDEV, f->indev))
+		goto nla_put_failure;
 #endif /* CONFIG_NET_CLS_IND */
-	if (head->mask != 0xFFFFFFFF)
-		NLA_PUT_U32(skb, TCA_FW_MASK, head->mask);
+	if (head->mask != 0xFFFFFFFF &&
+	    nla_put_u32(skb, TCA_FW_MASK, head->mask))
+		goto nla_put_failure;
 
 	if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
 		goto nla_put_failure;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 13ab66e..36fec42 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -571,17 +571,21 @@
 
 	if (!(f->handle & 0x8000)) {
 		id = f->id & 0xFF;
-		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
+		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
+			goto nla_put_failure;
 	}
 	if (f->handle & 0x80000000) {
-		if ((f->handle >> 16) != 0xFFFF)
-			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
+		if ((f->handle >> 16) != 0xFFFF &&
+		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
+			goto nla_put_failure;
 	} else {
 		id = f->id >> 16;
-		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
+		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
+			goto nla_put_failure;
 	}
-	if (f->res.classid)
-		NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);
+	if (f->res.classid &&
+	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
+		goto nla_put_failure;
 
 	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
 		goto nla_put_failure;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index b014279..18ab93e 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -615,18 +615,22 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst);
+	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
+		goto nla_put_failure;
 	pinfo.dpi = s->dpi;
 	pinfo.spi = f->spi;
 	pinfo.protocol = s->protocol;
 	pinfo.tunnelid = s->tunnelid;
 	pinfo.tunnelhdr = f->tunnelhdr;
 	pinfo.pad = 0;
-	NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
-	if (f->res.classid)
-		NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
-	if (((f->handle >> 8) & 0xFF) != 16)
-		NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
+	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
+		goto nla_put_failure;
+	if (f->res.classid &&
+	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
+		goto nla_put_failure;
+	if (((f->handle >> 8) & 0xFF) != 16 &&
+	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
+		goto nla_put_failure;
 
 	if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
 		goto nla_put_failure;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index dbe1992..fe29420 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -438,10 +438,11 @@
 
 	if (!fh) {
 		t->tcm_handle = ~0; /* whatever ... */
-		NLA_PUT_U32(skb, TCA_TCINDEX_HASH, p->hash);
-		NLA_PUT_U16(skb, TCA_TCINDEX_MASK, p->mask);
-		NLA_PUT_U32(skb, TCA_TCINDEX_SHIFT, p->shift);
-		NLA_PUT_U32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through);
+		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
+		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
+		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
+		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
+			goto nla_put_failure;
 		nla_nest_end(skb, nest);
 	} else {
 		if (p->perfect) {
@@ -460,8 +461,9 @@
 			}
 		}
 		pr_debug("handle = %d\n", t->tcm_handle);
-		if (r->res.class)
-			NLA_PUT_U32(skb, TCA_TCINDEX_CLASSID, r->res.classid);
+		if (r->res.class &&
+		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
+			goto nla_put_failure;
 
 		if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
 			goto nla_put_failure;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 939b627..591b006 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -733,36 +733,44 @@
 		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
 		u32 divisor = ht->divisor + 1;
 
-		NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
+		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
+			goto nla_put_failure;
 	} else {
-		NLA_PUT(skb, TCA_U32_SEL,
-			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
-			&n->sel);
+		if (nla_put(skb, TCA_U32_SEL,
+			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
+			    &n->sel))
+			goto nla_put_failure;
 		if (n->ht_up) {
 			u32 htid = n->handle & 0xFFFFF000;
-			NLA_PUT_U32(skb, TCA_U32_HASH, htid);
+			if (nla_put_u32(skb, TCA_U32_HASH, htid))
+				goto nla_put_failure;
 		}
-		if (n->res.classid)
-			NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid);
-		if (n->ht_down)
-			NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle);
+		if (n->res.classid &&
+		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
+			goto nla_put_failure;
+		if (n->ht_down &&
+		    nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
+			goto nla_put_failure;
 
 #ifdef CONFIG_CLS_U32_MARK
-		if (n->mark.val || n->mark.mask)
-			NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
+		if ((n->mark.val || n->mark.mask) &&
+		    nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
+			goto nla_put_failure;
 #endif
 
 		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
 			goto nla_put_failure;
 
 #ifdef CONFIG_NET_CLS_IND
-		if (strlen(n->indev))
-			NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
+		if (strlen(n->indev) &&
+		    nla_put_string(skb, TCA_U32_INDEV, n->indev))
+			goto nla_put_failure;
 #endif
 #ifdef CONFIG_CLS_U32_PERF
-		NLA_PUT(skb, TCA_U32_PCNT,
-		sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
-			n->pf);
+		if (nla_put(skb, TCA_U32_PCNT,
+			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
+			    n->pf))
+			goto nla_put_failure;
 #endif
 	}
 
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 1363bf1..4790c69 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -585,8 +585,9 @@
 
 static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 {
-	if (v->val && v->len)
-		NLA_PUT(skb, tlv, v->len, (void *) v->val);
+	if (v->val && v->len &&
+	    nla_put(skb, tlv, v->len, (void *) v->val))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -636,10 +637,13 @@
 
 static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 {
-	if (v->len == sizeof(unsigned long))
-		NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
-	else if (v->len == sizeof(u32))
-		NLA_PUT_U32(skb, tlv, v->val);
+	if (v->len == sizeof(unsigned long)) {
+		if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
+			goto nla_put_failure;
+	} else if (v->len == sizeof(u32)) {
+		if (nla_put_u32(skb, tlv, v->val))
+			goto nla_put_failure;
+	}
 
 	return 0;
 
@@ -831,7 +835,8 @@
 	memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
 	memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));
 
-	NLA_PUT(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr);
+	if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
+		goto nla_put_failure;
 
 	ops = meta_type_ops(&meta->lvalue);
 	if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 88d93eb..aca233c 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -441,7 +441,8 @@
 	if (top_start == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr);
+	if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr))
+		goto nla_put_failure;
 
 	list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST);
 	if (list_start == NULL)
@@ -457,7 +458,8 @@
 			.flags = em->flags
 		};
 
-		NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
+		if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr))
+			goto nla_put_failure;
 
 		if (em->ops && em->ops->dump) {
 			if (em->ops->dump(skb, em) < 0)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 3d8981f..d2daefc 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -426,7 +426,8 @@
 	nest = nla_nest_start(skb, TCA_STAB);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
+	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
 	return skb->len;
@@ -1201,7 +1202,8 @@
 	tcm->tcm_parent = clid;
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = atomic_read(&q->refcnt);
-	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
+	if (nla_put_string(skb, TCA_KIND, q->ops->id))
+		goto nla_put_failure;
 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
 		goto nla_put_failure;
 	q->qstats.qlen = q->q.qlen;
@@ -1505,7 +1507,8 @@
 	tcm->tcm_parent = q->handle;
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = 0;
-	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
+	if (nla_put_string(skb, TCA_KIND, q->ops->id))
+		goto nla_put_failure;
 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
 		goto nla_put_failure;
 
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e25e490..a77a4fb 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -601,7 +601,8 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr);
+	if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
+		goto nla_put_failure;
 	if (flow->vcc) {
 		struct sockaddr_atmpvc pvc;
 		int state;
@@ -610,15 +611,19 @@
 		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
 		pvc.sap_addr.vpi = flow->vcc->vpi;
 		pvc.sap_addr.vci = flow->vcc->vci;
-		NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc);
+		if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
+			goto nla_put_failure;
 		state = ATM_VF2VS(flow->vcc->flags);
-		NLA_PUT_U32(skb, TCA_ATM_STATE, state);
+		if (nla_put_u32(skb, TCA_ATM_STATE, state))
+			goto nla_put_failure;
 	}
-	if (flow->excess)
-		NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
-	else
-		NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
-
+	if (flow->excess) {
+		if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
+			goto nla_put_failure;
+	} else {
+		if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
+			goto nla_put_failure;
+	}
 	nla_nest_end(skb, nest);
 	return skb->len;
 
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 24d94c0..6aabd77 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1425,7 +1425,8 @@
 {
 	unsigned char *b = skb_tail_pointer(skb);
 
-	NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate);
+	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
@@ -1450,7 +1451,8 @@
 	opt.minidle = (u32)(-cl->minidle);
 	opt.offtime = cl->offtime;
 	opt.change = ~0;
-	NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
@@ -1468,7 +1470,8 @@
 	opt.priority = cl->priority + 1;
 	opt.cpriority = cl->cpriority + 1;
 	opt.weight = cl->weight;
-	NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
@@ -1485,7 +1488,8 @@
 	opt.priority2 = cl->priority2 + 1;
 	opt.pad = 0;
 	opt.penalty = cl->penalty;
-	NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
@@ -1502,7 +1506,8 @@
 		opt.split = cl->split ? cl->split->common.classid : 0;
 		opt.defmap = cl->defmap;
 		opt.defchange = ~0;
-		NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt);
+		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
+			goto nla_put_failure;
 	}
 	return skb->len;
 
@@ -1521,7 +1526,8 @@
 		opt.police = cl->police;
 		opt.__res1 = 0;
 		opt.__res2 = 0;
-		NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
+		if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
+			goto nla_put_failure;
 	}
 	return skb->len;
 
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 7e267d7..81445cc 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -515,8 +515,9 @@
 	if (opts == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
-	NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
+	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
+	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
+		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 6b7fe4a..c218987 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -260,7 +260,8 @@
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
+	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
+		goto nla_put_failure;
 	return nla_nest_end(skb, nest);
 
 nla_put_failure:
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 2c79020..389b856 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -429,8 +429,9 @@
 	opts = nla_nest_start(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
-	NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
+	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]) ||
+	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]))
+		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
 
@@ -447,13 +448,16 @@
 	opts = nla_nest_start(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices);
+	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
+		goto nla_put_failure;
 
-	if (p->default_index != NO_DEFAULT_INDEX)
-		NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index);
+	if (p->default_index != NO_DEFAULT_INDEX &&
+	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
+		goto nla_put_failure;
 
-	if (p->set_tc_index)
-		NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX);
+	if (p->set_tc_index &&
+	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
+		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
 
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 66effe2..e15a9eb 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -85,7 +85,8 @@
 {
 	struct tc_fifo_qopt opt = { .limit = sch->limit };
 
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 67fc573..0eb1202 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -512,7 +512,8 @@
 	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 
 	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 0b15236..ab620bf 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -521,14 +521,16 @@
 	opts = nla_nest_start(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
-	NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
+		goto nla_put_failure;
 
 	for (i = 0; i < MAX_DPs; i++) {
 		struct gred_sched_data *q = table->tab[i];
 
 		max_p[i] = q ? q->parms.max_P : 0;
 	}
-	NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p);
+	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
+		goto nla_put_failure;
 
 	parms = nla_nest_start(skb, TCA_GRED_PARMS);
 	if (parms == NULL)
@@ -565,11 +567,8 @@
 		opt.packets	= q->packetsin;
 		opt.bytesin	= q->bytesin;
 
-		if (gred_wred_mode(table)) {
-			q->vars.qidlestart =
-				table->tab[table->def]->vars.qidlestart;
-			q->vars.qavg = table->tab[table->def]->vars.qavg;
-		}
+		if (gred_wred_mode(table))
+			gred_load_wred_set(table, q);
 
 		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
 
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 9bdca2e..8db3e2c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1305,7 +1305,8 @@
 	tsc.m1 = sm2m(sc->sm1);
 	tsc.d  = dx2d(sc->dx);
 	tsc.m2 = sm2m(sc->sm2);
-	NLA_PUT(skb, attr, sizeof(tsc), &tsc);
+	if (nla_put(skb, attr, sizeof(tsc), &tsc))
+		goto nla_put_failure;
 
 	return skb->len;
 
@@ -1573,7 +1574,8 @@
 	}
 
 	qopt.defcls = q->defcls;
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
+		goto nla_put_failure;
 	return skb->len;
 
  nla_put_failure:
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 29b942c..2ea6f19 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1051,7 +1051,8 @@
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
+	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
 	spin_unlock_bh(root_lock);
@@ -1090,7 +1091,8 @@
 	opt.quantum = cl->quantum;
 	opt.prio = cl->prio;
 	opt.level = cl->level;
-	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
 	spin_unlock_bh(root_lock);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 28de430..d1831ca 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -247,7 +247,8 @@
 		opt.offset[i] = dev->tc_to_txq[i].offset;
 	}
 
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	return skb->len;
 nla_put_failure:
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 49131d7..2a2b096 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -284,7 +284,8 @@
 	opt.bands = q->bands;
 	opt.max_bands = q->max_bands;
 
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	return skb->len;
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5da548f..1109731 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -834,7 +834,8 @@
 			.p23 = q->clg.a5,
 		};
 
-		NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
+		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
+			goto nla_put_failure;
 		break;
 	}
 	case CLG_GILB_ELL: {
@@ -845,7 +846,8 @@
 			.k1 = q->clg.a4,
 		};
 
-		NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
+		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
+			goto nla_put_failure;
 		break;
 	}
 	}
@@ -874,26 +876,31 @@
 	qopt.loss = q->loss;
 	qopt.gap = q->gap;
 	qopt.duplicate = q->duplicate;
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
+		goto nla_put_failure;
 
 	cor.delay_corr = q->delay_cor.rho;
 	cor.loss_corr = q->loss_cor.rho;
 	cor.dup_corr = q->dup_cor.rho;
-	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
+	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
+		goto nla_put_failure;
 
 	reorder.probability = q->reorder;
 	reorder.correlation = q->reorder_cor.rho;
-	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
+	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
+		goto nla_put_failure;
 
 	corrupt.probability = q->corrupt;
 	corrupt.correlation = q->corrupt_cor.rho;
-	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
+	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
+		goto nla_put_failure;
 
 	rate.rate = q->rate;
 	rate.packet_overhead = q->packet_overhead;
 	rate.cell_size = q->cell_size;
 	rate.cell_overhead = q->cell_overhead;
-	NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
+	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
+		goto nla_put_failure;
 
 	if (dump_loss_model(q, skb) != 0)
 		goto nla_put_failure;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b5d56a2..79359b6 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -247,7 +247,8 @@
 	opt.bands = q->bands;
 	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
 
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	return skb->len;
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index e68cb44..9af01f3 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -429,8 +429,9 @@
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w);
-	NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax);
+	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) ||
+	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax))
+		goto nla_put_failure;
 	return nla_nest_end(skb, nest);
 
 nla_put_failure:
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a5cc301..633e32d 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -272,8 +272,9 @@
 	opts = nla_nest_start(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
-	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
-	NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P);
+	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
+	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
+		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index d7eea99..74305c8 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -570,7 +570,8 @@
 
 	sch->qstats.backlog = q->qdisc->qstats.backlog;
 	opts = nla_nest_start(skb, TCA_OPTIONS);
-	NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 02a21ab..d3a1bc2 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -812,7 +812,8 @@
 	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
 	opt.flags	= q->flags;
 
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	return skb->len;
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b8e1563..4b056c15 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -359,7 +359,8 @@
 		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
 	opt.mtu = q->mtu;
 	opt.buffer = q->buffer;
-	NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
 	return skb->len;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index acd2edb..5bc9ab1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1408,7 +1408,7 @@
 }
 
 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
+void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
 {
 	struct sctp_chunk *sack;
 	struct timer_list *timer;
@@ -1465,7 +1465,7 @@
 }
 
 /* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
+void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
 {
 	int rx_count;
 	int over = 0;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80f71af..80564fe 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -342,7 +342,7 @@
 		sctp_bh_lock_sock(sk);
 
 		if (sock_owned_by_user(sk)) {
-			if (sk_add_backlog(sk, skb))
+			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 				sctp_chunk_free(chunk);
 			else
 				backloged = 1;
@@ -376,7 +376,7 @@
 	struct sctp_ep_common *rcvr = chunk->rcvr;
 	int ret;
 
-	ret = sk_add_backlog(sk, skb);
+	ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 	if (!ret) {
 		/* Hold the assoc/ep while hanging on the backlog queue.
 		 * This way, we know structures we need will not disappear
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 817174e..69534c5 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -663,8 +663,8 @@
 	 */
 	if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
 	    inflight && sctp_state(asoc, ESTABLISHED)) {
-		unsigned max = transport->pathmtu - packet->overhead;
-		unsigned len = chunk->skb->len + q->out_qlen;
+		unsigned int max = transport->pathmtu - packet->overhead;
+		unsigned int len = chunk->skb->len + q->out_qlen;
 
 		/* Check whether this chunk and all the rest of pending
 		 * data will fit or delay in hopes of bundling a full
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index cfeb1d4..a0fa19f 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1147,7 +1147,7 @@
 	__u32 sack_ctsn, ctsn, tsn;
 	__u32 highest_tsn, highest_new_tsn;
 	__u32 sack_a_rwnd;
-	unsigned outstanding;
+	unsigned int outstanding;
 	struct sctp_transport *primary = asoc->peer.primary_path;
 	int count_of_newacks = 0;
 	int gap_ack_blocks;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1ff51c9..fbb374c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -524,7 +524,7 @@
 /* Worker routine to handle INIT command failure.  */
 static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
 				 struct sctp_association *asoc,
-				 unsigned error)
+				 unsigned int error)
 {
 	struct sctp_ulpevent *event;
 
@@ -550,7 +550,7 @@
 				  sctp_event_t event_type,
 				  sctp_subtype_t subtype,
 				  struct sctp_chunk *chunk,
-				  unsigned error)
+				  unsigned int error)
 {
 	struct sctp_ulpevent *event;
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 891f5db..a147b4d 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2410,7 +2410,7 @@
 					sctp_cmd_seq_t *commands)
 {
 	struct sctp_chunk *chunk = arg;
-	unsigned len;
+	unsigned int len;
 	__be16 error = SCTP_ERROR_NO_ERROR;
 
 	/* See if we have an error cause code in the chunk.  */
@@ -2446,7 +2446,7 @@
 				     sctp_cmd_seq_t *commands)
 {
 	struct sctp_chunk *chunk = arg;
-	unsigned len;
+	unsigned int len;
 	__be16 error = SCTP_ERROR_NO_ERROR;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 60ffbd0..e5fe639 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -275,22 +275,16 @@
 	{ /* sentinel */ }
 };
 
-static struct ctl_path sctp_path[] = {
-	{ .procname = "net", },
-	{ .procname = "sctp", },
-	{ }
-};
-
 static struct ctl_table_header * sctp_sysctl_header;
 
 /* Sysctl registration.  */
 void sctp_sysctl_register(void)
 {
-	sctp_sysctl_header = register_sysctl_paths(sctp_path, sctp_table);
+	sctp_sysctl_header = register_net_sysctl(&init_net, "net/sctp", sctp_table);
 }
 
 /* Sysctl deregistration.  */
 void sctp_sysctl_unregister(void)
 {
-	unregister_sysctl_table(sctp_sysctl_header);
+	unregister_net_sysctl_table(sctp_sysctl_header);
 }
diff --git a/net/socket.c b/net/socket.c
index 851edcd..d3aaa4f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1479,7 +1479,7 @@
 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
 	if (sock) {
 		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
-		if ((unsigned)backlog > somaxconn)
+		if ((unsigned int)backlog > somaxconn)
 			backlog = somaxconn;
 
 		err = security_socket_listen(sock, backlog);
@@ -1691,7 +1691,7 @@
  */
 
 SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
-		unsigned, flags, struct sockaddr __user *, addr,
+		unsigned int, flags, struct sockaddr __user *, addr,
 		int, addr_len)
 {
 	struct socket *sock;
@@ -1738,7 +1738,7 @@
  */
 
 SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
-		unsigned, flags)
+		unsigned int, flags)
 {
 	return sys_sendto(fd, buff, len, flags, NULL, 0);
 }
@@ -1750,7 +1750,7 @@
  */
 
 SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
-		unsigned, flags, struct sockaddr __user *, addr,
+		unsigned int, flags, struct sockaddr __user *, addr,
 		int __user *, addr_len)
 {
 	struct socket *sock;
@@ -1795,7 +1795,7 @@
  */
 
 asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
-			 unsigned flags)
+			 unsigned int flags)
 {
 	return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
 }
@@ -1897,7 +1897,7 @@
 };
 
 static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
-			 struct msghdr *msg_sys, unsigned flags,
+			 struct msghdr *msg_sys, unsigned int flags,
 			 struct used_address *used_address)
 {
 	struct compat_msghdr __user *msg_compat =
@@ -1908,7 +1908,7 @@
 	    __attribute__ ((aligned(sizeof(__kernel_size_t))));
 	/* 20 is size of ipv6_pktinfo */
 	unsigned char *ctl_buf = ctl;
-	int err, ctl_len, iov_size, total_len;
+	int err, ctl_len, total_len;
 
 	err = -EFAULT;
 	if (MSG_CMSG_COMPAT & flags) {
@@ -1917,16 +1917,13 @@
 	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
 		return -EFAULT;
 
-	/* do not move before msg_sys is valid */
-	err = -EMSGSIZE;
-	if (msg_sys->msg_iovlen > UIO_MAXIOV)
-		goto out;
-
-	/* Check whether to allocate the iovec area */
-	err = -ENOMEM;
-	iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
-		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+		err = -EMSGSIZE;
+		if (msg_sys->msg_iovlen > UIO_MAXIOV)
+			goto out;
+		err = -ENOMEM;
+		iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
+			      GFP_KERNEL);
 		if (!iov)
 			goto out;
 	}
@@ -2005,7 +2002,7 @@
 		sock_kfree_s(sock->sk, ctl_buf, ctl_len);
 out_freeiov:
 	if (iov != iovstack)
-		sock_kfree_s(sock->sk, iov, iov_size);
+		kfree(iov);
 out:
 	return err;
 }
@@ -2014,7 +2011,7 @@
  *	BSD sendmsg interface
  */
 
-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
 {
 	int fput_needed, err;
 	struct msghdr msg_sys;
@@ -2096,14 +2093,14 @@
 }
 
 static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
-			 struct msghdr *msg_sys, unsigned flags, int nosec)
+			 struct msghdr *msg_sys, unsigned int flags, int nosec)
 {
 	struct compat_msghdr __user *msg_compat =
 	    (struct compat_msghdr __user *)msg;
 	struct iovec iovstack[UIO_FASTIOV];
 	struct iovec *iov = iovstack;
 	unsigned long cmsg_ptr;
-	int err, iov_size, total_len, len;
+	int err, total_len, len;
 
 	/* kernel mode address */
 	struct sockaddr_storage addr;
@@ -2118,15 +2115,13 @@
 	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
 		return -EFAULT;
 
-	err = -EMSGSIZE;
-	if (msg_sys->msg_iovlen > UIO_MAXIOV)
-		goto out;
-
-	/* Check whether to allocate the iovec area */
-	err = -ENOMEM;
-	iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
-		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+		err = -EMSGSIZE;
+		if (msg_sys->msg_iovlen > UIO_MAXIOV)
+			goto out;
+		err = -ENOMEM;
+		iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
+			      GFP_KERNEL);
 		if (!iov)
 			goto out;
 	}
@@ -2180,7 +2175,7 @@
 
 out_freeiov:
 	if (iov != iovstack)
-		sock_kfree_s(sock->sk, iov, iov_size);
+		kfree(iov);
 out:
 	return err;
 }
@@ -2524,6 +2519,12 @@
 static int __init sock_init(void)
 {
 	int err;
+	/*
+	 *      Initialize the network sysctl infrastructure.
+	 */
+	err = net_sysctl_init();
+	if (err)
+		goto out;
 
 	/*
 	 *      Initialize sock SLAB cache.
@@ -3223,7 +3224,7 @@
 	return -ENOIOCTLCMD;
 }
 
-static long compat_sock_ioctl(struct file *file, unsigned cmd,
+static long compat_sock_ioctl(struct file *file, unsigned int cmd,
 			      unsigned long arg)
 {
 	struct socket *sock = file->private_data;
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 8eff8c3..d3611f1 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -624,7 +624,7 @@
 	ctx->seq_send = ctx->seq_send64;
 	if (ctx->seq_send64 != ctx->seq_send) {
 		dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
-			(long unsigned)ctx->seq_send64, ctx->seq_send);
+			(unsigned long)ctx->seq_send64, ctx->seq_send);
 		p = ERR_PTR(-EINVAL);
 		goto out_err;
 	}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index de0b0f3..47ad266 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1273,7 +1273,7 @@
 	__acquires(cd->hash_lock)
 {
 	loff_t n = *pos;
-	unsigned hash, entry;
+	unsigned int hash, entry;
 	struct cache_head *ch;
 	struct cache_detail *cd = ((struct handle*)m->private)->cd;
 
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 824d32f..f0132b2 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1556,7 +1556,7 @@
 					(char *)&val, sizeof(val));
 
 	if (type == SOCK_STREAM)
-		sock->sk->sk_reuse = 1;		/* allow address reuse */
+		sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */
 	error = kernel_bind(sock, sin, len);
 	if (error < 0)
 		goto bummer;
diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c
index dd82434..08881d0 100644
--- a/net/sunrpc/timer.c
+++ b/net/sunrpc/timer.c
@@ -34,7 +34,7 @@
 void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
 {
 	unsigned long init = 0;
-	unsigned i;
+	unsigned int i;
 
 	rt->timeo = timeo;
 
@@ -57,7 +57,7 @@
  * NB: When computing the smoothed RTT and standard deviation,
  *     be careful not to produce negative intermediate results.
  */
-void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
+void rpc_update_rtt(struct rpc_rtt *rt, unsigned int timer, long m)
 {
 	long *srtt, *sdrtt;
 
@@ -106,7 +106,7 @@
  * read, write, commit     - A+4D
  * other                   - timeo
  */
-unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
+unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned int timer)
 {
 	unsigned long res;
 
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index b97a3dd..fddcccf 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1204,7 +1204,7 @@
 		int (*actor)(struct scatterlist *, void *), void *data)
 {
 	int i, ret = 0;
-	unsigned page_len, thislen, page_offset;
+	unsigned int page_len, thislen, page_offset;
 	struct scatterlist      sg[1];
 
 	sg_init_table(sg, 1);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 0cbcd1a..6fe2dce 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -783,7 +783,7 @@
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
-	unsigned timer = task->tk_msg.rpc_proc->p_timer;
+	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
 	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
 
 	if (timer) {
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index c3e65ae..f3e813a 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -59,19 +59,6 @@
 	.permissions = net_ctl_permissions,
 };
 
-static int net_ctl_ro_header_perms(struct ctl_table_root *root,
-		struct nsproxy *namespaces, struct ctl_table *table)
-{
-	if (net_eq(namespaces->net_ns, &init_net))
-		return table->mode;
-	else
-		return table->mode & ~0222;
-}
-
-static struct ctl_table_root net_sysctl_ro_root = {
-	.permissions = net_ctl_ro_header_perms,
-};
-
 static int __net_init sysctl_net_init(struct net *net)
 {
 	setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen);
@@ -88,34 +75,32 @@
 	.exit = sysctl_net_exit,
 };
 
-static __init int net_sysctl_init(void)
+static struct ctl_table_header *net_header;
+__init int net_sysctl_init(void)
 {
-	int ret;
+	static struct ctl_table empty[1];
+	int ret = -ENOMEM;
+	/* Avoid limitations in the sysctl implementation by
+	 * registering "/proc/sys/net" as an empty directory not in a
+	 * network namespace.
+	 */
+	net_header = register_sysctl("net", empty);
+	if (!net_header)
+		goto out;
 	ret = register_pernet_subsys(&sysctl_pernet_ops);
 	if (ret)
 		goto out;
-	setup_sysctl_set(&net_sysctl_ro_root.default_set, &net_sysctl_ro_root, NULL);
-	register_sysctl_root(&net_sysctl_ro_root);
 	register_sysctl_root(&net_sysctl_root);
 out:
 	return ret;
 }
-subsys_initcall(net_sysctl_init);
 
-struct ctl_table_header *register_net_sysctl_table(struct net *net,
-	const struct ctl_path *path, struct ctl_table *table)
+struct ctl_table_header *register_net_sysctl(struct net *net,
+	const char *path, struct ctl_table *table)
 {
-	return __register_sysctl_paths(&net->sysctls, path, table);
+	return __register_sysctl_table(&net->sysctls, path, table);
 }
-EXPORT_SYMBOL_GPL(register_net_sysctl_table);
-
-struct ctl_table_header *register_net_sysctl_rotable(const
-		struct ctl_path *path, struct ctl_table *table)
-{
-	return __register_sysctl_paths(&net_sysctl_ro_root.default_set,
-					path, table);
-}
-EXPORT_SYMBOL_GPL(register_net_sysctl_rotable);
+EXPORT_SYMBOL_GPL(register_net_sysctl);
 
 void unregister_net_sysctl_table(struct ctl_table_header *header)
 {
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index e4f35af..d706a1d 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -50,12 +50,30 @@
 	return addr & TIPC_CLUSTER_MASK;
 }
 
-static inline int in_own_cluster(u32 addr)
+static inline int in_own_cluster_exact(u32 addr)
 {
 	return !((addr ^ tipc_own_addr) >> 12);
 }
 
 /**
+ * in_own_node - test for node inclusion; <0.0.0> always matches
+ */
+
+static inline int in_own_node(u32 addr)
+{
+	return (addr == tipc_own_addr) || !addr;
+}
+
+/**
+ * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
+ */
+
+static inline int in_own_cluster(u32 addr)
+{
+	return in_own_cluster_exact(addr) || !addr;
+}
+
+/**
  * addr_domain - convert 2-bit scope value to equivalent message lookup domain
  *
  * Needed when address of a named message must be looked up a second time
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 5dfd89c..0bfdeba 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -449,7 +449,7 @@
 		if (tipc_in_scope(disc_domain, tipc_own_addr)) {
 			disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
 			res = 0;   /* accept any node in own cluster */
-		} else if (in_own_cluster(disc_domain))
+		} else if (in_own_cluster_exact(disc_domain))
 			res = 0;   /* accept specified node in own cluster */
 	}
 	if (res) {
diff --git a/net/tipc/config.c b/net/tipc/config.c
index f76d3b1..f5458ed 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -290,7 +290,7 @@
 
 	/* Check command authorization */
 
-	if (likely(orig_node == tipc_own_addr)) {
+	if (likely(in_own_node(orig_node))) {
 		/* command is permitted */
 	} else if (cmd >= 0x8000) {
 		rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b4b9b30..33cecd6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -612,7 +612,7 @@
  * @event: state machine event to process
  */
 
-static void link_state_event(struct tipc_link *l_ptr, unsigned event)
+static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 {
 	struct tipc_link *other;
 	u32 cont_intv = l_ptr->continuity_interval;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index d57da61..211c172 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -68,12 +68,37 @@
 };
 
 /**
- * List of externally visible publications by this node --
- * that is, all publications having scope > TIPC_NODE_SCOPE.
+ * struct publ_list - list of publications made by this node
+ * @list: circular list of publications
+ * @size: number of entries in list
  */
+struct publ_list {
+	struct list_head list;
+	u32 size;
+};
 
-static LIST_HEAD(publ_root);
-static u32 publ_cnt;
+static struct publ_list publ_zone = {
+	.list = LIST_HEAD_INIT(publ_zone.list),
+	.size = 0,
+};
+
+static struct publ_list publ_cluster = {
+	.list = LIST_HEAD_INIT(publ_cluster.list),
+	.size = 0,
+};
+
+static struct publ_list publ_node = {
+	.list = LIST_HEAD_INIT(publ_node.list),
+	.size = 0,
+};
+
+static struct publ_list *publ_lists[] = {
+	NULL,
+	&publ_zone,	/* publ_lists[TIPC_ZONE_SCOPE]		*/
+	&publ_cluster,	/* publ_lists[TIPC_CLUSTER_SCOPE]	*/
+	&publ_node	/* publ_lists[TIPC_NODE_SCOPE]		*/
+};
+
 
 /**
  * publ_to_item - add publication info to a publication message
@@ -132,8 +157,11 @@
 	struct sk_buff *buf;
 	struct distr_item *item;
 
-	list_add_tail(&publ->local_list, &publ_root);
-	publ_cnt++;
+	list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
+	publ_lists[publ->scope]->size++;
+
+	if (publ->scope == TIPC_NODE_SCOPE)
+		return;
 
 	buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
 	if (!buf) {
@@ -156,7 +184,10 @@
 	struct distr_item *item;
 
 	list_del(&publ->local_list);
-	publ_cnt--;
+	publ_lists[publ->scope]->size--;
+
+	if (publ->scope == TIPC_NODE_SCOPE)
+		return;
 
 	buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
 	if (!buf) {
@@ -169,6 +200,39 @@
 	named_cluster_distribute(buf);
 }
 
+/*
+ * named_distribute - prepare name info for bulk distribution to another node
+ */
+static void named_distribute(struct list_head *message_list, u32 node,
+			     struct publ_list *pls, u32 max_item_buf)
+{
+	struct publication *publ;
+	struct sk_buff *buf = NULL;
+	struct distr_item *item = NULL;
+	u32 left = 0;
+	u32 rest = pls->size * ITEM_SIZE;
+
+	list_for_each_entry(publ, &pls->list, local_list) {
+		if (!buf) {
+			left = (rest <= max_item_buf) ? rest : max_item_buf;
+			rest -= left;
+			buf = named_prepare_buf(PUBLICATION, left, node);
+			if (!buf) {
+				warn("Bulk publication failure\n");
+				return;
+			}
+			item = (struct distr_item *)msg_data(buf_msg(buf));
+		}
+		publ_to_item(item, publ);
+		item++;
+		left -= ITEM_SIZE;
+		if (!left) {
+			list_add_tail((struct list_head *)buf, message_list);
+			buf = NULL;
+		}
+	}
+}
+
 /**
  * tipc_named_node_up - tell specified node about all publications by this node
  */
@@ -177,13 +241,8 @@
 {
 	struct tipc_node *n_ptr;
 	struct tipc_link *l_ptr;
-	struct publication *publ;
-	struct distr_item *item = NULL;
-	struct sk_buff *buf = NULL;
 	struct list_head message_list;
 	u32 node = (u32)nodearg;
-	u32 left = 0;
-	u32 rest;
 	u32 max_item_buf = 0;
 
 	/* compute maximum amount of publication data to send per message */
@@ -207,28 +266,8 @@
 	INIT_LIST_HEAD(&message_list);
 
 	read_lock_bh(&tipc_nametbl_lock);
-	rest = publ_cnt * ITEM_SIZE;
-
-	list_for_each_entry(publ, &publ_root, local_list) {
-		if (!buf) {
-			left = (rest <= max_item_buf) ? rest : max_item_buf;
-			rest -= left;
-			buf = named_prepare_buf(PUBLICATION, left, node);
-			if (!buf) {
-				warn("Bulk publication distribution failure\n");
-				goto exit;
-			}
-			item = (struct distr_item *)msg_data(buf_msg(buf));
-		}
-		publ_to_item(item, publ);
-		item++;
-		left -= ITEM_SIZE;
-		if (!left) {
-			list_add_tail((struct list_head *)buf, &message_list);
-			buf = NULL;
-		}
-	}
-exit:
+	named_distribute(&message_list, node, &publ_cluster, max_item_buf);
+	named_distribute(&message_list, node, &publ_zone, max_item_buf);
 	read_unlock_bh(&tipc_nametbl_lock);
 
 	tipc_link_send_names(&message_list, (u32)node);
@@ -316,21 +355,23 @@
 }
 
 /**
- * tipc_named_reinit - re-initialize local publication list
+ * tipc_named_reinit - re-initialize local publications
  *
  * This routine is called whenever TIPC networking is enabled.
- * All existing publications by this node that have "cluster" or "zone" scope
- * are updated to reflect the node's new network address.
+ * All name table entries published by this node are updated to reflect
+ * the node's new network address.
  */
 
 void tipc_named_reinit(void)
 {
 	struct publication *publ;
+	int scope;
 
 	write_lock_bh(&tipc_nametbl_lock);
 
-	list_for_each_entry(publ, &publ_root, local_list)
-		publ->node = tipc_own_addr;
+	for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
+		list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
+			publ->node = tipc_own_addr;
 
 	write_unlock_bh(&tipc_nametbl_lock);
 }
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index c6a1ae3..25c2975 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -347,7 +347,7 @@
 		info->cluster_list_size++;
 	}
 
-	if (node == tipc_own_addr) {
+	if (in_own_node(node)) {
 		list_add(&publ->node_list, &info->node_list);
 		info->node_list_size++;
 	}
@@ -418,7 +418,7 @@
 
 	/* Remove publication from node scope list, if present */
 
-	if (node == tipc_own_addr) {
+	if (in_own_node(node)) {
 		list_del(&publ->node_list);
 		info->node_list_size--;
 	}
@@ -604,7 +604,7 @@
 		publ = list_first_entry(&info->node_list, struct publication,
 					node_list);
 		list_move_tail(&publ->node_list, &info->node_list);
-	} else if (in_own_cluster(*destnode)) {
+	} else if (in_own_cluster_exact(*destnode)) {
 		if (list_empty(&info->cluster_list))
 			goto no_match;
 		publ = list_first_entry(&info->cluster_list, struct publication,
@@ -695,11 +695,12 @@
 	}
 
 	write_lock_bh(&tipc_nametbl_lock);
-	table.local_publ_count++;
 	publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
 				   tipc_own_addr, port_ref, key);
-	if (publ && (scope != TIPC_NODE_SCOPE))
+	if (likely(publ)) {
+		table.local_publ_count++;
 		tipc_named_publish(publ);
+	}
 	write_unlock_bh(&tipc_nametbl_lock);
 	return publ;
 }
@@ -716,8 +717,7 @@
 	publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
 	if (likely(publ)) {
 		table.local_publ_count--;
-		if (publ->scope != TIPC_NODE_SCOPE)
-			tipc_named_withdraw(publ);
+		tipc_named_withdraw(publ);
 		write_unlock_bh(&tipc_nametbl_lock);
 		list_del_init(&publ->pport_list);
 		kfree(publ);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index d4531b0..5fab4ff 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -178,11 +178,12 @@
 	tipc_subscr_stop();
 	tipc_cfg_stop();
 
+	write_lock_bh(&tipc_net_lock);
 	tipc_own_addr = addr;
 	tipc_named_reinit();
 	tipc_port_reinit();
-
 	tipc_bclink_init();
+	write_unlock_bh(&tipc_net_lock);
 
 	tipc_k_signal((Handler)tipc_subscr_start, 0);
 	tipc_k_signal((Handler)tipc_cfg_init, 0);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index a34cabc..76565c9 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -58,7 +58,7 @@
  * entries has been chosen so that no hash chain exceeds 8 nodes and will
  * usually be much smaller (typically only a single node).
  */
-static inline unsigned int tipc_hashfn(u32 addr)
+static unsigned int tipc_hashfn(u32 addr)
 {
 	return addr & (NODE_HTABLE_SIZE - 1);
 }
@@ -72,7 +72,7 @@
 	struct tipc_node *node;
 	struct hlist_node *pos;
 
-	if (unlikely(!in_own_cluster(addr)))
+	if (unlikely(!in_own_cluster_exact(addr)))
 		return NULL;
 
 	hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index c3c2815..327ffbb 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -45,7 +45,7 @@
 void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
 		       void *usr_handle, net_ev_handler handle_down)
 {
-	if (addr == tipc_own_addr) {
+	if (in_own_node(addr)) {
 		node_sub->node = NULL;
 		return;
 	}
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 94d2904..4aede40 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -69,6 +69,28 @@
 	return msg_destport(&p_ptr->phdr);
 }
 
+/*
+ * tipc_port_peer_msg - verify message was sent by connected port's peer
+ *
+ * Handles cases where the node's network address has changed from
+ * the default of <0.0.0> to its configured setting.
+ */
+
+int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
+{
+	u32 peernode;
+	u32 orignode;
+
+	if (msg_origport(msg) != port_peerport(p_ptr))
+		return 0;
+
+	orignode = msg_orignode(msg);
+	peernode = port_peernode(p_ptr);
+	return (orignode == peernode) ||
+		(!orignode && (peernode == tipc_own_addr)) ||
+		(!peernode && (orignode == tipc_own_addr));
+}
+
 /**
  * tipc_multicast - send a multicast message to local and remote destinations
  */
@@ -221,18 +243,25 @@
 	p_ptr->usr_handle = usr_handle;
 	p_ptr->max_pkt = MAX_PKT_DEFAULT;
 	p_ptr->ref = ref;
-	msg = &p_ptr->phdr;
-	tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
-	msg_set_origport(msg, ref);
 	INIT_LIST_HEAD(&p_ptr->wait_list);
 	INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
 	p_ptr->dispatcher = dispatcher;
 	p_ptr->wakeup = wakeup;
 	p_ptr->user_port = NULL;
 	k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
-	spin_lock_bh(&tipc_port_list_lock);
 	INIT_LIST_HEAD(&p_ptr->publications);
 	INIT_LIST_HEAD(&p_ptr->port_list);
+
+	/*
+	 * Must hold port list lock while initializing message header template
+	 * to ensure a change to node's own network address doesn't result
+	 * in template containing out-dated network address information
+	 */
+
+	spin_lock_bh(&tipc_port_list_lock);
+	msg = &p_ptr->phdr;
+	tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
+	msg_set_origport(msg, ref);
 	list_add_tail(&p_ptr->port_list, &ports);
 	spin_unlock_bh(&tipc_port_list_lock);
 	return p_ptr;
@@ -415,7 +444,7 @@
 	/* send returned message & dispose of rejected message */
 
 	src_node = msg_prevnode(msg);
-	if (src_node == tipc_own_addr)
+	if (in_own_node(src_node))
 		tipc_port_recv_msg(rbuf);
 	else
 		tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
@@ -519,25 +548,21 @@
 	struct tipc_msg *msg = buf_msg(buf);
 	struct tipc_port *p_ptr;
 	struct sk_buff *r_buf = NULL;
-	u32 orignode = msg_orignode(msg);
-	u32 origport = msg_origport(msg);
 	u32 destport = msg_destport(msg);
 	int wakeable;
 
 	/* Validate connection */
 
 	p_ptr = tipc_port_lock(destport);
-	if (!p_ptr || !p_ptr->connected ||
-	    (port_peernode(p_ptr) != orignode) ||
-	    (port_peerport(p_ptr) != origport)) {
+	if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) {
 		r_buf = tipc_buf_acquire(BASIC_H_SIZE);
 		if (r_buf) {
 			msg = buf_msg(r_buf);
 			tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG,
-				      BASIC_H_SIZE, orignode);
+				      BASIC_H_SIZE, msg_orignode(msg));
 			msg_set_errcode(msg, TIPC_ERR_NO_PORT);
 			msg_set_origport(msg, destport);
-			msg_set_destport(msg, origport);
+			msg_set_destport(msg, msg_origport(msg));
 		}
 		if (p_ptr)
 			tipc_port_unlock(p_ptr);
@@ -646,8 +671,6 @@
 	spin_lock_bh(&tipc_port_list_lock);
 	list_for_each_entry(p_ptr, &ports, port_list) {
 		msg = &p_ptr->phdr;
-		if (msg_orignode(msg) == tipc_own_addr)
-			break;
 		msg_set_prevnode(msg, tipc_own_addr);
 		msg_set_orignode(msg, tipc_own_addr);
 	}
@@ -676,6 +699,7 @@
 		struct tipc_name_seq dseq;
 		void *usr_handle;
 		int connected;
+		int peer_invalid;
 		int published;
 		u32 message_type;
 
@@ -696,6 +720,7 @@
 		up_ptr = p_ptr->user_port;
 		usr_handle = up_ptr->usr_handle;
 		connected = p_ptr->connected;
+		peer_invalid = connected && !tipc_port_peer_msg(p_ptr, msg);
 		published = p_ptr->published;
 
 		if (unlikely(msg_errcode(msg)))
@@ -705,8 +730,6 @@
 
 		case TIPC_CONN_MSG:{
 				tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
-				u32 peer_port = port_peerport(p_ptr);
-				u32 peer_node = port_peernode(p_ptr);
 				u32 dsz;
 
 				tipc_port_unlock(p_ptr);
@@ -715,8 +738,7 @@
 				if (unlikely(!connected)) {
 					if (tipc_connect2port(dref, &orig))
 						goto reject;
-				} else if ((msg_origport(msg) != peer_port) ||
-					   (msg_orignode(msg) != peer_node))
+				} else if (peer_invalid)
 					goto reject;
 				dsz = msg_data_sz(msg);
 				if (unlikely(dsz &&
@@ -768,14 +790,9 @@
 		case TIPC_CONN_MSG:{
 				tipc_conn_shutdown_event cb =
 					up_ptr->conn_err_cb;
-				u32 peer_port = port_peerport(p_ptr);
-				u32 peer_node = port_peernode(p_ptr);
 
 				tipc_port_unlock(p_ptr);
-				if (!cb || !connected)
-					break;
-				if ((msg_origport(msg) != peer_port) ||
-				    (msg_orignode(msg) != peer_node))
+				if (!cb || !connected || peer_invalid)
 					break;
 				tipc_disconnect(dref);
 				skb_pull(buf, msg_hdr_sz(msg));
@@ -1152,17 +1169,6 @@
 	/* validate destination & pass to port, otherwise reject message */
 	p_ptr = tipc_port_lock(destport);
 	if (likely(p_ptr)) {
-		if (likely(p_ptr->connected)) {
-			if ((unlikely(msg_origport(msg) !=
-				      tipc_peer_port(p_ptr))) ||
-			    (unlikely(msg_orignode(msg) !=
-				      tipc_peer_node(p_ptr))) ||
-			    (unlikely(!msg_connected(msg)))) {
-				err = TIPC_ERR_NO_PORT;
-				tipc_port_unlock(p_ptr);
-				goto reject;
-			}
-		}
 		err = p_ptr->dispatcher(p_ptr, buf);
 		tipc_port_unlock(p_ptr);
 		if (likely(!err))
@@ -1170,7 +1176,7 @@
 	} else {
 		err = TIPC_ERR_NO_PORT;
 	}
-reject:
+
 	return tipc_reject_msg(buf, err);
 }
 
@@ -1211,7 +1217,7 @@
 	p_ptr->congested = 1;
 	if (!tipc_port_congested(p_ptr)) {
 		destnode = port_peernode(p_ptr);
-		if (likely(destnode != tipc_own_addr))
+		if (likely(!in_own_node(destnode)))
 			res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
 							   total_len, destnode);
 		else
@@ -1261,13 +1267,17 @@
 	msg_set_destport(msg, destport);
 
 	if (likely(destport || destnode)) {
-		if (likely(destnode == tipc_own_addr))
+		if (likely(in_own_node(destnode)))
 			res = tipc_port_recv_sections(p_ptr, num_sect,
 						      msg_sect, total_len);
-		else
+		else if (tipc_own_addr)
 			res = tipc_link_send_sections_fast(p_ptr, msg_sect,
 							   num_sect, total_len,
 							   destnode);
+		else
+			res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
+							num_sect, total_len,
+							TIPC_ERR_NO_NODE);
 		if (likely(res != -ELINKCONG)) {
 			if (res > 0)
 				p_ptr->sent++;
@@ -1305,12 +1315,15 @@
 	msg_set_destport(msg, dest->ref);
 	msg_set_hdr_sz(msg, BASIC_H_SIZE);
 
-	if (dest->node == tipc_own_addr)
+	if (in_own_node(dest->node))
 		res =  tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
 					       total_len);
-	else
+	else if (tipc_own_addr)
 		res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
 						   total_len, dest->node);
+	else
+		res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
+						total_len, TIPC_ERR_NO_NODE);
 	if (likely(res != -ELINKCONG)) {
 		if (res > 0)
 			p_ptr->sent++;
@@ -1349,7 +1362,7 @@
 	skb_push(buf, BASIC_H_SIZE);
 	skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE);
 
-	if (dest->node == tipc_own_addr)
+	if (in_own_node(dest->node))
 		res = tipc_port_recv_msg(buf);
 	else
 		res = tipc_send_buf_fast(buf, dest->node);
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 9b88531..301e1bd 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -201,6 +201,7 @@
  * The following routines require that the port be locked on entry
  */
 int tipc_disconnect_port(struct tipc_port *tp_ptr);
+int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
 
 /*
  * TIPC messaging routines
@@ -257,16 +258,6 @@
 	return (struct tipc_port *)tipc_ref_deref(ref);
 }
 
-static inline u32 tipc_peer_port(struct tipc_port *p_ptr)
-{
-	return msg_destport(&p_ptr->phdr);
-}
-
-static inline u32 tipc_peer_node(struct tipc_port *p_ptr)
-{
-	return msg_destnode(&p_ptr->phdr);
-}
-
 static inline int tipc_port_congested(struct tipc_port *p_ptr)
 {
 	return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 29e957f..6d4991e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -535,7 +535,7 @@
 		     (dest->family != AF_TIPC)))
 		return -EINVAL;
 	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
-	    (m->msg_iovlen > (unsigned)INT_MAX))
+	    (m->msg_iovlen > (unsigned int)INT_MAX))
 		return -EMSGSIZE;
 
 	if (iocb)
@@ -647,7 +647,7 @@
 		return send_msg(iocb, sock, m, total_len);
 
 	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
-	    (m->msg_iovlen > (unsigned)INT_MAX))
+	    (m->msg_iovlen > (unsigned int)INT_MAX))
 		return -EMSGSIZE;
 
 	if (iocb)
@@ -734,8 +734,8 @@
 		goto exit;
 	}
 
-	if ((total_len > (unsigned)INT_MAX) ||
-	    (m->msg_iovlen > (unsigned)INT_MAX)) {
+	if ((total_len > (unsigned int)INT_MAX) ||
+	    (m->msg_iovlen > (unsigned int)INT_MAX)) {
 		res = -EMSGSIZE;
 		goto exit;
 	}
@@ -1236,7 +1236,8 @@
 		if (msg_mcast(msg))
 			return TIPC_ERR_NO_PORT;
 		if (sock->state == SS_CONNECTED) {
-			if (!msg_connected(msg))
+			if (!msg_connected(msg) ||
+			    !tipc_port_peer_msg(tipc_sk_port(sk), msg))
 				return TIPC_ERR_NO_PORT;
 		} else if (sock->state == SS_CONNECTING) {
 			if (!msg_connected(msg) && (msg_errcode(msg) == 0))
@@ -1329,7 +1330,7 @@
 	if (!sock_owned_by_user(sk)) {
 		res = filter_rcv(sk, buf);
 	} else {
-		if (sk_add_backlog(sk, buf))
+		if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
 			res = TIPC_ERR_OVERLOAD;
 		else
 			res = TIPC_OK;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d510353..641f2e4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -149,9 +149,10 @@
  *    each socket state is protected by separate spin lock.
  */
 
-static inline unsigned unix_hash_fold(__wsum n)
+static inline unsigned int unix_hash_fold(__wsum n)
 {
-	unsigned hash = (__force unsigned)n;
+	unsigned int hash = (__force unsigned int)n;
+
 	hash ^= hash>>16;
 	hash ^= hash>>8;
 	return hash&(UNIX_HASH_SIZE-1);
@@ -200,7 +201,7 @@
  *		- if started by zero, it is abstract name.
  */
 
-static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
+static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
 {
 	if (len <= sizeof(short) || len > sizeof(*sunaddr))
 		return -EINVAL;
@@ -250,7 +251,7 @@
 
 static struct sock *__unix_find_socket_byname(struct net *net,
 					      struct sockaddr_un *sunname,
-					      int len, int type, unsigned hash)
+					      int len, int type, unsigned int hash)
 {
 	struct sock *s;
 	struct hlist_node *node;
@@ -273,7 +274,7 @@
 static inline struct sock *unix_find_socket_byname(struct net *net,
 						   struct sockaddr_un *sunname,
 						   int len, int type,
-						   unsigned hash)
+						   unsigned int hash)
 {
 	struct sock *s;
 
@@ -760,7 +761,7 @@
 
 static struct sock *unix_find_other(struct net *net,
 				    struct sockaddr_un *sunname, int len,
-				    int type, unsigned hash, int *error)
+				    int type, unsigned int hash, int *error)
 {
 	struct sock *u;
 	struct path path;
@@ -824,7 +825,7 @@
 	struct dentry *dentry = NULL;
 	struct path path;
 	int err;
-	unsigned hash;
+	unsigned int hash;
 	struct unix_address *addr;
 	struct hlist_head *list;
 
@@ -964,7 +965,7 @@
 	struct net *net = sock_net(sk);
 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
 	struct sock *other;
-	unsigned hash;
+	unsigned int hash;
 	int err;
 
 	if (addr->sa_family != AF_UNSPEC) {
@@ -1062,7 +1063,7 @@
 	struct sock *newsk = NULL;
 	struct sock *other = NULL;
 	struct sk_buff *skb = NULL;
-	unsigned hash;
+	unsigned int hash;
 	int st;
 	int err;
 	long timeo;
@@ -1437,11 +1438,12 @@
 	struct sock *other = NULL;
 	int namelen = 0; /* fake GCC */
 	int err;
-	unsigned hash;
+	unsigned int hash;
 	struct sk_buff *skb;
 	long timeo;
 	struct scm_cookie tmp_scm;
 	int max_level;
+	int data_len = 0;
 
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
@@ -1475,7 +1477,13 @@
 	if (len > sk->sk_sndbuf - 32)
 		goto out;
 
-	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
+	if (len > SKB_MAX_ALLOC)
+		data_len = min_t(size_t,
+				 len - SKB_MAX_ALLOC,
+				 MAX_SKB_FRAGS * PAGE_SIZE);
+
+	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
+				   msg->msg_flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto out;
 
@@ -1485,8 +1493,10 @@
 	max_level = err + 1;
 	unix_get_secdata(siocb->scm, skb);
 
-	skb_reset_transport_header(skb);
-	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+	skb_put(skb, len - data_len);
+	skb->data_len = data_len;
+	skb->len = len;
+	err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
 	if (err)
 		goto out_free;
 
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 397cffe..b34b5b9 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -26,12 +26,6 @@
 	{ }
 };
 
-static struct ctl_path unix_path[] = {
-	{ .procname = "net", },
-	{ .procname = "unix", },
-	{ },
-};
-
 int __net_init unix_sysctl_register(struct net *net)
 {
 	struct ctl_table *table;
@@ -41,7 +35,7 @@
 		goto err_alloc;
 
 	table[0].data = &net->unx.sysctl_max_dgram_qlen;
-	net->unx.ctl = register_net_sysctl_table(net, unix_path, table);
+	net->unx.ctl = register_net_sysctl(net, "net/unix", table);
 	if (net->unx.ctl == NULL)
 		goto err_reg;
 
@@ -58,6 +52,6 @@
 	struct ctl_table *table;
 
 	table = net->unx.ctl->ctl_table_arg;
-	unregister_sysctl_table(net->unx.ctl);
+	unregister_net_sysctl_table(net->unx.ctl);
 	kfree(table);
 }
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 3c65eae..a6470ac 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -187,7 +187,7 @@
 
 static
 void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
-		       unsigned allowed_states_bm)
+		       unsigned int allowed_states_bm)
 {
 	if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
 		printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n",
@@ -425,7 +425,8 @@
 size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
 			   unsigned char *addr, size_t addr_len)
 {
-	unsigned cnt, total;
+	unsigned int cnt, total;
+
 	for (total = cnt = 0; cnt < addr_len; cnt++)
 		total += scnprintf(addr_str + total, addr_str_size - total,
 				   "%02x%c", addr[cnt],
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 140c1d2..d5005c5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -356,20 +356,26 @@
 static int nl80211_msg_put_channel(struct sk_buff *msg,
 				   struct ieee80211_channel *chan)
 {
-	NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ,
-		    chan->center_freq);
+	if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
+			chan->center_freq))
+		goto nla_put_failure;
 
-	if (chan->flags & IEEE80211_CHAN_DISABLED)
-		NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED);
-	if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-		NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN);
-	if (chan->flags & IEEE80211_CHAN_NO_IBSS)
-		NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS);
-	if (chan->flags & IEEE80211_CHAN_RADAR)
-		NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR);
+	if ((chan->flags & IEEE80211_CHAN_DISABLED) &&
+	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED))
+		goto nla_put_failure;
+	if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN))
+		goto nla_put_failure;
+	if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
+	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
+		goto nla_put_failure;
+	if ((chan->flags & IEEE80211_CHAN_RADAR) &&
+	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
+		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
-		    DBM_TO_MBM(chan->max_power));
+	if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
+			DBM_TO_MBM(chan->max_power)))
+		goto nla_put_failure;
 
 	return 0;
 
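Everything that follows in nl80211.c is the same mechanical conversion as the hunk above: the NLA_PUT_*() macros, which hid a goto nla_put_failure inside themselves, become explicit checks on the non-zero return of the nla_put_*() helpers from <net/netlink.h>, and optional attributes fold the presence test and the put into a single condition. A minimal sketch of the pattern, with made-up attribute ids (EXAMPLE_ATTR_FOO, EXAMPLE_ATTR_FOO_PRESENT) used purely for illustration:

	#include <net/netlink.h>

	#define EXAMPLE_ATTR_FOO		1	/* made-up ids */
	#define EXAMPLE_ATTR_FOO_PRESENT	2

	static int example_fill(struct sk_buff *msg, u32 val, bool present)
	{
		/* old style:  NLA_PUT_U32(msg, EXAMPLE_ATTR_FOO, val);  */
		if (nla_put_u32(msg, EXAMPLE_ATTR_FOO, val))
			goto nla_put_failure;

		/* optional flag: presence test and put share one condition */
		if (present && nla_put_flag(msg, EXAMPLE_ATTR_FOO_PRESENT))
			goto nla_put_failure;

		return 0;

	 nla_put_failure:
		return -EMSGSIZE;
	}

The behaviour is unchanged when the message has room; the explicit form only makes the error path visible at the call site instead of burying it in the macro.
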
@@ -621,8 +627,8 @@
 
 	i = 0;
 	while (ifmodes) {
-		if (ifmodes & 1)
-			NLA_PUT_FLAG(msg, i);
+		if ((ifmodes & 1) && nla_put_flag(msg, i))
+			goto nla_put_failure;
 		ifmodes >>= 1;
 		i++;
 	}
@@ -665,8 +671,9 @@
 			nl_limit = nla_nest_start(msg, j + 1);
 			if (!nl_limit)
 				goto nla_put_failure;
-			NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX,
-				    c->limits[j].max);
+			if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX,
+					c->limits[j].max))
+				goto nla_put_failure;
 			if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
 						c->limits[j].types))
 				goto nla_put_failure;
@@ -675,13 +682,14 @@
 
 		nla_nest_end(msg, nl_limits);
 
-		if (c->beacon_int_infra_match)
-			NLA_PUT_FLAG(msg,
-				NL80211_IFACE_COMB_STA_AP_BI_MATCH);
-		NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
-			    c->num_different_channels);
-		NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM,
-			    c->max_interfaces);
+		if (c->beacon_int_infra_match &&
+		    nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH))
+			goto nla_put_failure;
+		if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
+				c->num_different_channels) ||
+		    nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
+				c->max_interfaces))
+			goto nla_put_failure;
 
 		nla_nest_end(msg, nl_combi);
 	}
@@ -712,64 +720,74 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx);
-	NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
+	    nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)) ||
+	    nla_put_u32(msg, NL80211_ATTR_GENERATION,
+			cfg80211_rdev_list_generation) ||
+	    nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
+		       dev->wiphy.retry_short) ||
+	    nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
+		       dev->wiphy.retry_long) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
+			dev->wiphy.frag_threshold) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
+			dev->wiphy.rts_threshold) ||
+	    nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
+		       dev->wiphy.coverage_class) ||
+	    nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
+		       dev->wiphy.max_scan_ssids) ||
+	    nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
+		       dev->wiphy.max_sched_scan_ssids) ||
+	    nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
+			dev->wiphy.max_scan_ie_len) ||
+	    nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
+			dev->wiphy.max_sched_scan_ie_len) ||
+	    nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
+		       dev->wiphy.max_match_sets))
+		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
-		    cfg80211_rdev_list_generation);
+	if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
+	    nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
+		goto nla_put_failure;
+	if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
+	    nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
+		goto nla_put_failure;
+	if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+	    nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
+		goto nla_put_failure;
+	if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
+	    nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
+		goto nla_put_failure;
+	if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+	    nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
+		goto nla_put_failure;
+	if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
+	    nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
+		goto nla_put_failure;
 
-	NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
-		   dev->wiphy.retry_short);
-	NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
-		   dev->wiphy.retry_long);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
-		    dev->wiphy.frag_threshold);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
-		    dev->wiphy.rts_threshold);
-	NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
-		    dev->wiphy.coverage_class);
-	NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
-		   dev->wiphy.max_scan_ssids);
-	NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
-		   dev->wiphy.max_sched_scan_ssids);
-	NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
-		    dev->wiphy.max_scan_ie_len);
-	NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
-		    dev->wiphy.max_sched_scan_ie_len);
-	NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS,
-		   dev->wiphy.max_match_sets);
+	if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
+		    sizeof(u32) * dev->wiphy.n_cipher_suites,
+		    dev->wiphy.cipher_suites))
+		goto nla_put_failure;
 
-	if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
-	if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
-	if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD);
-	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT);
-	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT);
-	if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP);
+	if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
+		       dev->wiphy.max_num_pmkids))
+		goto nla_put_failure;
 
-	NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
-		sizeof(u32) * dev->wiphy.n_cipher_suites,
-		dev->wiphy.cipher_suites);
+	if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
+	    nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
+		goto nla_put_failure;
 
-	NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
-		   dev->wiphy.max_num_pmkids);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
+			dev->wiphy.available_antennas_tx) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
+			dev->wiphy.available_antennas_rx))
+		goto nla_put_failure;
 
-	if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE);
-
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
-		    dev->wiphy.available_antennas_tx);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
-		    dev->wiphy.available_antennas_rx);
-
-	if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD)
-		NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
-			    dev->wiphy.probe_resp_offload);
+	if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
+	    nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
+			dev->wiphy.probe_resp_offload))
+		goto nla_put_failure;
 
 	if ((dev->wiphy.available_antennas_tx ||
 	     dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
@@ -777,8 +795,11 @@
 		int res;
 		res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant);
 		if (!res) {
-			NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant);
-			NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant);
+			if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX,
+					tx_ant) ||
+			    nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX,
+					rx_ant))
+				goto nla_put_failure;
 		}
 	}
 
@@ -799,17 +820,17 @@
 			goto nla_put_failure;
 
 		/* add HT info */
-		if (dev->wiphy.bands[band]->ht_cap.ht_supported) {
-			NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET,
-				sizeof(dev->wiphy.bands[band]->ht_cap.mcs),
-				&dev->wiphy.bands[band]->ht_cap.mcs);
-			NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA,
-				dev->wiphy.bands[band]->ht_cap.cap);
-			NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
-				dev->wiphy.bands[band]->ht_cap.ampdu_factor);
-			NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
-				dev->wiphy.bands[band]->ht_cap.ampdu_density);
-		}
+		if (dev->wiphy.bands[band]->ht_cap.ht_supported &&
+		    (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET,
+			     sizeof(dev->wiphy.bands[band]->ht_cap.mcs),
+			     &dev->wiphy.bands[band]->ht_cap.mcs) ||
+		     nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA,
+				 dev->wiphy.bands[band]->ht_cap.cap) ||
+		     nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
+				dev->wiphy.bands[band]->ht_cap.ampdu_factor) ||
+		     nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
+				dev->wiphy.bands[band]->ht_cap.ampdu_density)))
+			goto nla_put_failure;
 
 		/* add frequencies */
 		nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
@@ -842,11 +863,13 @@
 				goto nla_put_failure;
 
 			rate = &dev->wiphy.bands[band]->bitrates[i];
-			NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE,
-				    rate->bitrate);
-			if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
-				NLA_PUT_FLAG(msg,
-					NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE);
+			if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE,
+					rate->bitrate))
+				goto nla_put_failure;
+			if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
+			    nla_put_flag(msg,
+					 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE))
+				goto nla_put_failure;
 
 			nla_nest_end(msg, nl_rate);
 		}
@@ -866,7 +889,8 @@
 	 do {							\
 		if (dev->ops->op) {				\
 			i++;					\
-			NLA_PUT_U32(msg, i, NL80211_CMD_ ## n);	\
+			if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
+				goto nla_put_failure;		\
 		}						\
 	} while (0)
 
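The CMD() helper above keeps its shape but now carries the explicit check as well; since the goto is expanded in place, the macro still relies on the enclosing function providing the nla_put_failure label and the i/msg variables. As a sketch, an invocation such as CMD(set_channel, SET_CHANNEL), used a few hunks below, now expands to roughly:

	do {
		if (dev->ops->set_channel) {
			i++;
			if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
				goto nla_put_failure;
		}
	} while (0);
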
@@ -894,7 +918,8 @@
 	CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
 	if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
 		i++;
-		NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
+		if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
+			goto nla_put_failure;
 	}
 	CMD(set_channel, SET_CHANNEL);
 	CMD(set_wds_peer, SET_WDS_PEER);
@@ -908,7 +933,8 @@
 	CMD(set_noack_map, SET_NOACK_MAP);
 	if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
 		i++;
-		NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
+		if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
+			goto nla_put_failure;
 	}
 
 #ifdef CONFIG_NL80211_TESTMODE
@@ -919,23 +945,27 @@
 
 	if (dev->ops->connect || dev->ops->auth) {
 		i++;
-		NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT);
+		if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
+			goto nla_put_failure;
 	}
 
 	if (dev->ops->disconnect || dev->ops->deauth) {
 		i++;
-		NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT);
+		if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
+			goto nla_put_failure;
 	}
 
 	nla_nest_end(msg, nl_cmds);
 
 	if (dev->ops->remain_on_channel &&
-	    dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
-		NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
-			    dev->wiphy.max_remain_on_channel_duration);
+	    (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
+	    nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
+			dev->wiphy.max_remain_on_channel_duration))
+		goto nla_put_failure;
 
-	if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
+	if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
+	    nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
+		goto nla_put_failure;
 
 	if (mgmt_stypes) {
 		u16 stypes;
@@ -953,9 +983,10 @@
 			i = 0;
 			stypes = mgmt_stypes[ift].tx;
 			while (stypes) {
-				if (stypes & 1)
-					NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
-						    (i << 4) | IEEE80211_FTYPE_MGMT);
+				if ((stypes & 1) &&
+				    nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
+						(i << 4) | IEEE80211_FTYPE_MGMT))
+					goto nla_put_failure;
 				stypes >>= 1;
 				i++;
 			}
@@ -975,9 +1006,10 @@
 			i = 0;
 			stypes = mgmt_stypes[ift].rx;
 			while (stypes) {
-				if (stypes & 1)
-					NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
-						    (i << 4) | IEEE80211_FTYPE_MGMT);
+				if ((stypes & 1) &&
+				    nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
+						(i << 4) | IEEE80211_FTYPE_MGMT))
+					goto nla_put_failure;
 				stypes >>= 1;
 				i++;
 			}
@@ -994,22 +1026,23 @@
 		if (!nl_wowlan)
 			goto nla_put_failure;
 
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE);
+		if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
+			goto nla_put_failure;
 		if (dev->wiphy.wowlan.n_patterns) {
 			struct nl80211_wowlan_pattern_support pat = {
 				.max_patterns = dev->wiphy.wowlan.n_patterns,
@@ -1018,8 +1051,9 @@
 				.max_pattern_len =
 					dev->wiphy.wowlan.pattern_max_len,
 			};
-			NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
-				sizeof(pat), &pat);
+			if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
+				    sizeof(pat), &pat))
+				goto nla_put_failure;
 		}
 
 		nla_nest_end(msg, nl_wowlan);
@@ -1032,16 +1066,20 @@
 	if (nl80211_put_iface_combinations(&dev->wiphy, msg))
 		goto nla_put_failure;
 
-	if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME)
-		NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME,
-			    dev->wiphy.ap_sme_capa);
+	if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
+	    nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
+			dev->wiphy.ap_sme_capa))
+		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features);
+	if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS,
+			dev->wiphy.features))
+		goto nla_put_failure;
 
-	if (dev->wiphy.ht_capa_mod_mask)
-		NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
-			sizeof(*dev->wiphy.ht_capa_mod_mask),
-			dev->wiphy.ht_capa_mod_mask);
+	if (dev->wiphy.ht_capa_mod_mask &&
+	    nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
+		    sizeof(*dev->wiphy.ht_capa_mod_mask),
+		    dev->wiphy.ht_capa_mod_mask))
+		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
 
@@ -1492,26 +1530,27 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype);
-
-	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
-		    rdev->devlist_generation ^
-			(cfg80211_rdev_list_generation << 2));
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFTYPE,
+			dev->ieee80211_ptr->iftype) ||
+	    nla_put_u32(msg, NL80211_ATTR_GENERATION,
+			rdev->devlist_generation ^
+			(cfg80211_rdev_list_generation << 2)))
+		goto nla_put_failure;
 
 	if (rdev->ops->get_channel) {
 		struct ieee80211_channel *chan;
 		enum nl80211_channel_type channel_type;
 
 		chan = rdev->ops->get_channel(&rdev->wiphy, &channel_type);
-		if (chan) {
-			NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ,
-				    chan->center_freq);
-			NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
-				    channel_type);
-		}
+		if (chan &&
+		    (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
+				    chan->center_freq) ||
+		     nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
+				    channel_type)))
+			goto nla_put_failure;
 	}
 
 	return genlmsg_end(msg, hdr);
@@ -1810,35 +1849,34 @@
 	struct nlattr *key;
 	struct get_key_cookie *cookie = c;
 
-	if (params->key)
-		NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA,
-			params->key_len, params->key);
-
-	if (params->seq)
-		NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ,
-			params->seq_len, params->seq);
-
-	if (params->cipher)
-		NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
-			    params->cipher);
+	if ((params->key &&
+	     nla_put(cookie->msg, NL80211_ATTR_KEY_DATA,
+		     params->key_len, params->key)) ||
+	    (params->seq &&
+	     nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ,
+		     params->seq_len, params->seq)) ||
+	    (params->cipher &&
+	     nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
+			 params->cipher)))
+		goto nla_put_failure;
 
 	key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY);
 	if (!key)
 		goto nla_put_failure;
 
-	if (params->key)
-		NLA_PUT(cookie->msg, NL80211_KEY_DATA,
-			params->key_len, params->key);
+	if ((params->key &&
+	     nla_put(cookie->msg, NL80211_KEY_DATA,
+		     params->key_len, params->key)) ||
+	    (params->seq &&
+	     nla_put(cookie->msg, NL80211_KEY_SEQ,
+		     params->seq_len, params->seq)) ||
+	    (params->cipher &&
+	     nla_put_u32(cookie->msg, NL80211_KEY_CIPHER,
+			 params->cipher)))
+		goto nla_put_failure;
 
-	if (params->seq)
-		NLA_PUT(cookie->msg, NL80211_KEY_SEQ,
-			params->seq_len, params->seq);
-
-	if (params->cipher)
-		NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER,
-			    params->cipher);
-
-	NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx);
+	if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx))
+		goto nla_put_failure;
 
 	nla_nest_end(cookie->msg, key);
 
@@ -1896,10 +1934,12 @@
 	cookie.msg = msg;
 	cookie.idx = key_idx;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx);
-	if (mac_addr)
-		NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx))
+		goto nla_put_failure;
+	if (mac_addr &&
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+		goto nla_put_failure;
 
 	if (pairwise && mac_addr &&
 	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
@@ -2389,15 +2429,15 @@
 
 	/* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
 	bitrate = cfg80211_calculate_bitrate(info);
-	if (bitrate > 0)
-		NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
-
-	if (info->flags & RATE_INFO_FLAGS_MCS)
-		NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs);
-	if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
-		NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
-	if (info->flags & RATE_INFO_FLAGS_SHORT_GI)
-		NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
+	if ((bitrate > 0 &&
+	     nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) ||
+	    ((info->flags & RATE_INFO_FLAGS_MCS) &&
+	     nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) ||
+	    ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) &&
+	     nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) ||
+	    ((info->flags & RATE_INFO_FLAGS_SHORT_GI) &&
+	     nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, rate);
 	return true;
@@ -2419,43 +2459,50 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
-
-	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
+	    nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation))
+		goto nla_put_failure;
 
 	sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
 	if (!sinfoattr)
 		goto nla_put_failure;
-	if (sinfo->filled & STATION_INFO_CONNECTED_TIME)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME,
-			    sinfo->connected_time);
-	if (sinfo->filled & STATION_INFO_INACTIVE_TIME)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME,
-			    sinfo->inactive_time);
-	if (sinfo->filled & STATION_INFO_RX_BYTES)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES,
-			    sinfo->rx_bytes);
-	if (sinfo->filled & STATION_INFO_TX_BYTES)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES,
-			    sinfo->tx_bytes);
-	if (sinfo->filled & STATION_INFO_LLID)
-		NLA_PUT_U16(msg, NL80211_STA_INFO_LLID,
-			    sinfo->llid);
-	if (sinfo->filled & STATION_INFO_PLID)
-		NLA_PUT_U16(msg, NL80211_STA_INFO_PLID,
-			    sinfo->plid);
-	if (sinfo->filled & STATION_INFO_PLINK_STATE)
-		NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE,
-			    sinfo->plink_state);
+	if ((sinfo->filled & STATION_INFO_CONNECTED_TIME) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_CONNECTED_TIME,
+			sinfo->connected_time))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_INACTIVE_TIME) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME,
+			sinfo->inactive_time))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_RX_BYTES) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
+			sinfo->rx_bytes))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_TX_BYTES) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
+			sinfo->tx_bytes))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_LLID) &&
+	    nla_put_u16(msg, NL80211_STA_INFO_LLID, sinfo->llid))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_PLID) &&
+	    nla_put_u16(msg, NL80211_STA_INFO_PLID, sinfo->plid))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_PLINK_STATE) &&
+	    nla_put_u8(msg, NL80211_STA_INFO_PLINK_STATE,
+		       sinfo->plink_state))
+		goto nla_put_failure;
 	switch (rdev->wiphy.signal_type) {
 	case CFG80211_SIGNAL_TYPE_MBM:
-		if (sinfo->filled & STATION_INFO_SIGNAL)
-			NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
-				   sinfo->signal);
-		if (sinfo->filled & STATION_INFO_SIGNAL_AVG)
-			NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
-				   sinfo->signal_avg);
+		if ((sinfo->filled & STATION_INFO_SIGNAL) &&
+		    nla_put_u8(msg, NL80211_STA_INFO_SIGNAL,
+			       sinfo->signal))
+			goto nla_put_failure;
+		if ((sinfo->filled & STATION_INFO_SIGNAL_AVG) &&
+		    nla_put_u8(msg, NL80211_STA_INFO_SIGNAL_AVG,
+			       sinfo->signal_avg))
+			goto nla_put_failure;
 		break;
 	default:
 		break;
@@ -2470,52 +2517,60 @@
 					  NL80211_STA_INFO_RX_BITRATE))
 			goto nla_put_failure;
 	}
-	if (sinfo->filled & STATION_INFO_RX_PACKETS)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS,
-			    sinfo->rx_packets);
-	if (sinfo->filled & STATION_INFO_TX_PACKETS)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS,
-			    sinfo->tx_packets);
-	if (sinfo->filled & STATION_INFO_TX_RETRIES)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_RETRIES,
-			    sinfo->tx_retries);
-	if (sinfo->filled & STATION_INFO_TX_FAILED)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED,
-			    sinfo->tx_failed);
-	if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS,
-			    sinfo->beacon_loss_count);
+	if ((sinfo->filled & STATION_INFO_RX_PACKETS) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_RX_PACKETS,
+			sinfo->rx_packets))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_TX_PACKETS) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_TX_PACKETS,
+			sinfo->tx_packets))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_TX_RETRIES) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_TX_RETRIES,
+			sinfo->tx_retries))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_TX_FAILED) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED,
+			sinfo->tx_failed))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
+			sinfo->beacon_loss_count))
+		goto nla_put_failure;
 	if (sinfo->filled & STATION_INFO_BSS_PARAM) {
 		bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
 		if (!bss_param)
 			goto nla_put_failure;
 
-		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT)
-			NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT);
-		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE)
-			NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE);
-		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME)
-			NLA_PUT_FLAG(msg,
-				     NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME);
-		NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
-			   sinfo->bss_param.dtim_period);
-		NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
-			    sinfo->bss_param.beacon_interval);
+		if (((sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) &&
+		     nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) ||
+		    ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) &&
+		     nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) ||
+		    ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) &&
+		     nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) ||
+		    nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
+			       sinfo->bss_param.dtim_period) ||
+		    nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
+				sinfo->bss_param.beacon_interval))
+			goto nla_put_failure;
 
 		nla_nest_end(msg, bss_param);
 	}
-	if (sinfo->filled & STATION_INFO_STA_FLAGS)
-		NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS,
-			sizeof(struct nl80211_sta_flag_update),
-			&sinfo->sta_flags);
-	if (sinfo->filled & STATION_INFO_T_OFFSET)
-		NLA_PUT_U64(msg, NL80211_STA_INFO_T_OFFSET,
-			    sinfo->t_offset);
+	if ((sinfo->filled & STATION_INFO_STA_FLAGS) &&
+	    nla_put(msg, NL80211_STA_INFO_STA_FLAGS,
+		    sizeof(struct nl80211_sta_flag_update),
+		    &sinfo->sta_flags))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_T_OFFSET) &&
+		nla_put_u64(msg, NL80211_STA_INFO_T_OFFSET,
+			    sinfo->t_offset))
+		goto nla_put_failure;
 	nla_nest_end(msg, sinfoattr);
 
-	if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
-		NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
-			sinfo->assoc_req_ies);
+	if ((sinfo->filled & STATION_INFO_ASSOC_REQ_IES) &&
+	    nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+		    sinfo->assoc_req_ies))
+		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
 
@@ -2937,36 +2992,37 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst);
-	NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop);
-
-	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) ||
+	    nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) ||
+	    nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation))
+		goto nla_put_failure;
 
 	pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO);
 	if (!pinfoattr)
 		goto nla_put_failure;
-	if (pinfo->filled & MPATH_INFO_FRAME_QLEN)
-		NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
-			    pinfo->frame_qlen);
-	if (pinfo->filled & MPATH_INFO_SN)
-		NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN,
-			    pinfo->sn);
-	if (pinfo->filled & MPATH_INFO_METRIC)
-		NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC,
-			    pinfo->metric);
-	if (pinfo->filled & MPATH_INFO_EXPTIME)
-		NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME,
-			    pinfo->exptime);
-	if (pinfo->filled & MPATH_INFO_FLAGS)
-		NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS,
-			    pinfo->flags);
-	if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT)
-		NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
-			    pinfo->discovery_timeout);
-	if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES)
-		NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
-			    pinfo->discovery_retries);
+	if ((pinfo->filled & MPATH_INFO_FRAME_QLEN) &&
+	    nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
+			pinfo->frame_qlen))
+		goto nla_put_failure;
+	if (((pinfo->filled & MPATH_INFO_SN) &&
+	     nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) ||
+	    ((pinfo->filled & MPATH_INFO_METRIC) &&
+	     nla_put_u32(msg, NL80211_MPATH_INFO_METRIC,
+			 pinfo->metric)) ||
+	    ((pinfo->filled & MPATH_INFO_EXPTIME) &&
+	     nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME,
+			 pinfo->exptime)) ||
+	    ((pinfo->filled & MPATH_INFO_FLAGS) &&
+	     nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS,
+			pinfo->flags)) ||
+	    ((pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) &&
+	     nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
+			 pinfo->discovery_timeout)) ||
+	    ((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) &&
+	     nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
+			pinfo->discovery_retries)))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, pinfoattr);
 
@@ -3292,49 +3348,50 @@
 	pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
 	if (!pinfoattr)
 		goto nla_put_failure;
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
-			cur_params.dot11MeshRetryTimeout);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
-			cur_params.dot11MeshConfirmTimeout);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
-			cur_params.dot11MeshHoldingTimeout);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
-			cur_params.dot11MeshMaxPeerLinks);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES,
-			cur_params.dot11MeshMaxRetries);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_TTL,
-			cur_params.dot11MeshTTL);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_ELEMENT_TTL,
-			cur_params.element_ttl);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
-			cur_params.auto_open_plinks);
-	NLA_PUT_U32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
-			cur_params.dot11MeshNbrOffsetMaxNeighbor);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
-			cur_params.dot11MeshHWMPmaxPREQretries);
-	NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
-			cur_params.path_refresh_time);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
-			cur_params.min_discovery_timeout);
-	NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
-			cur_params.dot11MeshHWMPactivePathTimeout);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
-			cur_params.dot11MeshHWMPpreqMinInterval);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
-			cur_params.dot11MeshHWMPperrMinInterval);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
-			cur_params.dot11MeshHWMPnetDiameterTraversalTime);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
-			cur_params.dot11MeshHWMPRootMode);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
-			cur_params.dot11MeshHWMPRannInterval);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
-			cur_params.dot11MeshGateAnnouncementProtocol);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_FORWARDING,
-			cur_params.dot11MeshForwarding);
-	NLA_PUT_U32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
-			cur_params.rssi_threshold);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
+			cur_params.dot11MeshRetryTimeout) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
+			cur_params.dot11MeshConfirmTimeout) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
+			cur_params.dot11MeshHoldingTimeout) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
+			cur_params.dot11MeshMaxPeerLinks) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES,
+		       cur_params.dot11MeshMaxRetries) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_TTL,
+		       cur_params.dot11MeshTTL) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL,
+		       cur_params.element_ttl) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+		       cur_params.auto_open_plinks) ||
+	    nla_put_u32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
+			cur_params.dot11MeshNbrOffsetMaxNeighbor) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+		       cur_params.dot11MeshHWMPmaxPREQretries) ||
+	    nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
+			cur_params.path_refresh_time) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+			cur_params.min_discovery_timeout) ||
+	    nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
+			cur_params.dot11MeshHWMPactivePathTimeout) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
+			cur_params.dot11MeshHWMPpreqMinInterval) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+			cur_params.dot11MeshHWMPperrMinInterval) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
+			cur_params.dot11MeshHWMPnetDiameterTraversalTime) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
+		       cur_params.dot11MeshHWMPRootMode) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+			cur_params.dot11MeshHWMPRannInterval) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+		       cur_params.dot11MeshGateAnnouncementProtocol) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_FORWARDING,
+		       cur_params.dot11MeshForwarding) ||
+	    nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
+			cur_params.rssi_threshold))
+		goto nla_put_failure;
 	nla_nest_end(msg, pinfoattr);
 	genlmsg_end(msg, hdr);
 	return genlmsg_reply(msg, info);
@@ -3576,11 +3633,12 @@
 	if (!hdr)
 		goto put_failure;
 
-	NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
-		cfg80211_regdomain->alpha2);
-	if (cfg80211_regdomain->dfs_region)
-		NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION,
-			   cfg80211_regdomain->dfs_region);
+	if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
+			   cfg80211_regdomain->alpha2) ||
+	    (cfg80211_regdomain->dfs_region &&
+	     nla_put_u8(msg, NL80211_ATTR_DFS_REGION,
+			cfg80211_regdomain->dfs_region)))
+		goto nla_put_failure;
 
 	nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
 	if (!nl_reg_rules)
@@ -3600,18 +3658,19 @@
 		if (!nl_reg_rule)
 			goto nla_put_failure;
 
-		NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS,
-			reg_rule->flags);
-		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START,
-			freq_range->start_freq_khz);
-		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END,
-			freq_range->end_freq_khz);
-		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
-			freq_range->max_bandwidth_khz);
-		NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
-			power_rule->max_antenna_gain);
-		NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
-			power_rule->max_eirp);
+		if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
+				reg_rule->flags) ||
+		    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START,
+				freq_range->start_freq_khz) ||
+		    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END,
+				freq_range->end_freq_khz) ||
+		    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
+				freq_range->max_bandwidth_khz) ||
+		    nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
+				power_rule->max_antenna_gain) ||
+		    nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
+				power_rule->max_eirp))
+			goto nla_put_failure;
 
 		nla_nest_end(msg, nl_reg_rule);
 	}
@@ -4182,37 +4241,44 @@
 
 	genl_dump_check_consistent(cb, hdr, &nl80211_fam);
 
-	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex))
+		goto nla_put_failure;
 
 	bss = nla_nest_start(msg, NL80211_ATTR_BSS);
 	if (!bss)
 		goto nla_put_failure;
-	if (!is_zero_ether_addr(res->bssid))
-		NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid);
-	if (res->information_elements && res->len_information_elements)
-		NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
-			res->len_information_elements,
-			res->information_elements);
-	if (res->beacon_ies && res->len_beacon_ies &&
-	    res->beacon_ies != res->information_elements)
-		NLA_PUT(msg, NL80211_BSS_BEACON_IES,
-			res->len_beacon_ies, res->beacon_ies);
-	if (res->tsf)
-		NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
-	if (res->beacon_interval)
-		NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval);
-	NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability);
-	NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq);
-	NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO,
-		jiffies_to_msecs(jiffies - intbss->ts));
+	if ((!is_zero_ether_addr(res->bssid) &&
+	     nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid)) ||
+	    (res->information_elements && res->len_information_elements &&
+	     nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
+		     res->len_information_elements,
+		     res->information_elements)) ||
+	    (res->beacon_ies && res->len_beacon_ies &&
+	     res->beacon_ies != res->information_elements &&
+	     nla_put(msg, NL80211_BSS_BEACON_IES,
+		     res->len_beacon_ies, res->beacon_ies)))
+		goto nla_put_failure;
+	if (res->tsf &&
+	    nla_put_u64(msg, NL80211_BSS_TSF, res->tsf))
+		goto nla_put_failure;
+	if (res->beacon_interval &&
+	    nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval))
+		goto nla_put_failure;
+	if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) ||
+	    nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
+	    nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
+			jiffies_to_msecs(jiffies - intbss->ts)))
+		goto nla_put_failure;
 
 	switch (rdev->wiphy.signal_type) {
 	case CFG80211_SIGNAL_TYPE_MBM:
-		NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal);
+		if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
+			goto nla_put_failure;
 		break;
 	case CFG80211_SIGNAL_TYPE_UNSPEC:
-		NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal);
+		if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal))
+			goto nla_put_failure;
 		break;
 	default:
 		break;
@@ -4221,14 +4287,16 @@
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
-		if (intbss == wdev->current_bss)
-			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
-				    NL80211_BSS_STATUS_ASSOCIATED);
+		if (intbss == wdev->current_bss &&
+		    nla_put_u32(msg, NL80211_BSS_STATUS,
+				NL80211_BSS_STATUS_ASSOCIATED))
+			goto nla_put_failure;
 		break;
 	case NL80211_IFTYPE_ADHOC:
-		if (intbss == wdev->current_bss)
-			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
-				    NL80211_BSS_STATUS_IBSS_JOINED);
+		if (intbss == wdev->current_bss &&
+		    nla_put_u32(msg, NL80211_BSS_STATUS,
+				NL80211_BSS_STATUS_IBSS_JOINED))
+			goto nla_put_failure;
 		break;
 	default:
 		break;
@@ -4297,34 +4365,43 @@
 	if (!hdr)
 		return -ENOMEM;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
 
 	infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO);
 	if (!infoattr)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY,
-		    survey->channel->center_freq);
-	if (survey->filled & SURVEY_INFO_NOISE_DBM)
-		NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE,
-			    survey->noise);
-	if (survey->filled & SURVEY_INFO_IN_USE)
-		NLA_PUT_FLAG(msg, NL80211_SURVEY_INFO_IN_USE);
-	if (survey->filled & SURVEY_INFO_CHANNEL_TIME)
-		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
-			    survey->channel_time);
-	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
-		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
-			    survey->channel_time_busy);
-	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
-		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
-			    survey->channel_time_ext_busy);
-	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_RX)
-		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
-			    survey->channel_time_rx);
-	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_TX)
-		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
-			    survey->channel_time_tx);
+	if (nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY,
+			survey->channel->center_freq))
+		goto nla_put_failure;
+
+	if ((survey->filled & SURVEY_INFO_NOISE_DBM) &&
+	    nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_IN_USE) &&
+	    nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_CHANNEL_TIME) &&
+	    nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
+			survey->channel_time))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) &&
+	    nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
+			survey->channel_time_busy))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) &&
+	    nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
+			survey->channel_time_ext_busy))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) &&
+	    nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
+			survey->channel_time_rx))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) &&
+	    nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
+			survey->channel_time_tx))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, infoattr);
 
@@ -5005,7 +5082,7 @@
 					   NL80211_CMD_TESTMODE);
 		struct nlattr *tmdata;
 
-		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) {
+		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
 			genlmsg_cancel(skb, hdr);
 			break;
 		}
@@ -5056,7 +5133,8 @@
 		return NULL;
 	}
 
-	NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
+		goto nla_put_failure;
 	data = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
 
 	((void **)skb->cb)[0] = rdev;
@@ -5435,7 +5513,8 @@
 	if (err)
 		goto free_msg;
 
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+	if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -5725,7 +5804,8 @@
 		goto free_msg;
 
 	if (msg) {
-		NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+		if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+			goto nla_put_failure;
 
 		genlmsg_end(msg, hdr);
 		return genlmsg_reply(msg, info);
@@ -5830,7 +5910,8 @@
 	else
 		ps_state = NL80211_PS_DISABLED;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state);
+	if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 	return genlmsg_reply(msg, info);
@@ -5977,20 +6058,21 @@
 		if (!nl_wowlan)
 			goto nla_put_failure;
 
-		if (rdev->wowlan->any)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
-		if (rdev->wowlan->disconnect)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
-		if (rdev->wowlan->magic_pkt)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
-		if (rdev->wowlan->gtk_rekey_failure)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE);
-		if (rdev->wowlan->eap_identity_req)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST);
-		if (rdev->wowlan->four_way_handshake)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE);
-		if (rdev->wowlan->rfkill_release)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE);
+		if ((rdev->wowlan->any &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
+		    (rdev->wowlan->disconnect &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
+		    (rdev->wowlan->magic_pkt &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
+		    (rdev->wowlan->gtk_rekey_failure &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
+		    (rdev->wowlan->eap_identity_req &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
+		    (rdev->wowlan->four_way_handshake &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
+		    (rdev->wowlan->rfkill_release &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
+			goto nla_put_failure;
 		if (rdev->wowlan->n_patterns) {
 			struct nlattr *nl_pats, *nl_pat;
 			int i, pat_len;
@@ -6005,12 +6087,13 @@
 				if (!nl_pat)
 					goto nla_put_failure;
 				pat_len = rdev->wowlan->patterns[i].pattern_len;
-				NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK,
-					DIV_ROUND_UP(pat_len, 8),
-					rdev->wowlan->patterns[i].mask);
-				NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
-					pat_len,
-					rdev->wowlan->patterns[i].pattern);
+				if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
+					    DIV_ROUND_UP(pat_len, 8),
+					    rdev->wowlan->patterns[i].mask) ||
+				    nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
+					    pat_len,
+					    rdev->wowlan->patterns[i].pattern))
+					goto nla_put_failure;
 				nla_nest_end(msg, nl_pat);
 			}
 			nla_nest_end(msg, nl_pats);
@@ -6287,7 +6370,8 @@
 	if (err)
 		goto free_msg;
 
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+	if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -6955,19 +7039,24 @@
 	nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
 	if (!nest)
 		goto nla_put_failure;
-	for (i = 0; i < req->n_ssids; i++)
-		NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid);
+	for (i = 0; i < req->n_ssids; i++) {
+		if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid))
+			goto nla_put_failure;
+	}
 	nla_nest_end(msg, nest);
 
 	nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
 	if (!nest)
 		goto nla_put_failure;
-	for (i = 0; i < req->n_channels; i++)
-		NLA_PUT_U32(msg, i, req->channels[i]->center_freq);
+	for (i = 0; i < req->n_channels; i++) {
+		if (nla_put_u32(msg, i, req->channels[i]->center_freq))
+			goto nla_put_failure;
+	}
 	nla_nest_end(msg, nest);
 
-	if (req->ie)
-		NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie);
+	if (req->ie &&
+	    nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
+		goto nla_put_failure;
 
 	return 0;
  nla_put_failure:
@@ -6986,8 +7075,9 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+		goto nla_put_failure;
 
 	/* ignore errors and send incomplete event anyway */
 	nl80211_add_scan_req(msg, rdev);
@@ -7011,8 +7101,9 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
 
@@ -7135,26 +7226,33 @@
 	}
 
 	/* Userspace can always count this one always being set */
-	NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator);
+	if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator))
+		goto nla_put_failure;
 
-	if (request->alpha2[0] == '0' && request->alpha2[1] == '0')
-		NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-			   NL80211_REGDOM_TYPE_WORLD);
-	else if (request->alpha2[0] == '9' && request->alpha2[1] == '9')
-		NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-			   NL80211_REGDOM_TYPE_CUSTOM_WORLD);
-	else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
-		 request->intersect)
-		NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-			   NL80211_REGDOM_TYPE_INTERSECTION);
-	else {
-		NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-			   NL80211_REGDOM_TYPE_COUNTRY);
-		NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2);
+	if (request->alpha2[0] == '0' && request->alpha2[1] == '0') {
+		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+			       NL80211_REGDOM_TYPE_WORLD))
+			goto nla_put_failure;
+	} else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') {
+		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+			       NL80211_REGDOM_TYPE_CUSTOM_WORLD))
+			goto nla_put_failure;
+	} else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
+		   request->intersect) {
+		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+			       NL80211_REGDOM_TYPE_INTERSECTION))
+			goto nla_put_failure;
+	} else {
+		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+			       NL80211_REGDOM_TYPE_COUNTRY) ||
+		    nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
+				   request->alpha2))
+			goto nla_put_failure;
 	}
 
-	if (wiphy_idx_valid(request->wiphy_idx))
-		NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx);
+	if (wiphy_idx_valid(request->wiphy_idx) &&
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7188,9 +7286,10 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7268,10 +7367,11 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7319,15 +7419,15 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	if (bssid)
-		NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
-	NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status);
-	if (req_ie)
-		NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
-	if (resp_ie)
-		NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
+	    nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) ||
+	    (req_ie &&
+	     nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
+	    (resp_ie &&
+	     nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7359,13 +7459,14 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
-	if (req_ie)
-		NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
-	if (resp_ie)
-		NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) ||
+	    (req_ie &&
+	     nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
+	    (resp_ie &&
+	     nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7396,14 +7497,14 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	if (from_ap && reason)
-		NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason);
-	if (from_ap)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP);
-	if (ie)
-		NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    (from_ap && reason &&
+	     nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
+	    (from_ap &&
+	     nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
+	    (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7434,9 +7535,10 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7467,11 +7569,12 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr);
-	if (ie_len && ie)
-		NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr) ||
+	    (ie_len && ie &&
+	     nla_put(msg, NL80211_ATTR_IE, ie_len , ie)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7502,15 +7605,14 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	if (addr)
-		NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
-	NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
-	if (key_id != -1)
-		NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
-	if (tsc)
-		NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) ||
+	    nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) ||
+	    (key_id != -1 &&
+	     nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) ||
+	    (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7545,7 +7647,8 @@
 	 * Since we are applying the beacon hint to a wiphy we know its
 	 * wiphy_idx is valid
 	 */
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy));
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)))
+		goto nla_put_failure;
 
 	/* Before */
 	nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
@@ -7597,14 +7700,16 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) ||
+	    nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+		goto nla_put_failure;
 
-	if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
-		NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
+	if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL &&
+	    nla_put_u32(msg, NL80211_ATTR_DURATION, duration))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7675,8 +7780,9 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7712,9 +7818,10 @@
 		return true;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
+		goto nla_put_failure;
 
 	err = genlmsg_end(msg, hdr);
 	if (err < 0) {
@@ -7763,12 +7870,13 @@
 		return -ENOMEM;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
-	if (sig_dbm)
-		NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
-	NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
+	    (sig_dbm &&
+	     nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
+	    nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7798,12 +7906,12 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
-	if (ack)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
+	    nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+	    (ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7835,15 +7943,17 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+		goto nla_put_failure;
 
 	pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
 	if (!pinfoattr)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
-		    rssi_event);
+	if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+			rssi_event))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, pinfoattr);
 
@@ -7876,16 +7986,18 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
+		goto nla_put_failure;
 
 	rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA);
 	if (!rekey_attr)
 		goto nla_put_failure;
 
-	NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR,
-		NL80211_REPLAY_CTR_LEN, replay_ctr);
+	if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR,
+		    NL80211_REPLAY_CTR_LEN, replay_ctr))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, rekey_attr);
 
@@ -7918,17 +8030,19 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+		goto nla_put_failure;
 
 	attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE);
 	if (!attr)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index);
-	NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid);
-	if (preauth)
-		NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH);
+	if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) ||
+	    nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) ||
+	    (preauth &&
+	     nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH)))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, attr);
 
@@ -7960,9 +8074,10 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7994,15 +8109,17 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
+		goto nla_put_failure;
 
 	pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
 	if (!pinfoattr)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets);
+	if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, pinfoattr);
 
@@ -8036,12 +8153,12 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
-	if (acked)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
+	    nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+	    (acked && nla_put_flag(msg, NL80211_ATTR_ACK)))
+		goto nla_put_failure;
 
 	err = genlmsg_end(msg, hdr);
 	if (err < 0) {
@@ -8081,12 +8198,13 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	if (freq)
-		NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
-	if (sig_dbm)
-		NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
-	NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    (freq &&
+	     nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) ||
+	    (sig_dbm &&
+	     nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
+	    nla_put(msg, NL80211_ATTR_FRAME, len, frame))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
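The nl80211 hunks above all follow one pattern: the old NLA_PUT*() macros, which hid a goto nla_put_failure inside the macro body, are replaced by nla_put_*() calls whose return values are checked explicitly and chained with ||, so a single if feeds the existing nla_put_failure label. A minimal stand-alone C sketch of that idiom follows; the put_u32()/put_flag() helpers are made-up stand-ins for the real netlink attribute functions, not kernel API.

#include <stdio.h>

/* Hypothetical stand-ins for nla_put_u32()/nla_put_flag(): return 0 on
 * success, non-zero when the message buffer has run out of room. */
static int put_u32(const char *name, unsigned int val)
{
	printf("%s = %u\n", name, val);
	return 0;
}

static int put_flag(const char *name)
{
	printf("%s\n", name);
	return 0;
}

static int fill_msg(unsigned int wiphy_idx, unsigned int ifindex, int from_ap)
{
	/* Chain the puts with ||: the first failure short-circuits the
	 * rest and jumps to the shared cleanup label, which is what the
	 * old NLA_PUT*() macros did implicitly. */
	if (put_u32("wiphy", wiphy_idx) ||
	    put_u32("ifindex", ifindex) ||
	    (from_ap && put_flag("disconnected-by-ap")))
		goto nla_put_failure;

	return 0;

 nla_put_failure:
	return -1;
}

int main(void)
{
	return fill_msg(0, 3, 1) ? 1 : 0;
}
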
diff --git a/net/wireless/util.c b/net/wireless/util.c
index ffced85..6cba001 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -985,7 +985,7 @@
 			if (rdev->wiphy.software_iftypes & BIT(iftype))
 				continue;
 			for (j = 0; j < c->n_limits; j++) {
-				if (!(limits[j].types & iftype))
+				if (!(limits[j].types & BIT(iftype)))
 					continue;
 				if (limits[j].max < num[iftype])
 					goto cont;
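The util.c one-liner above is a real bug fix: limits[j].types is a bitmask of interface types, while iftype is a plain enum value, so masking them directly compares a bit pattern against an index and usually yields 0 even when the limit does cover that type. A tiny stand-alone C example, with made-up interface-type values standing in for enum nl80211_iftype, shows the difference:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Made-up interface types, standing in for enum nl80211_iftype. */
enum iftype { IFTYPE_ADHOC = 1, IFTYPE_STATION = 2, IFTYPE_AP = 3 };

int main(void)
{
	/* An interface-combination limit that allows AP interfaces:
	 * stored as a bitmask, one bit per type. */
	unsigned int types = BIT(IFTYPE_AP);
	enum iftype ift = IFTYPE_AP;

	/* Old test: masks the bitmask (8) with the raw enum value (3),
	 * 8 & 3 == 0, so the matching limit is silently skipped. */
	printf("types & iftype      -> %s\n",
	       (types & ift) ? "match" : "no match");

	/* Fixed test: turn the enum value into its bit first. */
	printf("types & BIT(iftype) -> %s\n",
	       (types & BIT(ift)) ? "match" : "no match");

	return 0;
}
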
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index af648e0..b0eb7aa 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -256,7 +256,7 @@
 		.max_tokens	= sizeof(struct iw_pmksa),
 	},
 };
-static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
+static const unsigned int standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
 
 /*
  * Meta-data about all the additional standard Wireless Extension events
@@ -306,7 +306,7 @@
 		.max_tokens	= sizeof(struct iw_pmkid_cand),
 	},
 };
-static const unsigned standard_event_num = ARRAY_SIZE(standard_event);
+static const unsigned int standard_event_num = ARRAY_SIZE(standard_event);
 
 /* Size (in bytes) of various events */
 static const int event_type_size[] = {
@@ -402,7 +402,8 @@
 	r->ifi_flags = dev_get_flags(dev);
 	r->ifi_change = 0;	/* Wireless changes don't affect those flags */
 
-	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
+	if (nla_put_string(skb, IFLA_IFNAME, dev->name))
+		goto nla_put_failure;
 
 	return nlh;
  nla_put_failure:
@@ -428,7 +429,7 @@
 	int hdr_len;				/* Size of the event header */
 	int wrqu_off = 0;			/* Offset in wrqu */
 	/* Don't "optimise" the following variable, it will crash */
-	unsigned	cmd_index;		/* *MUST* be unsigned */
+	unsigned int	cmd_index;		/* *MUST* be unsigned */
 	struct sk_buff *skb;
 	struct nlmsghdr *nlh;
 	struct nlattr *nla;
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index d2efd29..4323952 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,18 +73,12 @@
 	{ 0, },
 };
 
-static struct ctl_path x25_path[] = {
-	{ .procname = "net", },
-	{ .procname = "x25", },
-	{ }
-};
-
 void __init x25_register_sysctl(void)
 {
-	x25_table_header = register_sysctl_paths(x25_path, x25_table);
+	x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
 }
 
 void x25_unregister_sysctl(void)
 {
-	unregister_sysctl_table(x25_table_header);
+	unregister_net_sysctl_table(x25_table_header);
 }
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index f0ce862..a8a2363 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -58,7 +58,7 @@
 		if (!sock_owned_by_user(sk)) {
 			queued = x25_process_rx_frame(sk, skb);
 		} else {
-			queued = !sk_add_backlog(sk, skb);
+			queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 		}
 		bh_unlock_sock(sk);
 		sock_put(sk);
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 36384a1..66c63873 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -231,7 +231,7 @@
 	}
 
 	if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) {
-		unsigned bytecount = (dte_facs->calling_len + 1) >> 1;
+		unsigned int bytecount = (dte_facs->calling_len + 1) >> 1;
 		*p++ = X25_FAC_CALLING_AE;
 		*p++ = 1 + bytecount;
 		*p++ = dte_facs->calling_len;
@@ -240,7 +240,7 @@
 	}
 
 	if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) {
-		unsigned bytecount = (dte_facs->called_len % 2) ?
+		unsigned int bytecount = (dte_facs->called_len % 2) ?
 		dte_facs->called_len / 2 + 1 :
 		dte_facs->called_len / 2;
 		*p++ = X25_FAC_CALLED_AE;
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 7199d78..716502a 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -45,10 +45,10 @@
 	return (h ^ (h >> 16)) & hmask;
 }
 
-static inline unsigned __xfrm_src_hash(const xfrm_address_t *daddr,
-				       const xfrm_address_t *saddr,
-				       unsigned short family,
-				       unsigned int hmask)
+static inline unsigned int __xfrm_src_hash(const xfrm_address_t *daddr,
+					   const xfrm_address_t *saddr,
+					   unsigned short family,
+					   unsigned int hmask)
 {
 	unsigned int h = family;
 	switch (family) {
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 05640bc..380976f 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -54,7 +54,7 @@
 	table[2].data = &net->xfrm.sysctl_larval_drop;
 	table[3].data = &net->xfrm.sysctl_acq_expires;
 
-	net->xfrm.sysctl_hdr = register_net_sysctl_table(net, net_core_path, table);
+	net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table);
 	if (!net->xfrm.sysctl_hdr)
 		goto out_register;
 	return 0;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7128dde..44293b3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -756,40 +756,50 @@
 {
 	copy_to_user_state(x, p);
 
-	if (x->coaddr)
-		NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
+	if (x->coaddr &&
+	    nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
+		goto nla_put_failure;
 
-	if (x->lastused)
-		NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
+	if (x->lastused &&
+	    nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
+		goto nla_put_failure;
 
-	if (x->aead)
-		NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
-	if (x->aalg) {
-		if (copy_to_user_auth(x->aalg, skb))
-			goto nla_put_failure;
+	if (x->aead &&
+	    nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
+		goto nla_put_failure;
 
-		NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
-			xfrm_alg_auth_len(x->aalg), x->aalg);
-	}
-	if (x->ealg)
-		NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
-	if (x->calg)
-		NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
+	if (x->aalg &&
+	    (copy_to_user_auth(x->aalg, skb) ||
+	     nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
+		     xfrm_alg_auth_len(x->aalg), x->aalg)))
+		goto nla_put_failure;
 
-	if (x->encap)
-		NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+	if (x->ealg &&
+	    nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
+		goto nla_put_failure;
 
-	if (x->tfcpad)
-		NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);
+	if (x->calg &&
+	    nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
+		goto nla_put_failure;
+
+	if (x->encap &&
+	    nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
+		goto nla_put_failure;
+
+	if (x->tfcpad &&
+	    nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
+		goto nla_put_failure;
 
 	if (xfrm_mark_put(skb, &x->mark))
 		goto nla_put_failure;
 
-	if (x->replay_esn)
-		NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
-			xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn);
+	if (x->replay_esn &&
+	    nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+		    xfrm_replay_state_esn_len(x->replay_esn),
+		    x->replay_esn))
+		goto nla_put_failure;
 
-	if (x->security && copy_sec_ctx(x->security, skb) < 0)
+	if (x->security && copy_sec_ctx(x->security, skb))
 		goto nla_put_failure;
 
 	return 0;
@@ -912,8 +922,9 @@
 	sph.spdhcnt = si.spdhcnt;
 	sph.spdhmcnt = si.spdhmcnt;
 
-	NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
-	NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
+	if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) ||
+	    nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -967,8 +978,9 @@
 	sh.sadhmcnt = si.sadhmcnt;
 	sh.sadhcnt = si.sadhcnt;
 
-	NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
-	NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
+	if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) ||
+	    nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -1690,21 +1702,27 @@
 	id->reqid = x->props.reqid;
 	id->flags = c->data.aevent;
 
-	if (x->replay_esn)
-		NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
-			xfrm_replay_state_esn_len(x->replay_esn),
-			x->replay_esn);
-	else
-		NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
+	if (x->replay_esn) {
+		if (nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+			    xfrm_replay_state_esn_len(x->replay_esn),
+			    x->replay_esn))
+			goto nla_put_failure;
+	} else {
+		if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
+			    &x->replay))
+			goto nla_put_failure;
+	}
+	if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft))
+		goto nla_put_failure;
 
-	NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
+	if ((id->flags & XFRM_AE_RTHR) &&
+	    nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff))
+		goto nla_put_failure;
 
-	if (id->flags & XFRM_AE_RTHR)
-		NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
-
-	if (id->flags & XFRM_AE_ETHR)
-		NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
-			    x->replay_maxage * 10 / HZ);
+	if ((id->flags & XFRM_AE_ETHR) &&
+	    nla_put_u32(skb, XFRMA_ETIMER_THRESH,
+			x->replay_maxage * 10 / HZ))
+		goto nla_put_failure;
 
 	if (xfrm_mark_put(skb, &x->mark))
 		goto nla_put_failure;
@@ -2835,8 +2853,9 @@
 	ur->proto = proto;
 	memcpy(&ur->sel, sel, sizeof(ur->sel));
 
-	if (addr)
-		NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
+	if (addr &&
+	    nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index de639ee..faea0ec 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1869,12 +1869,6 @@
 			    "No space is necessary after a cast\n" . $hereprev);
 		}
 
-		if ($rawline =~ /^\+[ \t]*\/\*[ \t]*$/ &&
-		    $prevrawline =~ /^\+[ \t]*$/) {
-			CHK("BLOCK_COMMENT_STYLE",
-			    "Don't begin block comments with only a /* line, use /* comment...\n" . $hereprev);
-		}
-
 # check for spaces at the beginning of a line.
 # Exceptions:
 #  1) within comments
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 0586085..52577f0 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -540,35 +540,6 @@
 };
 
 /*
- * Generate the __enabled_CONFIG_* and __enabled_CONFIG_*_MODULE macros for
- * use by the IS_{ENABLED,BUILTIN,MODULE} macros. The _MODULE variant is
- * generated even for booleans so that the IS_ENABLED() macro works.
- */
-static void
-header_print__enabled_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg)
-{
-
-	switch (sym->type) {
-	case S_BOOLEAN:
-	case S_TRISTATE: {
-		fprintf(fp, "#define __enabled_" CONFIG_ "%s %d\n",
-		    sym->name, (*value == 'y'));
-		fprintf(fp, "#define __enabled_" CONFIG_ "%s_MODULE %d\n",
-		    sym->name, (*value == 'm'));
-		break;
-	}
-	default:
-		break;
-	}
-}
-
-static struct conf_printer header__enabled_printer_cb =
-{
-	.print_symbol = header_print__enabled_symbol,
-	.print_comment = header_print_comment,
-};
-
-/*
  * Tristate printer
  *
  * This printer is used when generating the `include/config/tristate.conf' file.
@@ -949,16 +920,11 @@
 	conf_write_heading(out_h, &header_printer_cb, NULL);
 
 	for_all_symbols(i, sym) {
-		if (!sym->name)
-			continue;
-
 		sym_calc_value(sym);
-
-		conf_write_symbol(out_h, sym, &header__enabled_printer_cb, NULL);
-
-		if (!(sym->flags & SYMBOL_WRITE))
+		if (!(sym->flags & SYMBOL_WRITE) || !sym->name)
 			continue;
 
+		/* write symbol to auto.conf, tristate and header files */
 		conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1);
 
 		conf_write_symbol(tristate, sym, &tristate_printer_cb, (void *)1);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 8e730cc..44ddaa5 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1100,6 +1100,10 @@
 	if (!sym->st_shndx || get_secindex(info, sym) >= info->num_sections)
 		return;
 
+	/* We're looking for an object */
+	if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
+		return;
+
 	/* All our symbols are of form <prefix>__mod_XXX_device_table. */
 	name = strstr(symname, "__mod_");
 	if (!name)
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 3f01fd9..c4e7d15 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -132,8 +132,10 @@
 	/* strip trailing .o */
 	s = strrchr(p, '.');
 	if (s != NULL)
-		if (strcmp(s, ".o") == 0)
+		if (strcmp(s, ".o") == 0) {
 			*s = '\0';
+			mod->is_dot_o = 1;
+		}
 
 	/* add to list */
 	mod->name = p;
@@ -587,7 +589,8 @@
 	unsigned int crc;
 	enum export export;
 
-	if (!is_vmlinux(mod->name) && strncmp(symname, "__ksymtab", 9) == 0)
+	if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
+	    strncmp(symname, "__ksymtab", 9) == 0)
 		export = export_from_secname(info, get_secindex(info, sym));
 	else
 		export = export_from_sec(info, get_secindex(info, sym));
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 2031119..51207e4 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -113,6 +113,7 @@
 	int has_cleanup;
 	struct buffer dev_table_buf;
 	char	     srcversion[25];
+	int is_dot_o;
 };
 
 struct elf_info {
diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
index 17a5798..7a2d372 100644
--- a/scripts/xz_wrap.sh
+++ b/scripts/xz_wrap.sh
@@ -12,8 +12,8 @@
 BCJ=
 LZMA2OPTS=
 
-case $ARCH in
-	x86|x86_64)     BCJ=--x86 ;;
+case $SRCARCH in
+	x86)            BCJ=--x86 ;;
 	powerpc)        BCJ=--powerpc ;;
 	ia64)           BCJ=--ia64; LZMA2OPTS=pb=4 ;;
 	arm)            BCJ=--arm ;;
diff --git a/security/commoncap.c b/security/commoncap.c
index 0cf4b53..71a166a 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -29,6 +29,7 @@
 #include <linux/securebits.h>
 #include <linux/user_namespace.h>
 #include <linux/binfmts.h>
+#include <linux/personality.h>
 
 /*
  * If a non-root user executes a setuid-root binary in
@@ -505,6 +506,11 @@
 	}
 skip:
 
+	/* if we have fs caps, clear dangerous personality flags */
+	if (!cap_issubset(new->cap_permitted, old->cap_permitted))
+		bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+
 	/* Don't let someone trace a set[ug]id/setpcap binary with the revised
 	 * credentials unless they have the appropriate permit
 	 */
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 81c03a5..45c32f0 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1939,18 +1939,19 @@
 	char *hostsp;
 	struct socket_smack *ssp = sk->sk_security;
 	struct smk_audit_info ad;
-	struct lsm_network_audit net;
 
 	rcu_read_lock();
 	hostsp = smack_host_label(sap);
 	if (hostsp != NULL) {
-		sk_lbl = SMACK_UNLABELED_SOCKET;
 #ifdef CONFIG_AUDIT
+		struct lsm_network_audit net;
+
 		smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
 		ad.a.u.net->family = sap->sin_family;
 		ad.a.u.net->dport = sap->sin_port;
 		ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr;
 #endif
+		sk_lbl = SMACK_UNLABELED_SOCKET;
 		rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE, &ad);
 	} else {
 		sk_lbl = SMACK_CIPSO_SOCKET;
@@ -2809,11 +2810,14 @@
 	struct socket_smack *osp = other->sk_security;
 	struct socket_smack *nsp = newsk->sk_security;
 	struct smk_audit_info ad;
-	struct lsm_network_audit net;
 	int rc = 0;
 
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+
 	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
 	smk_ad_setfield_u_net_sk(&ad, other);
+#endif
 
 	if (!capable(CAP_MAC_OVERRIDE))
 		rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
@@ -2842,11 +2846,14 @@
 	struct socket_smack *ssp = sock->sk->sk_security;
 	struct socket_smack *osp = other->sk->sk_security;
 	struct smk_audit_info ad;
-	struct lsm_network_audit net;
 	int rc = 0;
 
+#ifdef CONFIG_AUDIT
+	struct lsm_network_audit net;
+
 	smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
 	smk_ad_setfield_u_net_sk(&ad, other->sk);
+#endif
 
 	if (!capable(CAP_MAC_OVERRIDE))
 		rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
@@ -2993,7 +3000,9 @@
 	char *csp;
 	int rc;
 	struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
 	struct lsm_network_audit net;
+#endif
 	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
 		return 0;
 
@@ -3156,7 +3165,9 @@
 	char *sp;
 	int rc;
 	struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
 	struct lsm_network_audit net;
+#endif
 
 	/* handle mapped IPv4 packets arriving via IPv6 sockets */
 	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
@@ -3629,8 +3640,38 @@
 };
 
 
-static __init void init_smack_know_list(void)
+static __init void init_smack_known_list(void)
 {
+	/*
+	 * Initialize CIPSO locks
+	 */
+	spin_lock_init(&smack_known_huh.smk_cipsolock);
+	spin_lock_init(&smack_known_hat.smk_cipsolock);
+	spin_lock_init(&smack_known_star.smk_cipsolock);
+	spin_lock_init(&smack_known_floor.smk_cipsolock);
+	spin_lock_init(&smack_known_invalid.smk_cipsolock);
+	spin_lock_init(&smack_known_web.smk_cipsolock);
+	/*
+	 * Initialize rule list locks
+	 */
+	mutex_init(&smack_known_huh.smk_rules_lock);
+	mutex_init(&smack_known_hat.smk_rules_lock);
+	mutex_init(&smack_known_floor.smk_rules_lock);
+	mutex_init(&smack_known_star.smk_rules_lock);
+	mutex_init(&smack_known_invalid.smk_rules_lock);
+	mutex_init(&smack_known_web.smk_rules_lock);
+	/*
+	 * Initialize rule lists
+	 */
+	INIT_LIST_HEAD(&smack_known_huh.smk_rules);
+	INIT_LIST_HEAD(&smack_known_hat.smk_rules);
+	INIT_LIST_HEAD(&smack_known_star.smk_rules);
+	INIT_LIST_HEAD(&smack_known_floor.smk_rules);
+	INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
+	INIT_LIST_HEAD(&smack_known_web.smk_rules);
+	/*
+	 * Create the known labels list
+	 */
 	list_add(&smack_known_huh.list, &smack_known_list);
 	list_add(&smack_known_hat.list, &smack_known_list);
 	list_add(&smack_known_star.list, &smack_known_list);
@@ -3665,16 +3706,8 @@
 	cred = (struct cred *) current->cred;
 	cred->security = tsp;
 
-	/* initialize the smack_know_list */
-	init_smack_know_list();
-	/*
-	 * Initialize locks
-	 */
-	spin_lock_init(&smack_known_huh.smk_cipsolock);
-	spin_lock_init(&smack_known_hat.smk_cipsolock);
-	spin_lock_init(&smack_known_star.smk_cipsolock);
-	spin_lock_init(&smack_known_floor.smk_cipsolock);
-	spin_lock_init(&smack_known_invalid.smk_cipsolock);
+	/* initialize the smack_known_list */
+	init_smack_known_list();
 
 	/*
 	 * Register with LSM
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 5c32f36..038811c 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -1614,20 +1614,6 @@
 	smk_cipso_doi();
 	smk_unlbl_ambient(NULL);
 
-	mutex_init(&smack_known_floor.smk_rules_lock);
-	mutex_init(&smack_known_hat.smk_rules_lock);
-	mutex_init(&smack_known_huh.smk_rules_lock);
-	mutex_init(&smack_known_invalid.smk_rules_lock);
-	mutex_init(&smack_known_star.smk_rules_lock);
-	mutex_init(&smack_known_web.smk_rules_lock);
-
-	INIT_LIST_HEAD(&smack_known_floor.smk_rules);
-	INIT_LIST_HEAD(&smack_known_hat.smk_rules);
-	INIT_LIST_HEAD(&smack_known_huh.smk_rules);
-	INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
-	INIT_LIST_HEAD(&smack_known_star.smk_rules);
-	INIT_LIST_HEAD(&smack_known_web.smk_rules);
-
 	return err;
 }
 
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 14a286a..8575861 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -419,6 +419,7 @@
  * snd_ctl_add_vmaster_hook - Add a hook to a vmaster control
  * @kcontrol: vmaster kctl element
  * @hook: the hook function
+ * @private_data: the private_data pointer to be saved
  *
  * Adds the given hook to the vmaster control element so that it's called
  * at each time when the value is changed.
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index b4a6aa9..8490f59 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -1019,13 +1019,15 @@
 	irq_cfg = get_irq_config(sscape->type, irq[dev]);
 	if (irq_cfg == INVALID_IRQ) {
 		snd_printk(KERN_ERR "sscape: Invalid IRQ %d\n", irq[dev]);
-		return -ENXIO;
+		err = -ENXIO;
+		goto _release_dma;
 	}
 
 	mpu_irq_cfg = get_irq_config(sscape->type, mpu_irq[dev]);
 	if (mpu_irq_cfg == INVALID_IRQ) {
 		snd_printk(KERN_ERR "sscape: Invalid IRQ %d\n", mpu_irq[dev]);
-		return -ENXIO;
+		err = -ENXIO;
+		goto _release_dma;
 	}
 
 	/*
diff --git a/sound/last.c b/sound/last.c
index bdd0857..7ffc182 100644
--- a/sound/last.c
+++ b/sound/last.c
@@ -38,4 +38,4 @@
 	return 0;
 }
 
-__initcall(alsa_sound_last_init);
+late_initcall_sync(alsa_sound_last_init);
diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c
index 2c79d60..536c4c0 100644
--- a/sound/oss/msnd_pinnacle.c
+++ b/sound/oss/msnd_pinnacle.c
@@ -1294,6 +1294,8 @@
 
 static int upload_dsp_code(void)
 {
+	int ret = 0;
+
 	msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS);
 #ifndef HAVE_DSPCODEH
 	INITCODESIZE = mod_firmware_load(INITCODEFILE, &INITCODE);
@@ -1312,7 +1314,8 @@
 	memcpy_toio(dev.base, PERMCODE, PERMCODESIZE);
 	if (msnd_upload_host(&dev, INITCODE, INITCODESIZE) < 0) {
 		printk(KERN_WARNING LOGNAME ": Error uploading to DSP\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out;
 	}
 #ifdef HAVE_DSPCODEH
 	printk(KERN_INFO LOGNAME ": DSP firmware uploaded (resident)\n");
@@ -1320,12 +1323,13 @@
 	printk(KERN_INFO LOGNAME ": DSP firmware uploaded\n");
 #endif
 
+out:
 #ifndef HAVE_DSPCODEH
 	vfree(INITCODE);
 	vfree(PERMCODE);
 #endif
 
-	return 0;
+	return ret;
 }
 
 #ifdef MSND_CLASSIC
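The msnd_pinnacle change above converts an early return in the middle of upload_dsp_code() into a goto to a common exit, so the firmware buffers vfree'd at the end are released on the error path too. A small stand-alone C sketch of that single-exit cleanup idiom; load_firmware()/upload_firmware() below are made-up placeholders, not the driver's functions.

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical stand-ins for the firmware load and DSP upload steps. */
static char *load_firmware(size_t size) { return calloc(1, size); }
static int upload_firmware(const char *buf, size_t size) { return (buf && size) ? 0 : -1; }

static int upload_dsp_code(void)
{
	int ret = 0;
	char *code = load_firmware(64);

	if (!code)
		return -1;

	/* On failure, record the error and fall through to the shared
	 * cleanup instead of returning early and leaking the buffer. */
	if (upload_firmware(code, 64) < 0) {
		ret = -1;
		goto out;
	}

	printf("firmware uploaded\n");
out:
	free(code);
	return ret;
}

int main(void)
{
	return upload_dsp_code() ? 1 : 0;
}
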
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 8816804..5ca0939 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -2,8 +2,8 @@
 
 config SND_TEA575X
 	tristate
-	depends on SND_FM801_TEA575X_BOOL || SND_ES1968_RADIO || RADIO_SF16FMR2
-	default SND_FM801 || SND_ES1968 || RADIO_SF16FMR2
+	depends on SND_FM801_TEA575X_BOOL || SND_ES1968_RADIO || RADIO_SF16FMR2 || RADIO_MAXIRADIO
+	default SND_FM801 || SND_ES1968 || RADIO_SF16FMR2 || RADIO_MAXIRADIO
 
 menuconfig SND_PCI
 	bool "PCI sound devices"
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
index 8c63200..bc86cb7 100644
--- a/sound/pci/asihpi/hpi_internal.h
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -1,7 +1,7 @@
 /******************************************************************************
 
     AudioScience HPI driver
-    Copyright (C) 1997-2011  AudioScience Inc. <support@audioscience.com>
+    Copyright (C) 1997-2012  AudioScience Inc. <support@audioscience.com>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of version 2 of the GNU General Public License as
@@ -42,7 +42,7 @@
 If this function succeeds, then HpiOs_LockedMem_GetVirtAddr() and
 HpiOs_LockedMem_GetPyhsAddr() will always succed on the returned handle.
 */
-int hpios_locked_mem_alloc(struct consistent_dma_area *p_locked_mem_handle,
+u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_locked_mem_handle,
 							   /**< memory handle */
 	u32 size, /**< Size in bytes to allocate */
 	struct pci_dev *p_os_reference
diff --git a/sound/pci/asihpi/hpios.c b/sound/pci/asihpi/hpios.c
index 87f4385..5ef4fe9 100644
--- a/sound/pci/asihpi/hpios.c
+++ b/sound/pci/asihpi/hpios.c
@@ -1,7 +1,7 @@
 /******************************************************************************
 
     AudioScience HPI driver
-    Copyright (C) 1997-2011  AudioScience Inc. <support@audioscience.com>
+    Copyright (C) 1997-2012  AudioScience Inc. <support@audioscience.com>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of version 2 of the GNU General Public License as
@@ -39,11 +39,11 @@
 
 }
 
-/** Allocated an area of locked memory for bus master DMA operations.
+/** Allocate an area of locked memory for bus master DMA operations.
 
-On error, return -ENOMEM, and *pMemArea.size = 0
+If allocation fails, return 1, and *pMemArea.size = 0
 */
-int hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
+u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
 	struct pci_dev *pdev)
 {
 	/*?? any benefit in using managed dmam_alloc_coherent? */
@@ -62,7 +62,7 @@
 		HPI_DEBUG_LOG(WARNING,
 			"failed to allocate %d bytes locked memory\n", size);
 		p_mem_area->size = 0;
-		return -ENOMEM;
+		return 1;
 	}
 }
 
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 9a9f372..56b4f74 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -851,6 +851,9 @@
 	unsigned int pin_amp_workaround:1; /* pin out-amp takes index
 					    * (e.g. Conexant codecs)
 					    */
+	unsigned int single_adc_amp:1; /* adc in-amp takes no index
+					* (e.g. CX20549 codec)
+					*/
 	unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */
 	unsigned int pins_shutup:1;	/* pins are shut up */
 	unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index b58b4b1..4c054f4 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -418,7 +418,7 @@
 	else
 		buf2[0] = '\0';
 
-	printk(KERN_INFO "HDMI: supports coding type %s:"
+	_snd_printd(SND_PR_VERBOSE, "HDMI: supports coding type %s:"
 			" channels = %d, rates =%s%s\n",
 			cea_audio_coding_type_names[a->format],
 			a->channels,
@@ -442,14 +442,14 @@
 {
 	int i;
 
-	printk(KERN_INFO "HDMI: detected monitor %s at connection type %s\n",
+	_snd_printd(SND_PR_VERBOSE, "HDMI: detected monitor %s at connection type %s\n",
 			e->monitor_name,
 			eld_connection_type_names[e->conn_type]);
 
 	if (e->spk_alloc) {
 		char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
 		snd_print_channel_allocation(e->spk_alloc, buf, sizeof(buf));
-		printk(KERN_INFO "HDMI: available speakers:%s\n", buf);
+		_snd_printd(SND_PR_VERBOSE, "HDMI: available speakers:%s\n", buf);
 	}
 
 	for (i = 0; i < e->sad_count; i++)
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 254ab52..e59e2f0 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -651,9 +651,16 @@
 			snd_iprintf(buffer, "  Amp-In caps: ");
 			print_amp_caps(buffer, codec, nid, HDA_INPUT);
 			snd_iprintf(buffer, "  Amp-In vals: ");
-			print_amp_vals(buffer, codec, nid, HDA_INPUT,
-				       wid_caps & AC_WCAP_STEREO,
-				       wid_type == AC_WID_PIN ? 1 : conn_len);
+			if (wid_type == AC_WID_PIN ||
+			    (codec->single_adc_amp &&
+			     wid_type == AC_WID_AUD_IN))
+				print_amp_vals(buffer, codec, nid, HDA_INPUT,
+					       wid_caps & AC_WCAP_STEREO,
+					       1);
+			else
+				print_amp_vals(buffer, codec, nid, HDA_INPUT,
+					       wid_caps & AC_WCAP_STEREO,
+					       conn_len);
 		}
 		if (wid_caps & AC_WCAP_OUT_AMP) {
 			snd_iprintf(buffer, "  Amp-Out caps: ");
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 8c6523b..d906c5b 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -141,7 +141,6 @@
 	unsigned int hp_laptop:1;
 	unsigned int asus:1;
 	unsigned int pin_eapd_ctrls:1;
-	unsigned int single_adc_amp:1;
 
 	unsigned int adc_switching:1;
 
@@ -687,27 +686,26 @@
 static const struct hda_input_mux cxt5045_capture_source = {
 	.num_items = 2,
 	.items = {
-		{ "IntMic", 0x1 },
-		{ "ExtMic", 0x2 },
+		{ "Internal Mic", 0x1 },
+		{ "Mic",          0x2 },
 	}
 };
 
 static const struct hda_input_mux cxt5045_capture_source_benq = {
-	.num_items = 5,
+	.num_items = 4,
 	.items = {
-		{ "IntMic", 0x1 },
-		{ "ExtMic", 0x2 },
-		{ "LineIn", 0x3 },
-		{ "CD",     0x4 },
-		{ "Mixer",  0x0 },
+		{ "Internal Mic", 0x1 },
+		{ "Mic",          0x2 },
+		{ "Line",         0x3 },
+		{ "Mixer",        0x0 },
 	}
 };
 
 static const struct hda_input_mux cxt5045_capture_source_hp530 = {
 	.num_items = 2,
 	.items = {
-		{ "ExtMic", 0x1 },
-		{ "IntMic", 0x2 },
+		{ "Mic",          0x1 },
+		{ "Internal Mic", 0x2 },
 	}
 };
 
@@ -798,10 +796,8 @@
 }
 
 static const struct snd_kcontrol_new cxt5045_mixers[] = {
-	HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x00, HDA_INPUT),
+	HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
@@ -822,27 +818,15 @@
 };
 
 static const struct snd_kcontrol_new cxt5045_benq_mixers[] = {
-	HDA_CODEC_VOLUME("CD Capture Volume", 0x1a, 0x04, HDA_INPUT),
-	HDA_CODEC_MUTE("CD Capture Switch", 0x1a, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("CD Playback Volume", 0x17, 0x4, HDA_INPUT),
-	HDA_CODEC_MUTE("CD Playback Switch", 0x17, 0x4, HDA_INPUT),
-
-	HDA_CODEC_VOLUME("Line In Capture Volume", 0x1a, 0x03, HDA_INPUT),
-	HDA_CODEC_MUTE("Line In Capture Switch", 0x1a, 0x03, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Playback Volume", 0x17, 0x3, HDA_INPUT),
-	HDA_CODEC_MUTE("Line In Playback Switch", 0x17, 0x3, HDA_INPUT),
-
-	HDA_CODEC_VOLUME("Mixer Capture Volume", 0x1a, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer Capture Switch", 0x1a, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Playback Volume", 0x17, 0x3, HDA_INPUT),
+	HDA_CODEC_MUTE("Line Playback Switch", 0x17, 0x3, HDA_INPUT),
 
 	{}
 };
 
 static const struct snd_kcontrol_new cxt5045_mixers_hp530[] = {
-	HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x00, HDA_INPUT),
+	HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
@@ -946,10 +930,10 @@
 	/* Output controls */
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x10, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Speaker Playback Switch", 0x10, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Node 11 Playback Volume", 0x11, 0x0, HDA_OUTPUT),
-	HDA_CODEC_MUTE("Node 11 Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Node 12 Playback Volume", 0x12, 0x0, HDA_OUTPUT),
-	HDA_CODEC_MUTE("Node 12 Playback Switch", 0x12, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("HP-OUT Playback Volume", 0x11, 0x0, HDA_OUTPUT),
+	HDA_CODEC_MUTE("HP-OUT Playback Switch", 0x11, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("LINE1 Playback Volume", 0x12, 0x0, HDA_OUTPUT),
+	HDA_CODEC_MUTE("LINE1 Playback Switch", 0x12, 0x0, HDA_OUTPUT),
 	
 	/* Modes for retasking pin widgets */
 	CXT_PIN_MODE("HP-OUT pin mode", 0x11, CXT_PIN_DIR_INOUT),
@@ -960,16 +944,16 @@
 
 	/* Loopback mixer controls */
 
-	HDA_CODEC_VOLUME("Mixer-1 Volume", 0x17, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer-1 Switch", 0x17, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mixer-2 Volume", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer-2 Switch", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mixer-3 Volume", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer-3 Switch", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mixer-4 Volume", 0x17, 0x3, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer-4 Switch", 0x17, 0x3, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mixer-5 Volume", 0x17, 0x4, HDA_INPUT),
-	HDA_CODEC_MUTE("Mixer-5 Switch", 0x17, 0x4, HDA_INPUT),
+	HDA_CODEC_VOLUME("PCM Volume", 0x17, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("PCM Switch", 0x17, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("MIC1 pin Volume", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("MIC1 pin Switch", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("LINE1 pin Volume", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_MUTE("LINE1 pin Switch", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_VOLUME("HP-OUT pin Volume", 0x17, 0x3, HDA_INPUT),
+	HDA_CODEC_MUTE("HP-OUT pin Switch", 0x17, 0x3, HDA_INPUT),
+	HDA_CODEC_VOLUME("CD pin Volume", 0x17, 0x4, HDA_INPUT),
+	HDA_CODEC_MUTE("CD pin Switch", 0x17, 0x4, HDA_INPUT),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 		.name = "Input Source",
@@ -978,16 +962,8 @@
 		.put = conexant_mux_enum_put,
 	},
 	/* Audio input controls */
-	HDA_CODEC_VOLUME("Input-1 Volume", 0x1a, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Input-1 Switch", 0x1a, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Input-2 Volume", 0x1a, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Input-2 Switch", 0x1a, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Input-3 Volume", 0x1a, 0x2, HDA_INPUT),
-	HDA_CODEC_MUTE("Input-3 Switch", 0x1a, 0x2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Input-4 Volume", 0x1a, 0x3, HDA_INPUT),
-	HDA_CODEC_MUTE("Input-4 Switch", 0x1a, 0x3, HDA_INPUT),
-	HDA_CODEC_VOLUME("Input-5 Volume", 0x1a, 0x4, HDA_INPUT),
-	HDA_CODEC_MUTE("Input-5 Switch", 0x1a, 0x4, HDA_INPUT),
+	HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -1009,10 +985,6 @@
 	{0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 	{0x18, AC_VERB_SET_DIGI_CONVERT_1, 0},
 
-	/* Start with output sum widgets muted and their output gains at min */
-	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-
 	/* Unmute retasking pin widget output buffers since the default
 	 * state appears to be output.  As the pin mode is changed by the
 	 * user the pin mode control will take care of enabling the pin's
@@ -1027,11 +999,11 @@
 	/* Set ADC connection select to match default mixer setting (mic1
 	 * pin)
 	 */
-	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
-	{0x17, AC_VERB_SET_CONNECT_SEL, 0x00},
+	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x01},
+	{0x17, AC_VERB_SET_CONNECT_SEL, 0x01},
 
 	/* Mute all inputs to mixer widget (even unconnected ones) */
-	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, /* Mixer pin */
+	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, /* Mixer */
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, /* Mic1 pin */
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, /* Line pin */
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, /* HP pin */
@@ -1110,7 +1082,7 @@
 	if (!spec)
 		return -ENOMEM;
 	codec->spec = spec;
-	codec->pin_amp_workaround = 1;
+	codec->single_adc_amp = 1;
 
 	spec->multiout.max_channels = 2;
 	spec->multiout.num_dacs = ARRAY_SIZE(cxt5045_dac_nids);
@@ -3999,9 +3971,14 @@
 	int i;
 
 	mute_outputs(codec, spec->multiout.num_dacs, spec->multiout.dac_nids);
-	for (i = 0; i < cfg->hp_outs; i++)
+	for (i = 0; i < cfg->hp_outs; i++) {
+		unsigned int val = PIN_OUT;
+		if (snd_hda_query_pin_caps(codec, cfg->hp_pins[i]) &
+		    AC_PINCAP_HP_DRV)
+			val |= AC_PINCTL_HP_EN;
 		snd_hda_codec_write(codec, cfg->hp_pins[i], 0,
-				    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);
+				    AC_VERB_SET_PIN_WIDGET_CONTROL, val);
+	}
 	mute_outputs(codec, cfg->hp_outs, cfg->hp_pins);
 	mute_outputs(codec, cfg->line_outs, cfg->line_out_pins);
 	mute_outputs(codec, cfg->speaker_outs, cfg->speaker_pins);
@@ -4220,7 +4197,7 @@
 		int idx = get_input_connection(codec, adc_nid, nid);
 		if (idx < 0)
 			continue;
-		if (spec->single_adc_amp)
+		if (codec->single_adc_amp)
 			idx = 0;
 		return cx_auto_add_volume_idx(codec, label, pfx,
 					      cidx, adc_nid, HDA_INPUT, idx);
@@ -4275,7 +4252,7 @@
 		if (cidx < 0)
 			continue;
 		input_conn[i] = spec->imux_info[i].adc;
-		if (!spec->single_adc_amp)
+		if (!codec->single_adc_amp)
 			input_conn[i] |= cidx << 8;
 		if (i > 0 && input_conn[i] != input_conn[0])
 			multi_connection = 1;
@@ -4419,8 +4396,10 @@
 
 enum {
 	CXT_PINCFG_LENOVO_X200,
+	CXT_PINCFG_LENOVO_TP410,
 };
 
+/* ThinkPad X200 & co with cxt5051 */
 static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
 	{ 0x16, 0x042140ff }, /* HP (seq# overridden) */
 	{ 0x17, 0x21a11000 }, /* dock-mic */
@@ -4429,15 +4408,33 @@
 	{}
 };
 
-static const struct cxt_pincfg *cxt_pincfg_tbl[] = {
-	[CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200,
+/* ThinkPad 410/420/510/520, X201 & co with cxt5066 */
+static const struct cxt_pincfg cxt_pincfg_lenovo_tp410[] = {
+	{ 0x19, 0x042110ff }, /* HP (seq# overridden) */
+	{ 0x1a, 0x21a190f0 }, /* dock-mic */
+	{ 0x1c, 0x212140ff }, /* dock-HP */
+	{}
 };
 
-static const struct snd_pci_quirk cxt_fixups[] = {
+static const struct cxt_pincfg *cxt_pincfg_tbl[] = {
+	[CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200,
+	[CXT_PINCFG_LENOVO_TP410] = cxt_pincfg_lenovo_tp410,
+};
+
+static const struct snd_pci_quirk cxt5051_fixups[] = {
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
 	{}
 };
 
+static const struct snd_pci_quirk cxt5066_fixups[] = {
+	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
+	SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
+	SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
+	SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
+	{}
+};
+
 /* add "fake" mute amp-caps to DACs on cx5051 so that mixer mute switches
  * can be created (bko#42825)
  */
@@ -4466,19 +4463,21 @@
 	if (!spec)
 		return -ENOMEM;
 	codec->spec = spec;
-	codec->pin_amp_workaround = 1;
 
 	switch (codec->vendor_id) {
 	case 0x14f15045:
-		spec->single_adc_amp = 1;
+		codec->single_adc_amp = 1;
 		break;
 	case 0x14f15051:
 		add_cx5051_fake_mutes(codec);
+		codec->pin_amp_workaround = 1;
+		apply_pin_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
 		break;
+	default:
+		codec->pin_amp_workaround = 1;
+		apply_pin_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
 	}
 
-	apply_pin_fixup(codec, cxt_fixups, cxt_pincfg_tbl);
-
 	/* Show mute-led control only on HP laptops
 	 * This is a sort of white-list: on HP laptops, EAPD corresponds
 	 * only to the mute-LED without actualy amp function.  Meanwhile,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 540cd13..83f345f 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -757,8 +757,6 @@
 	struct hdmi_spec *spec = codec->spec;
 	int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
 	int pin_nid;
-	int pd = !!(res & AC_UNSOL_RES_PD);
-	int eldv = !!(res & AC_UNSOL_RES_ELDV);
 	int pin_idx;
 	struct hda_jack_tbl *jack;
 
@@ -768,9 +766,10 @@
 	pin_nid = jack->nid;
 	jack->jack_dirty = 1;
 
-	printk(KERN_INFO
+	_snd_printd(SND_PR_VERBOSE,
 		"HDMI hot plug event: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
-		codec->addr, pin_nid, pd, eldv);
+		codec->addr, pin_nid,
+		!!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
 
 	pin_idx = pin_nid_to_pin_index(spec, pin_nid);
 	if (pin_idx < 0)
@@ -992,7 +991,7 @@
 	if (eld->monitor_present)
 		eld_valid	= !!(present & AC_PINSENSE_ELDV);
 
-	printk(KERN_INFO
+	_snd_printd(SND_PR_VERBOSE,
 		"HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
 		codec->addr, pin_nid, eld->monitor_present, eld_valid);
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9917e55..e65e354 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1445,6 +1445,13 @@
 	ALC_FIXUP_ACT_BUILD,
 };
 
+static void alc_apply_pincfgs(struct hda_codec *codec,
+			      const struct alc_pincfg *cfg)
+{
+	for (; cfg->nid; cfg++)
+		snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
+}
+
 static void alc_apply_fixup(struct hda_codec *codec, int action)
 {
 	struct alc_spec *spec = codec->spec;
@@ -1478,9 +1485,7 @@
 			snd_printdd(KERN_INFO "hda_codec: %s: "
 				    "Apply pincfg for %s\n",
 				    codec->chip_name, modelname);
-			for (; cfg->nid; cfg++)
-				snd_hda_codec_set_pincfg(codec, cfg->nid,
-							 cfg->val);
+			alc_apply_pincfgs(codec, cfg);
 			break;
 		case ALC_FIXUP_VERBS:
 			if (action != ALC_FIXUP_ACT_PROBE || !fix->v.verbs)
@@ -3398,8 +3403,10 @@
 	for (;;) {
 		badness = fill_and_eval_dacs(codec, fill_hardwired,
 					     fill_mio_first);
-		if (badness < 0)
+		if (badness < 0) {
+			kfree(best_cfg);
 			return badness;
+		}
 		debug_badness("==> lo_type=%d, wired=%d, mio=%d, badness=0x%x\n",
 			      cfg->line_out_type, fill_hardwired, fill_mio_first,
 			      badness);
@@ -3434,7 +3441,7 @@
 			cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
 			fill_hardwired = true;
 			continue;
-		} 
+		}
 		if (cfg->hp_outs > 0 &&
 		    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
 			cfg->speaker_outs = cfg->line_outs;
@@ -3448,7 +3455,7 @@
 			cfg->line_out_type = AUTO_PIN_HP_OUT;
 			fill_hardwired = true;
 			continue;
-		} 
+		}
 		break;
 	}
 
@@ -4423,7 +4430,7 @@
 static int alc880_parse_auto_config(struct hda_codec *codec)
 {
 	static const hda_nid_t alc880_ignore[] = { 0x1d, 0 };
-	static const hda_nid_t alc880_ssids[] = { 0x15, 0x1b, 0x14, 0 }; 
+	static const hda_nid_t alc880_ssids[] = { 0x15, 0x1b, 0x14, 0 };
 	return alc_parse_auto_config(codec, alc880_ignore, alc880_ssids);
 }
 
@@ -4859,6 +4866,7 @@
 	ALC260_FIXUP_GPIO1_TOGGLE,
 	ALC260_FIXUP_REPLACER,
 	ALC260_FIXUP_HP_B1900,
+	ALC260_FIXUP_KN1,
 };
 
 static void alc260_gpio1_automute(struct hda_codec *codec)
@@ -4886,6 +4894,36 @@
 	}
 }
 
+static void alc260_fixup_kn1(struct hda_codec *codec,
+			     const struct alc_fixup *fix, int action)
+{
+	struct alc_spec *spec = codec->spec;
+	static const struct alc_pincfg pincfgs[] = {
+		{ 0x0f, 0x02214000 }, /* HP/speaker */
+		{ 0x12, 0x90a60160 }, /* int mic */
+		{ 0x13, 0x02a19000 }, /* ext mic */
+		{ 0x18, 0x01446000 }, /* SPDIF out */
+		/* disable bogus I/O pins */
+		{ 0x10, 0x411111f0 },
+		{ 0x11, 0x411111f0 },
+		{ 0x14, 0x411111f0 },
+		{ 0x15, 0x411111f0 },
+		{ 0x16, 0x411111f0 },
+		{ 0x17, 0x411111f0 },
+		{ 0x19, 0x411111f0 },
+		{ }
+	};
+
+	switch (action) {
+	case ALC_FIXUP_ACT_PRE_PROBE:
+		alc_apply_pincfgs(codec, pincfgs);
+		break;
+	case ALC_FIXUP_ACT_PROBE:
+		spec->init_amp = ALC_INIT_NONE;
+		break;
+	}
+}
+
 static const struct alc_fixup alc260_fixups[] = {
 	[ALC260_FIXUP_HP_DC5750] = {
 		.type = ALC_FIXUP_PINS,
@@ -4936,7 +4974,11 @@
 		.v.func = alc260_fixup_gpio1_toggle,
 		.chained = true,
 		.chain_id = ALC260_FIXUP_COEF,
-	}
+	},
+	[ALC260_FIXUP_KN1] = {
+		.type = ALC_FIXUP_FUNC,
+		.v.func = alc260_fixup_kn1,
+	},
 };
 
 static const struct snd_pci_quirk alc260_fixup_tbl[] = {
@@ -4946,6 +4988,7 @@
 	SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750),
 	SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900),
 	SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1),
+	SND_PCI_QUIRK(0x152d, 0x0729, "Quanta KN1", ALC260_FIXUP_KN1),
 	SND_PCI_QUIRK(0x161f, 0x2057, "Replacer 672V", ALC260_FIXUP_REPLACER),
 	SND_PCI_QUIRK(0x1631, 0xc017, "PB V7900", ALC260_FIXUP_COEF),
 	{}
@@ -5269,7 +5312,9 @@
 			{ 0x16, 0x99130111 }, /* CLFE speaker */
 			{ 0x17, 0x99130112 }, /* surround speaker */
 			{ }
-		}
+		},
+		.chained = true,
+		.chain_id = ALC882_FIXUP_GPIO1,
 	},
 	[ALC882_FIXUP_ACER_ASPIRE_8930G] = {
 		.type = ALC_FIXUP_PINS,
@@ -5312,7 +5357,9 @@
 			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
 			{ 0x20, AC_VERB_SET_PROC_COEF, 0x3050 },
 			{ }
-		}
+		},
+		.chained = true,
+		.chain_id = ALC882_FIXUP_GPIO1,
 	},
 	[ALC885_FIXUP_MACPRO_GPIO] = {
 		.type = ALC_FIXUP_FUNC,
@@ -5359,6 +5406,7 @@
 		      ALC882_FIXUP_ACER_ASPIRE_4930G),
 	SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
 	SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
+	SND_PCI_QUIRK(0x1025, 0x026b, "Acer Aspire 8940G", ALC882_FIXUP_ACER_ASPIRE_8930G),
 	SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736),
 	SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
@@ -5384,6 +5432,7 @@
 	SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
+	SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
 	SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
 	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
@@ -5399,6 +5448,13 @@
 	{}
 };
 
+static const struct alc_model_fixup alc882_fixup_models[] = {
+	{.id = ALC882_FIXUP_ACER_ASPIRE_4930G, .name = "acer-aspire-4930g"},
+	{.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"},
+	{.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
+	{}
+};
+
 /*
  * BIOS auto configuration
  */
@@ -5439,7 +5495,8 @@
 	if (err < 0)
 		goto error;
 
-	alc_pick_fixup(codec, NULL, alc882_fixup_tbl, alc882_fixups);
+	alc_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl,
+		       alc882_fixups);
 	alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
 
 	alc_auto_parse_customize_define(codec);
@@ -6079,7 +6136,7 @@
 	 * Basically the device should work as is without the fixup table.
 	 * If BIOS doesn't give a proper info, enable the corresponding
 	 * fixup entry.
-	 */ 
+	 */
 	SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A",
 		      ALC269_FIXUP_AMIC),
 	SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC),
@@ -6296,7 +6353,7 @@
 {
 	if (action == ALC_FIXUP_ACT_PRE_PROBE)
 		codec->no_jack_detect = 1;
-}	
+}
 
 static const struct alc_fixup alc861_fixups[] = {
 	[ALC861_FIXUP_FSC_AMILO_PI1505] = {
@@ -6714,7 +6771,7 @@
 	 * Basically the device should work as is without the fixup table.
 	 * If BIOS doesn't give a proper info, enable the corresponding
 	 * fixup entry.
-	 */ 
+	 */
 	SND_PCI_QUIRK(0x1043, 0x1000, "ASUS N50Vm", ALC662_FIXUP_ASUS_MODE1),
 	SND_PCI_QUIRK(0x1043, 0x1092, "ASUS NB", ALC662_FIXUP_ASUS_MODE3),
 	SND_PCI_QUIRK(0x1043, 0x1173, "ASUS K73Jn", ALC662_FIXUP_ASUS_MODE1),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 33a9946..4742cac 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -5063,12 +5063,11 @@
 	if (spec->gpio_led_polarity)
 		muted = !muted;
 
-	/*polarity defines *not* muted state level*/
 	if (!spec->vref_mute_led_nid) {
 		if (muted)
-			spec->gpio_data &= ~spec->gpio_led; /* orange */
+			spec->gpio_data |= spec->gpio_led;
 		else
-			spec->gpio_data |= spec->gpio_led; /* white */
+			spec->gpio_data &= ~spec->gpio_led;
 		stac_gpio_set(codec, spec->gpio_mask,
 				spec->gpio_dir, spec->gpio_data);
 	} else {
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 6508e8b..59d8efa 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -57,7 +57,7 @@
 	select SND_SOC_TPA6130A2 if I2C
 	select SND_SOC_TLV320DAC33 if I2C
 	select SND_SOC_TWL4030 if TWL4030_CORE
-	select SND_SOC_TWL6040 if TWL4030_CORE
+	select SND_SOC_TWL6040 if TWL6040_CORE
 	select SND_SOC_UDA134X
 	select SND_SOC_UDA1380 if I2C
 	select SND_SOC_WL1273 if MFD_WL1273_CORE
@@ -276,7 +276,6 @@
 	tristate
 
 config SND_SOC_TWL6040
-	select TWL6040_CORE
 	tristate
 
 config SND_SOC_UDA134X
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index f8e10ce..b3e24f2 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -140,7 +140,7 @@
  * min : 0xFE : -115.0 dB
  * mute: 0xFF
  */
-static const DECLARE_TLV_DB_SCALE(out_tlv, -11500, 50, 1);
+static const DECLARE_TLV_DB_SCALE(out_tlv, -11550, 50, 1);
 
 static const struct snd_kcontrol_new ak4642_snd_controls[] = {
 
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d192626..8e92fb8 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -143,11 +143,11 @@
 }
 
 /*
- * using codec assist to small pop, hp_powerup or lineout_powerup
- * should stay setting until vag_powerup is fully ramped down,
- * vag fully ramped down require 400ms.
+ * As the manual describes, the ADC/DAC only works when VAG is powered up,
+ * so enable VAG before bringing the ADC/DAC up.
+ * On power down, we need to wait 400ms for VAG to ramp down fully.
  */
-static int small_pop_event(struct snd_soc_dapm_widget *w,
+static int power_vag_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
 	switch (event) {
@@ -156,7 +156,7 @@
 			SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
 		break;
 
-	case SND_SOC_DAPM_PRE_PMD:
+	case SND_SOC_DAPM_POST_PMD:
 		snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
 			SGTL5000_VAG_POWERUP, 0);
 		msleep(400);
@@ -201,12 +201,8 @@
 				mic_bias_event,
 				SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
-	SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
-			small_pop_event,
-			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
-	SND_SOC_DAPM_PGA_E("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0,
-			small_pop_event,
-			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
 
 	SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
 	SND_SOC_DAPM_MUX("Headphone Mux", SND_SOC_NOPM, 0, 0, &dac_mux),
@@ -221,8 +217,11 @@
 				0, SGTL5000_CHIP_DIG_POWER,
 				1, 0),
 
-	SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
+	SND_SOC_DAPM_SUPPLY("VAG_POWER", SGTL5000_CHIP_ANA_POWER, 7, 0,
+			    power_vag_event,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 
+	SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
 	SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
 };
 
@@ -231,9 +230,11 @@
 	{"Capture Mux", "LINE_IN", "LINE_IN"},	/* line_in --> adc_mux */
 	{"Capture Mux", "MIC_IN", "MIC_IN"},	/* mic_in --> adc_mux */
 
+	{"ADC", NULL, "VAG_POWER"},
 	{"ADC", NULL, "Capture Mux"},		/* adc_mux --> adc */
 	{"AIFOUT", NULL, "ADC"},		/* adc --> i2s_out */
 
+	{"DAC", NULL, "VAG_POWER"},
 	{"DAC", NULL, "AIFIN"},			/* i2s-->dac,skip audio mux */
 	{"Headphone Mux", "DAC", "DAC"},	/* dac --> hp_mux */
 	{"LO", NULL, "DAC"},			/* dac --> line_out */
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 2d8c6b8..dc7509b 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -26,7 +26,6 @@
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/i2c/twl.h>
 #include <linux/mfd/twl6040.h>
 
 #include <sound/core.h>
@@ -1528,7 +1527,7 @@
 static int twl6040_probe(struct snd_soc_codec *codec)
 {
 	struct twl6040_data *priv;
-	struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
+	struct twl6040_codec_data *pdata = dev_get_platdata(codec->dev);
 	struct platform_device *pdev = container_of(codec->dev,
 						   struct platform_device, dev);
 	int ret = 0;
diff --git a/sound/soc/imx/imx-audmux.c b/sound/soc/imx/imx-audmux.c
index 1765a19..f237003 100644
--- a/sound/soc/imx/imx-audmux.c
+++ b/sound/soc/imx/imx-audmux.c
@@ -73,6 +73,9 @@
 	if (!buf)
 		return -ENOMEM;
 
+	if (!audmux_base)
+		return -ENOSYS;
+
 	if (audmux_clk)
 		clk_prepare_enable(audmux_clk);
 
@@ -152,7 +155,7 @@
 		return;
 	}
 
-	for (i = 1; i < 8; i++) {
+	for (i = 0; i < MX31_AUDMUX_PORT6_SSI_PINS_6 + 1; i++) {
 		snprintf(buf, sizeof(buf), "ssi%d", i);
 		if (!debugfs_create_file(buf, 0444, audmux_debugfs_root,
 					 (void *)i, &audmux_debugfs_fops))
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index e00dd0b..deafbfa 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -97,7 +97,7 @@
 
 config SND_OMAP_SOC_OMAP_ABE_TWL6040
 	tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
-	depends on TWL4030_CORE && SND_OMAP_SOC && ARCH_OMAP4
+	depends on TWL6040_CORE && SND_OMAP_SOC && ARCH_OMAP4
 	select SND_OMAP_SOC_DMIC
 	select SND_OMAP_SOC_MCPDM
 	select SND_SOC_TWL6040
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 609abd5..d085837 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -17,6 +17,7 @@
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
+#include <linux/io.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/initval.h>
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index e19c24a..accdcb7 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1081,6 +1081,8 @@
 		snd_soc_dapm_new_controls(&platform->dapm,
 			driver->dapm_widgets, driver->num_dapm_widgets);
 
+	platform->dapm.idle_bias_off = 1;
+
 	if (driver->probe) {
 		ret = driver->probe(platform);
 		if (ret < 0) {
diff --git a/sound/soc/tegra/tegra_i2s.c b/sound/soc/tegra/tegra_i2s.c
index 33509de..e533499 100644
--- a/sound/soc/tegra/tegra_i2s.c
+++ b/sound/soc/tegra/tegra_i2s.c
@@ -79,11 +79,15 @@
 	struct tegra_i2s *i2s = s->private;
 	int i;
 
+	clk_enable(i2s->clk_i2s);
+
 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
 		u32 val = tegra_i2s_read(i2s, regs[i].offset);
 		seq_printf(s, "%s = %08x\n", regs[i].name, val);
 	}
 
+	clk_disable(i2s->clk_i2s);
+
 	return 0;
 }
 
@@ -112,7 +116,7 @@
 		debugfs_remove(i2s->debug);
 }
 #else
-static inline void tegra_i2s_debug_add(struct tegra_i2s *i2s, int id)
+static inline void tegra_i2s_debug_add(struct tegra_i2s *i2s)
 {
 }
 
diff --git a/sound/soc/tegra/tegra_spdif.c b/sound/soc/tegra/tegra_spdif.c
index 475428c..9ff2c60 100644
--- a/sound/soc/tegra/tegra_spdif.c
+++ b/sound/soc/tegra/tegra_spdif.c
@@ -79,11 +79,15 @@
 	struct tegra_spdif *spdif = s->private;
 	int i;
 
+	clk_enable(spdif->clk_spdif_out);
+
 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
 		u32 val = tegra_spdif_read(spdif, regs[i].offset);
 		seq_printf(s, "%s = %08x\n", regs[i].name, val);
 	}
 
+	clk_disable(spdif->clk_spdif_out);
+
 	return 0;
 }
 
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 416684b..26b823b 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -19,3 +19,5 @@
 cscope*
 config.mak
 config.mak.autogen
+*-bison.*
+*-flex.*
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 820371f..03059e7 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -237,21 +237,20 @@
 FLEX = $(CROSS_COMPILE)flex
 BISON= $(CROSS_COMPILE)bison
 
-event-parser:
-	$(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
+$(OUTPUT)util/parse-events-flex.c: util/parse-events.l
 	$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
 
-$(OUTPUT)util/parse-events-flex.c: event-parser
-$(OUTPUT)util/parse-events-bison.c: event-parser
+$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
+	$(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
 
-pmu-parser:
-	$(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
+$(OUTPUT)util/pmu-flex.c: util/pmu.l
 	$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
 
-$(OUTPUT)util/pmu-flex.c: pmu-parser
-$(OUTPUT)util/pmu-bison.c: pmu-parser
+$(OUTPUT)util/pmu-bison.c: util/pmu.y
+	$(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
 
-$(OUTPUT)util/parse-events.o: event-parser pmu-parser
+$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
+$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
 
 LIB_FILE=$(OUTPUT)libperf.a
 
@@ -527,7 +526,7 @@
 endif
 
 ifdef NO_GTK2
-	BASIC_CFLAGS += -DNO_GTK2
+	BASIC_CFLAGS += -DNO_GTK2_SUPPORT
 else
 	FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0)
 	ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y)
@@ -852,8 +851,6 @@
 	@echo '  html		- make html documentation'
 	@echo '  info		- make GNU info documentation (access with info <foo>)'
 	@echo '  pdf		- make pdf documentation'
-	@echo '  event-parser	- make event parser code'
-	@echo '  pmu-parser	- make pmu format parser code'
 	@echo '  TAGS		- use etags to make tag information for source browsing'
 	@echo '  tags		- use ctags to make tag information for source browsing'
 	@echo '  cscope	- use cscope to make interactive browsing database'
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index fb8b5f8..1cad3af 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -17,6 +17,7 @@
 #include "util/debug.h"
 
 #include <sys/prctl.h>
+#include <sys/resource.h>
 
 #include <semaphore.h>
 #include <pthread.h>
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index e3c63ae..8ef59f8 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -42,6 +42,7 @@
 #include "util/debug.h"
 
 #include <assert.h>
+#include <elf.h>
 #include <fcntl.h>
 
 #include <stdio.h>
@@ -59,6 +60,7 @@
 #include <sys/prctl.h>
 #include <sys/wait.h>
 #include <sys/uio.h>
+#include <sys/utsname.h>
 #include <sys/mman.h>
 
 #include <linux/unistd.h>
@@ -162,12 +164,40 @@
 	symbol__annotate_zero_histograms(sym);
 }
 
+static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
+{
+	struct utsname uts;
+	int err = uname(&uts);
+
+	ui__warning("Out of bounds address found:\n\n"
+		    "Addr:   %" PRIx64 "\n"
+		    "DSO:    %s %c\n"
+		    "Map:    %" PRIx64 "-%" PRIx64 "\n"
+		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
+		    "Arch:   %s\n"
+		    "Kernel: %s\n"
+		    "Tools:  %s\n\n"
+		    "Not all samples will be on the annotation output.\n\n"
+		    "Please report to linux-kernel@vger.kernel.org\n",
+		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
+		    map->start, map->end, sym->start, sym->end,
+		    sym->binding == STB_GLOBAL ? 'g' :
+		    sym->binding == STB_LOCAL  ? 'l' : 'w', sym->name,
+		    err ? "[unknown]" : uts.machine,
+		    err ? "[unknown]" : uts.release, perf_version_string);
+	if (use_browser <= 0)
+		sleep(5);
+
+	map->erange_warned = true;
+}
+
 static void perf_top__record_precise_ip(struct perf_top *top,
 					struct hist_entry *he,
 					int counter, u64 ip)
 {
 	struct annotation *notes;
 	struct symbol *sym;
+	int err;
 
 	if (he == NULL || he->ms.sym == NULL ||
 	    ((top->sym_filter_entry == NULL ||
@@ -189,9 +219,12 @@
 	}
 
 	ip = he->ms.map->map_ip(he->ms.map, ip);
-	symbol__inc_addr_samples(sym, he->ms.map, counter, ip);
+	err = symbol__inc_addr_samples(sym, he->ms.map, counter, ip);
 
 	pthread_mutex_unlock(&notes->lock);
+
+	if (err == -ERANGE && !he->ms.map->erange_warned)
+		ui__warn_map_erange(he->ms.map, sym, ip);
 }
 
 static void perf_top__show_details(struct perf_top *top)
@@ -615,6 +648,7 @@
 
 /* Tag samples to be skipped. */
 static const char *skip_symbols[] = {
+	"intel_idle",
 	"default_idle",
 	"native_safe_halt",
 	"cpu_idle",
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
index 677e59d..95b6f8b 100644
--- a/tools/perf/perf-archive.sh
+++ b/tools/perf/perf-archive.sh
@@ -29,13 +29,14 @@
 fi
 
 MANIFEST=$(mktemp /tmp/perf-archive-manifest.XXXXXX)
+PERF_BUILDID_LINKDIR=$(readlink -f $PERF_BUILDID_DIR)/
 
 cut -d ' ' -f 1 $BUILDIDS | \
 while read build_id ; do
 	linkname=$PERF_BUILDID_DIR.build-id/${build_id:0:2}/${build_id:2}
 	filename=$(readlink -f $linkname)
 	echo ${linkname#$PERF_BUILDID_DIR} >> $MANIFEST
-	echo ${filename#$PERF_BUILDID_DIR} >> $MANIFEST
+	echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST
 done
 
 tar cfj $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 199f69e..08c6d13 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -64,8 +64,8 @@
 
 	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
 
-	if (addr > sym->end)
-		return 0;
+	if (addr < sym->start || addr > sym->end)
+		return -ERANGE;
 
 	offset = addr - sym->start;
 	h = annotation__histogram(notes, evidx);
@@ -561,16 +561,12 @@
 {
 	struct annotation *notes = symbol__annotation(sym);
 	struct sym_hist *h = annotation__histogram(notes, evidx);
-	struct objdump_line *pos;
-	int len = sym->end - sym->start;
+	int len = sym->end - sym->start, offset;
 
 	h->sum = 0;
-
-	list_for_each_entry(pos, &notes->src->source, node) {
-		if (pos->offset != -1 && pos->offset < len) {
-			h->addr[pos->offset] = h->addr[pos->offset] * 7 / 8;
-			h->sum += h->addr[pos->offset];
-		}
+	for (offset = 0; offset < len; ++offset) {
+		h->addr[offset] = h->addr[offset] * 7 / 8;
+		h->sum += h->addr[offset];
 	}
 }
 
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 2ec4b60..9f6d630 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -256,6 +256,18 @@
 		if (!cmp) {
 			he->period += period;
 			++he->nr_events;
+
+			/* If the map of an existing hist_entry has
+			 * become out-of-date due to an exec() or
+			 * similar, update it.  Otherwise we will
+			 * mis-adjust symbol addresses when computing
+			 * the history counter to increment.
+			 */
+			if (he->ms.map != entry->ms.map) {
+				he->ms.map = entry->ms.map;
+				if (he->ms.map)
+					he->ms.map->referenced = true;
+			}
 			goto out;
 		}
 
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index dea6d1c..35ae568 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -38,6 +38,7 @@
 	RB_CLEAR_NODE(&self->rb_node);
 	self->groups   = NULL;
 	self->referenced = false;
+	self->erange_warned = false;
 }
 
 struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index b100c20..81371ba 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -33,6 +33,7 @@
 	u64			end;
 	u8 /* enum map_type */	type;
 	bool			referenced;
+	bool			erange_warned;
 	u32			priv;
 	u64			pgoff;
 
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 9412e3b..1efd3be 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -826,8 +826,16 @@
 {
 	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
-	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
-		return perf_session__find_machine(session, event->ip.pid);
+	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
+		u32 pid;
+
+		if (event->header.type == PERF_RECORD_MMAP)
+			pid = event->mmap.pid;
+		else
+			pid = event->ip.pid;
+
+		return perf_session__find_machine(session, pid);
+	}
 
 	return perf_session__find_host_machine(session);
 }
@@ -868,11 +876,11 @@
 		dump_sample(session, event, sample);
 		if (evsel == NULL) {
 			++session->hists.stats.nr_unknown_id;
-			return -1;
+			return 0;
 		}
 		if (machine == NULL) {
 			++session->hists.stats.nr_unprocessable_samples;
-			return -1;
+			return 0;
 		}
 		return tool->sample(tool, event, sample, evsel, machine);
 	case PERF_RECORD_MMAP:
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index d7a1c4a..2f83e5d 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -125,6 +125,9 @@
 
 static bool map_symbol__toggle_fold(struct map_symbol *self)
 {
+	if (!self)
+		return false;
+
 	if (!self->has_children)
 		return false;
 
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index a457d21..e9fff98 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -240,9 +240,13 @@
 		return -ENODEV;
 	}
 
+	mutex_lock(&kvm->slots_lock);
+
 	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
-	if (!kvm->arch.iommu_domain)
-		return -ENOMEM;
+	if (!kvm->arch.iommu_domain) {
+		r = -ENOMEM;
+		goto out_unlock;
+	}
 
 	if (!allow_unsafe_assigned_interrupts &&
 	    !iommu_domain_has_cap(kvm->arch.iommu_domain,
@@ -253,17 +257,16 @@
 		       " module option.\n", __func__);
 		iommu_domain_free(kvm->arch.iommu_domain);
 		kvm->arch.iommu_domain = NULL;
-		return -EPERM;
+		r = -EPERM;
+		goto out_unlock;
 	}
 
 	r = kvm_iommu_map_memslots(kvm);
 	if (r)
-		goto out_unmap;
+		kvm_iommu_unmap_memslots(kvm);
 
-	return 0;
-
-out_unmap:
-	kvm_iommu_unmap_memslots(kvm);
+out_unlock:
+	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
 
@@ -310,6 +313,11 @@
 	}
 }
 
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
+}
+
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
 	int idx;
@@ -320,7 +328,7 @@
 	slots = kvm_memslots(kvm);
 
 	kvm_for_each_memslot(memslot, slots)
-		kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+		kvm_iommu_unmap_pages(kvm, memslot);
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -335,7 +343,11 @@
 	if (!domain)
 		return 0;
 
+	mutex_lock(&kvm->slots_lock);
 	kvm_iommu_unmap_memslots(kvm);
+	kvm->arch.iommu_domain = NULL;
+	mutex_unlock(&kvm->slots_lock);
+
 	iommu_domain_free(domain);
 	return 0;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 42b7393..9739b53 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -808,12 +808,13 @@
 	if (r)
 		goto out_free;
 
-	/* map the pages in iommu page table */
+	/* map/unmap the pages in iommu page table */
 	if (npages) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		if (r)
 			goto out_free;
-	}
+	} else
+		kvm_iommu_unmap_pages(kvm, &old);
 
 	r = -ENOMEM;
 	slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),